if_vtnet, sync 6/x: refactor rx_- and update_vlan_filter functions.
[dragonfly.git] / sys / dev / virtual / virtio / net / if_vtnet.c
1 /*-
2  * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 /* Driver for VirtIO network devices. */
28
29 #include <sys/cdefs.h>
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/taskqueue.h>
41 #include <sys/random.h>
42 #include <sys/sglist.h>
43 #include <sys/serialize.h>
44 #include <sys/bus.h>
45 #include <sys/rman.h>
46
47 #include <machine/limits.h>
48
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_types.h>
54 #include <net/if_media.h>
55 #include <net/vlan/if_vlan_var.h>
56 #include <net/vlan/if_vlan_ether.h>
57 #include <net/ifq_var.h>
58
59 #include <net/bpf.h>
60
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip6.h>
65 #include <netinet/udp.h>
66 #include <netinet/tcp.h>
67
68 #include <dev/virtual/virtio/virtio/virtio.h>
69 #include <dev/virtual/virtio/virtio/virtqueue.h>
70
71 #include "virtio_net.h"
72 #include "virtio_if.h"
73
74 struct vtnet_statistics {
75         uint64_t        mbuf_alloc_failed;
76
77         uint64_t        rx_frame_too_large;
78         uint64_t        rx_enq_replacement_failed;
79         uint64_t        rx_mergeable_failed;
80         uint64_t        rx_csum_bad_ethtype;
81         uint64_t        rx_csum_bad_ipproto;
82         uint64_t        rx_csum_bad_offset;
83         uint64_t        rx_csum_failed;
84         uint64_t        rx_csum_offloaded;
85         uint64_t        rx_task_rescheduled;
86
87         uint64_t        tx_csum_offloaded;
88         uint64_t        tx_tso_offloaded;
89         uint64_t        tx_csum_bad_ethtype;
90         uint64_t        tx_tso_bad_ethtype;
91         uint64_t        tx_task_rescheduled;
92 };
93
94 struct vtnet_softc {
95         device_t                vtnet_dev;
96         struct ifnet            *vtnet_ifp;
97         struct lwkt_serialize   vtnet_slz;
98
99         uint32_t                vtnet_flags;
100 #define VTNET_FLAG_LINK         0x0001
101 #define VTNET_FLAG_SUSPENDED    0x0002
102 #define VTNET_FLAG_MAC          0x0004
103 #define VTNET_FLAG_CTRL_VQ      0x0008
104 #define VTNET_FLAG_CTRL_RX      0x0010
105 #define VTNET_FLAG_CTRL_MAC     0x0020
106 #define VTNET_FLAG_VLAN_FILTER  0x0040
107 #define VTNET_FLAG_TSO_ECN      0x0080
108 #define VTNET_FLAG_MRG_RXBUFS   0x0100
109 #define VTNET_FLAG_LRO_NOMRG    0x0200
110
111         struct virtqueue        *vtnet_rx_vq;
112         struct virtqueue        *vtnet_tx_vq;
113         struct virtqueue        *vtnet_ctrl_vq;
114
115         struct vtnet_tx_header  *vtnet_txhdrarea;
116         uint32_t                vtnet_txhdridx;
117         struct vtnet_mac_filter *vtnet_macfilter;
118
119         int                     vtnet_hdr_size;
120         int                     vtnet_tx_size;
121         int                     vtnet_rx_size;
122         int                     vtnet_rx_process_limit;
123         int                     vtnet_rx_mbuf_size;
124         int                     vtnet_rx_mbuf_count;
125         int                     vtnet_if_flags;
126         int                     vtnet_watchdog_timer;
127         uint64_t                vtnet_features;
128
129         struct task             vtnet_cfgchg_task;
130
131         struct vtnet_statistics vtnet_stats;
132
133         struct callout          vtnet_tick_ch;
134
135         eventhandler_tag        vtnet_vlan_attach;
136         eventhandler_tag        vtnet_vlan_detach;
137
138         struct ifmedia          vtnet_media;
139         /*
140          * Fake media type; the host does not provide us with
141          * any real media information.
142          */
143 #define VTNET_MEDIATYPE         (IFM_ETHER | IFM_1000_T | IFM_FDX)
144         char                    vtnet_hwaddr[ETHER_ADDR_LEN];
145
146         /*
147          * During reset, the host's VLAN filtering table is lost. The
148          * array below is used to restore all the VLANs configured on
149          * this interface after a reset.
150          */
151 #define VTNET_VLAN_SHADOW_SIZE  (4096 / 32)
152         int                     vtnet_nvlans;
153         uint32_t                vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];
154
155         char                    vtnet_mtx_name[16];
156 };
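/*
 * A sketch of how vtnet_vlan_shadow is indexed (assuming the usual one bit
 * per VLAN ID layout used by vtnet_update_vlan_filter()): for a 12-bit VLAN
 * tag "vid", the word is vtnet_vlan_shadow[vid >> 5] and the bit within it
 * is (1 << (vid & 0x1F)), giving 4096 / 32 == 128 words in total.
 */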
157
158 /*
159  * When mergeable buffers are not negotiated, the vtnet_rx_header structure
160  * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
161  * both keep the VirtIO header and the data non-contiguous and to keep the
162  * frame's payload 4 byte aligned.
163  *
164  * When mergeable buffers are negotiated, the host puts the VirtIO header in
165  * the beginning of the first mbuf's data.
166  */
167 #define VTNET_RX_HEADER_PAD     4
168 struct vtnet_rx_header {
169         struct virtio_net_hdr   vrh_hdr;
170         char                    vrh_pad[VTNET_RX_HEADER_PAD];
171 } __packed;
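/*
 * A worked example of the resulting layout (a sketch, assuming the legacy
 * 10-byte struct virtio_net_hdr): the header plus VTNET_RX_HEADER_PAD gives
 * a 14-byte vtnet_rx_header, the 14-byte Ethernet header then ends at byte
 * offset 28, so the start of the frame's payload is 4-byte aligned as the
 * comment above describes.
 */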
172
173 /*
174  * For each outgoing frame, the vtnet_tx_header below is allocated from
175  * the preallocated vtnet_txhdrarea.
176  */
177 struct vtnet_tx_header {
178         union {
179                 struct virtio_net_hdr           hdr;
180                 struct virtio_net_hdr_mrg_rxbuf mhdr;
181         } vth_uhdr;
182
183         struct mbuf             *vth_mbuf;
184 };
185
186 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");
187
188 /*
189  * The VirtIO specification does not place a limit on the number of MAC
190  * addresses the guest driver may request to be filtered. In practice,
191  * the host is constrained by available resources. To simplify this driver,
192  * impose a reasonably high limit on the number of MAC addresses we will
193  * filter before falling back to promiscuous or all-multicast modes.
194  */
195 #define VTNET_MAX_MAC_ENTRIES   128
196
197 struct vtnet_mac_table {
198         uint32_t                nentries;
199         uint8_t                 macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
200 } __packed;
201
202 struct vtnet_mac_filter {
203         struct vtnet_mac_table  vmf_unicast;
204         uint32_t                vmf_pad; /* Make tables non-contiguous. */
205         struct vtnet_mac_table  vmf_multicast;
206 };
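/*
 * A sketch of how this filter is handed to the host (assuming the standard
 * VIRTIO_NET_CTRL_MAC_TABLE_SET layout): vtnet_rx_filter_mac() is expected
 * to build an sglist carrying the control header, vmf_unicast, vmf_multicast,
 * and an ack byte as separate segments; vmf_pad keeps the two tables from
 * being coalesced into one physically contiguous segment.
 */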
207
208 #define VTNET_WATCHDOG_TIMEOUT  5
209 #define VTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP)
210
211 /* Features desired/implemented by this driver. */
212 #define VTNET_FEATURES          \
213     (VIRTIO_NET_F_MAC           | \
214      VIRTIO_NET_F_STATUS        | \
215      VIRTIO_NET_F_CTRL_VQ       | \
216      VIRTIO_NET_F_CTRL_RX       | \
217      VIRTIO_NET_F_CTRL_MAC_ADDR | \
218      VIRTIO_NET_F_CTRL_VLAN     | \
219      VIRTIO_NET_F_CSUM          | \
220      VIRTIO_NET_F_HOST_TSO4     | \
221      VIRTIO_NET_F_HOST_TSO6     | \
222      VIRTIO_NET_F_HOST_ECN      | \
223      VIRTIO_NET_F_GUEST_CSUM    | \
224      VIRTIO_NET_F_GUEST_TSO4    | \
225      VIRTIO_NET_F_GUEST_TSO6    | \
226      VIRTIO_NET_F_GUEST_ECN     | \
227      VIRTIO_NET_F_MRG_RXBUF)
228
229 /*
230  * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
231  * frames larger than 1514 bytes. We do not yet support software LRO
232  * via tcp_lro_rx().
233  */
234 #define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
235                             VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
236
237 #define VTNET_MAX_MTU           65536
238 #define VTNET_MAX_RX_SIZE       65550
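/*
 * 65550 appears to be VTNET_MAX_MTU (65536) plus ETHER_HDR_LEN (14), i.e.
 * the largest Ethernet frame the host may hand us with the LRO features.
 */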
239
240 /*
241  * Used to preallocate the Vq indirect descriptors. The first segment
242  * is reserved for the header.
243  */
244 #define VTNET_MIN_RX_SEGS       2
245 #define VTNET_MAX_RX_SEGS       34
246 #define VTNET_MAX_TX_SEGS       34
247
248 #define IFCAP_LRO               0x00400 /* can do Large Receive Offload */
249 #define IFCAP_VLAN_HWFILTER     0x10000 /* interface hw can filter vlan tag */
250 #define IFCAP_VLAN_HWTSO        0x40000 /* can do IFCAP_TSO on VLANs */
251
252
253 /*
254  * Assert we can receive and transmit the maximum with regular
255  * size clusters.
256  */
257 CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
258 CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
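/*
 * Worked out with MCLBYTES == 2048: (34 - 1) * 2048 == 67584, which is
 * comfortably above both 65550 and 65536, so the assertions above hold.
 */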
259
260 /*
261  * Determine how many mbufs are in each receive buffer. For LRO without
262  * mergeable descriptors, we must allocate an mbuf chain large enough to
263  * hold both the vtnet_rx_header and the maximum receivable data.
264  */
265 #define VTNET_NEEDED_RX_MBUFS(_sc)                                      \
266         ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :          \
267         howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,     \
268         (_sc)->vtnet_rx_mbuf_size)
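/*
 * For example (assuming MCLBYTES == 2048 clusters): with LRO_NOMRG set this
 * works out to howmany(14 + 65550, 2048) == 33 mbufs per receive buffer;
 * vtnet_enqueue_rxbuf() then needs one segment for the header plus 33 data
 * segments, which is exactly VTNET_MAX_RX_SEGS (34).
 */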
269
270 static int      vtnet_modevent(module_t, int, void *);
271
272 static int      vtnet_probe(device_t);
273 static int      vtnet_attach(device_t);
274 static int      vtnet_detach(device_t);
275 static int      vtnet_suspend(device_t);
276 static int      vtnet_resume(device_t);
277 static int      vtnet_shutdown(device_t);
278 static int      vtnet_config_change(device_t);
279
280 static void     vtnet_negotiate_features(struct vtnet_softc *);
281 static int      vtnet_alloc_virtqueues(struct vtnet_softc *);
282 static void     vtnet_get_hwaddr(struct vtnet_softc *);
283 static void     vtnet_set_hwaddr(struct vtnet_softc *);
284 static int      vtnet_is_link_up(struct vtnet_softc *);
285 static void     vtnet_update_link_status(struct vtnet_softc *);
286 #if 0
287 static void     vtnet_watchdog(struct vtnet_softc *);
288 #endif
289 static void     vtnet_config_change_task(void *, int);
290 static int      vtnet_setup_interface(struct vtnet_softc *);
291 static int      vtnet_change_mtu(struct vtnet_softc *, int);
292 static int      vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
293
294 static int      vtnet_init_rx_vq(struct vtnet_softc *);
295 static void     vtnet_free_rx_mbufs(struct vtnet_softc *);
296 static void     vtnet_free_tx_mbufs(struct vtnet_softc *);
297 static void     vtnet_free_ctrl_vq(struct vtnet_softc *);
298
299 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
300                     struct mbuf **);
301 static int      vtnet_replace_rxbuf(struct vtnet_softc *,
302                     struct mbuf *, int);
303 static int      vtnet_newbuf(struct vtnet_softc *);
304 static void     vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
305 static void     vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
306 static int      vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
307 static void     vtnet_vlan_tag_remove(struct mbuf *);
308 static int      vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
309                     struct virtio_net_hdr *);
310 static int      vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
311 static int      vtnet_rxeof(struct vtnet_softc *, int, int *);
312 static void     vtnet_rx_intr_task(void *);
313 static int      vtnet_rx_vq_intr(void *);
314
315 static void     vtnet_txeof(struct vtnet_softc *);
316 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
317                     struct virtio_net_hdr *);
318 static int      vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
319                     struct vtnet_tx_header *);
320 static int      vtnet_encap(struct vtnet_softc *, struct mbuf **);
321 static void     vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
322 static void     vtnet_start(struct ifnet *, struct ifaltq_subque *);
323 static void     vtnet_tick(void *);
324 static void     vtnet_tx_intr_task(void *);
325 static int      vtnet_tx_vq_intr(void *);
326
327 static void     vtnet_stop(struct vtnet_softc *);
328 static int      vtnet_virtio_reinit(struct vtnet_softc *);
329 static void     vtnet_init_locked(struct vtnet_softc *);
330 static void     vtnet_init(void *);
331
332 static void     vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
333                     struct sglist *, int, int);
334
335 static int      vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
336 static int      vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
337 static int      vtnet_set_promisc(struct vtnet_softc *, int);
338 static int      vtnet_set_allmulti(struct vtnet_softc *, int);
339 static void     vtnet_rx_filter(struct vtnet_softc *sc);
340 static void     vtnet_rx_filter_mac(struct vtnet_softc *);
341
342 static int      vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
343 static void     vtnet_rx_filter_vlan(struct vtnet_softc *);
344 static void     vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
345 static void     vtnet_register_vlan(void *, struct ifnet *, uint16_t);
346 static void     vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
347
348 static int      vtnet_ifmedia_upd(struct ifnet *);
349 static void     vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
350
351 static void     vtnet_add_statistics(struct vtnet_softc *);
352
353 static int      vtnet_enable_rx_intr(struct vtnet_softc *);
354 static int      vtnet_enable_tx_intr(struct vtnet_softc *);
355 static void     vtnet_disable_rx_intr(struct vtnet_softc *);
356 static void     vtnet_disable_tx_intr(struct vtnet_softc *);
357
358 /* Tunables. */
359 static int vtnet_csum_disable = 0;
360 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
361 static int vtnet_tso_disable = 1;
362 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
363 static int vtnet_lro_disable = 1;
364 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
365
366 /*
367  * Reducing the number of transmit completion interrupts can
368  * improve performance. To do so, the define below keeps the
369  * Tx vq interrupt disabled and adds calls to vtnet_txeof()
370  * in the start and watchdog paths. The price to pay for this
371  * is that the freeing of transmitted mbufs may be delayed
372  * until the watchdog fires.
373  */
374 #define VTNET_TX_INTR_MODERATION
375
376 static struct virtio_feature_desc vtnet_feature_desc[] = {
377         { VIRTIO_NET_F_CSUM,            "TxChecksum"    },
378         { VIRTIO_NET_F_GUEST_CSUM,      "RxChecksum"    },
379         { VIRTIO_NET_F_MAC,             "MacAddress"    },
380         { VIRTIO_NET_F_GSO,             "TxAllGSO"      },
381         { VIRTIO_NET_F_GUEST_TSO4,      "RxTSOv4"       },
382         { VIRTIO_NET_F_GUEST_TSO6,      "RxTSOv6"       },
383         { VIRTIO_NET_F_GUEST_ECN,       "RxECN"         },
384         { VIRTIO_NET_F_GUEST_UFO,       "RxUFO"         },
385         { VIRTIO_NET_F_HOST_TSO4,       "TxTSOv4"       },
386         { VIRTIO_NET_F_HOST_TSO6,       "TxTSOv6"       },
387         { VIRTIO_NET_F_HOST_ECN,        "TxTSOECN"      },
388         { VIRTIO_NET_F_HOST_UFO,        "TxUFO"         },
389         { VIRTIO_NET_F_MRG_RXBUF,       "MrgRxBuf"      },
390         { VIRTIO_NET_F_STATUS,          "Status"        },
391         { VIRTIO_NET_F_CTRL_VQ,         "ControlVq"     },
392         { VIRTIO_NET_F_CTRL_RX,         "RxMode"        },
393         { VIRTIO_NET_F_CTRL_VLAN,       "VLanFilter"    },
394         { VIRTIO_NET_F_CTRL_RX_EXTRA,   "RxModeExtra"   },
395         { VIRTIO_NET_F_GUEST_ANNOUNCE,  "GuestAnnounce" },
396         { VIRTIO_NET_F_MQ,              "RFS"           },
397         { VIRTIO_NET_F_CTRL_MAC_ADDR,   "SetMacAddress" },
398         { 0, NULL }
399 };
400
401 static device_method_t vtnet_methods[] = {
402         /* Device methods. */
403         DEVMETHOD(device_probe,         vtnet_probe),
404         DEVMETHOD(device_attach,        vtnet_attach),
405         DEVMETHOD(device_detach,        vtnet_detach),
406         DEVMETHOD(device_suspend,       vtnet_suspend),
407         DEVMETHOD(device_resume,        vtnet_resume),
408         DEVMETHOD(device_shutdown,      vtnet_shutdown),
409
410         /* VirtIO methods. */
411         DEVMETHOD(virtio_config_change, vtnet_config_change),
412
413         { 0, 0 }
414 };
415
416 static driver_t vtnet_driver = {
417         "vtnet",
418         vtnet_methods,
419         sizeof(struct vtnet_softc)
420 };
421
422 static devclass_t vtnet_devclass;
423
424 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
425     vtnet_modevent, 0);
426 MODULE_VERSION(vtnet, 1);
427 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
428
429 static int
430 vtnet_modevent(module_t mod, int type, void *unused)
431 {
432         int error;
433
434         error = 0;
435
436         switch (type) {
437         case MOD_LOAD:
438                 break;
439         case MOD_UNLOAD:
440                 break;
441         case MOD_SHUTDOWN:
442                 break;
443         default:
444                 error = EOPNOTSUPP;
445                 break;
446         }
447
448         return (error);
449 }
450
451 static int
452 vtnet_probe(device_t dev)
453 {
454         if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
455                 return (ENXIO);
456
457         device_set_desc(dev, "VirtIO Networking Adapter");
458
459         return (BUS_PROBE_DEFAULT);
460 }
461
462 static int
463 vtnet_attach(device_t dev)
464 {
465         struct vtnet_softc *sc;
466         int error;
467
468         sc = device_get_softc(dev);
469         sc->vtnet_dev = dev;
470
471         lwkt_serialize_init(&sc->vtnet_slz);
472         callout_init(&sc->vtnet_tick_ch);
473
474         ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
475                      vtnet_ifmedia_sts);
476         ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
477         ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
478
479         vtnet_add_statistics(sc);
480
481         /* Register our feature descriptions. */
482         virtio_set_feature_desc(dev, vtnet_feature_desc);
483         vtnet_negotiate_features(sc);
484
485         if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
486                 /* This feature should always be negotiated. */
487                 sc->vtnet_flags |= VTNET_FLAG_MAC;
488         }
489
490         if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
491                 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
492                 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
493         } else {
494                 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
495         }
496
497         sc->vtnet_rx_mbuf_size = MCLBYTES;
498         sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
499
500         if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
501                 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
502
503                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
504                         sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
505                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
506                         sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
507                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
508                     virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
509                         sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
510         }
511
512         /* Read (or generate) the MAC address for the adapter. */
513         vtnet_get_hwaddr(sc);
514
515         error = vtnet_alloc_virtqueues(sc);
516         if (error) {
517                 device_printf(dev, "cannot allocate virtqueues\n");
518                 goto fail;
519         }
520
521         error = vtnet_setup_interface(sc);
522         if (error) {
523                 device_printf(dev, "cannot setup interface\n");
524                 goto fail;
525         }
526
527         TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);
528
529         error = virtio_setup_intr(dev, &sc->vtnet_slz);
530         if (error) {
531                 device_printf(dev, "cannot setup virtqueue interrupts\n");
532                 ether_ifdetach(sc->vtnet_ifp);
533                 goto fail;
534         }
535
536         /*
537          * Device defaults to promiscuous mode for backwards
538          * compatibility. Turn it off if possible.
539          */
540         if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
541                 lwkt_serialize_enter(&sc->vtnet_slz);
542                 if (vtnet_set_promisc(sc, 0) != 0) {
543                         sc->vtnet_ifp->if_flags |= IFF_PROMISC;
544                         device_printf(dev,
545                             "cannot disable promiscuous mode\n");
546                 }
547                 lwkt_serialize_exit(&sc->vtnet_slz);
548         } else
549                 sc->vtnet_ifp->if_flags |= IFF_PROMISC;
550
551 fail:
552         if (error)
553                 vtnet_detach(dev);
554
555         return (error);
556 }
557
558 static int
559 vtnet_detach(device_t dev)
560 {
561         struct vtnet_softc *sc;
562         struct ifnet *ifp;
563
564         sc = device_get_softc(dev);
565         ifp = sc->vtnet_ifp;
566
567         if (device_is_attached(dev)) {
568                 lwkt_serialize_enter(&sc->vtnet_slz);
569                 vtnet_stop(sc);
570                 lwkt_serialize_exit(&sc->vtnet_slz);
571
572                 callout_stop(&sc->vtnet_tick_ch);
573                 taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);
574
575                 ether_ifdetach(ifp);
576         }
577
578         if (sc->vtnet_vlan_attach != NULL) {
579                 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
580                 sc->vtnet_vlan_attach = NULL;
581         }
582         if (sc->vtnet_vlan_detach != NULL) {
583                 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
584                 sc->vtnet_vlan_detach = NULL;
585         }
586
587         if (ifp) {
588                 if_free(ifp);
589                 sc->vtnet_ifp = NULL;
590         }
591
592         if (sc->vtnet_rx_vq != NULL)
593                 vtnet_free_rx_mbufs(sc);
594         if (sc->vtnet_tx_vq != NULL)
595                 vtnet_free_tx_mbufs(sc);
596         if (sc->vtnet_ctrl_vq != NULL)
597                 vtnet_free_ctrl_vq(sc);
598
599         if (sc->vtnet_txhdrarea != NULL) {
600                 contigfree(sc->vtnet_txhdrarea,
601                     ((sc->vtnet_tx_size / 2) + 1) *
602                     sizeof(struct vtnet_tx_header), M_VTNET);
603                 sc->vtnet_txhdrarea = NULL;
604         }
605         if (sc->vtnet_macfilter != NULL) {
606                 contigfree(sc->vtnet_macfilter,
607                     sizeof(struct vtnet_mac_filter), M_DEVBUF);
608                 sc->vtnet_macfilter = NULL;
609         }
610
611         ifmedia_removeall(&sc->vtnet_media);
612
613         return (0);
614 }
615
616 static int
617 vtnet_suspend(device_t dev)
618 {
619         struct vtnet_softc *sc;
620
621         sc = device_get_softc(dev);
622
623         lwkt_serialize_enter(&sc->vtnet_slz);
624         vtnet_stop(sc);
625         sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
626         lwkt_serialize_exit(&sc->vtnet_slz);
627
628         return (0);
629 }
630
631 static int
632 vtnet_resume(device_t dev)
633 {
634         struct vtnet_softc *sc;
635         struct ifnet *ifp;
636
637         sc = device_get_softc(dev);
638         ifp = sc->vtnet_ifp;
639
640         lwkt_serialize_enter(&sc->vtnet_slz);
641         if (ifp->if_flags & IFF_UP)
642                 vtnet_init_locked(sc);
643         sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
644         lwkt_serialize_exit(&sc->vtnet_slz);
645
646         return (0);
647 }
648
649 static int
650 vtnet_shutdown(device_t dev)
651 {
652
653         /*
654          * Suspend already does all of what we need to
655          * do here; we just never expect to be resumed.
656          */
657         return (vtnet_suspend(dev));
658 }
659
660 static int
661 vtnet_config_change(device_t dev)
662 {
663         struct vtnet_softc *sc;
664
665         sc = device_get_softc(dev);
666
667         taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);
668
669         return (1);
670 }
671
672 static void
673 vtnet_negotiate_features(struct vtnet_softc *sc)
674 {
675         device_t dev;
676         uint64_t mask, features;
677
678         dev = sc->vtnet_dev;
679         mask = 0;
680
681         if (vtnet_csum_disable)
682                 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
683
684         /*
685          * TSO and LRO are only available when their corresponding checksum
686          * offload feature is also negotiated.
687          */
688
689         if (vtnet_csum_disable || vtnet_tso_disable)
690                 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
691                     VIRTIO_NET_F_HOST_ECN;
692
693         if (vtnet_csum_disable || vtnet_lro_disable)
694                 mask |= VTNET_LRO_FEATURES;
695
696         features = VTNET_FEATURES & ~mask;
697         features |= VIRTIO_F_NOTIFY_ON_EMPTY;
698         sc->vtnet_features = virtio_negotiate_features(dev, features);
699
700         if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
701             virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
702                 /*
703                  * LRO without mergeable buffers requires special care. This
704                  * is not ideal because every receive buffer must be large
705                  * enough to hold the maximum TCP packet, the Ethernet header,
706                  * and the VirtIO header. This requires up to 34 descriptors with
707                  * MCLBYTES clusters. If we do not have indirect descriptors,
708                  * LRO is disabled since the virtqueue will not contain very
709                  * many receive buffers.
710                  */
711                 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
712                         device_printf(dev,
713                             "LRO disabled due to both mergeable buffers and "
714                             "indirect descriptors not negotiated\n");
715
716                         features &= ~VTNET_LRO_FEATURES;
717                         sc->vtnet_features =
718                             virtio_negotiate_features(dev, features);
719                 } else
720                         sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
721         }
722 }
723
724 static int
725 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
726 {
727         device_t dev;
728         struct vq_alloc_info vq_info[3];
729         int nvqs, rxsegs;
730
731         dev = sc->vtnet_dev;
732         nvqs = 2;
733
734         /*
735          * Indirect descriptors are not needed for the Rx
736          * virtqueue when mergeable buffers are negotiated.
737          * The header is placed inline with the data, not
738          * in a separate descriptor, and mbuf clusters are
739          * always physically contiguous.
740          */
741         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
742                 rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
743                     VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
744         } else
745                 rxsegs = 0;
746
747         VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
748             vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
749             "%s receive", device_get_nameunit(dev));
750
751         VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
752             vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
753             "%s transmit", device_get_nameunit(dev));
754
755         if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
756                 nvqs++;
757
758                 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
759                     &sc->vtnet_ctrl_vq, "%s control",
760                     device_get_nameunit(dev));
761         }
762
763         return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
764 }
765
766 static int
767 vtnet_setup_interface(struct vtnet_softc *sc)
768 {
769         device_t dev;
770         struct ifnet *ifp;
771         int tx_size;
772
773         dev = sc->vtnet_dev;
774
775         ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
776         if (ifp == NULL) {
777                 device_printf(dev, "cannot allocate ifnet structure\n");
778                 return (ENOSPC);
779         }
780
781         ifp->if_softc = sc;
782         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
783         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
784         ifp->if_init = vtnet_init;
785         ifp->if_start = vtnet_start;
786         ifp->if_ioctl = vtnet_ioctl;
787
788         sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
789         sc->vtnet_rx_process_limit = sc->vtnet_rx_size;
790
791         tx_size = virtqueue_size(sc->vtnet_tx_vq);
792         sc->vtnet_tx_size = tx_size;
793         sc->vtnet_txhdridx = 0;
794         sc->vtnet_txhdrarea = contigmalloc(
795             ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header),
796             M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
797         if (sc->vtnet_txhdrarea == NULL) {
798                 device_printf(dev, "cannot contigmalloc the tx headers\n");
799                 return (ENOMEM);
800         }
801         sc->vtnet_macfilter = contigmalloc(
802             sizeof(struct vtnet_mac_filter),
803             M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
804         if (sc->vtnet_macfilter == NULL) {
805                 device_printf(dev,
806                     "cannot contigmalloc the mac filter table\n");
807                 return (ENOMEM);
808         }
809         ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
810         ifq_set_ready(&ifp->if_snd);
811
812         ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);
813
814         if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
815                 //ifp->if_capabilities |= IFCAP_LINKSTATE;
816                 kprintf("add dynamic link state\n");
817         }
818
819         /* Tell the upper layer(s) we support long frames. */
820         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
821         ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
822
823         if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
824                 ifp->if_capabilities |= IFCAP_TXCSUM;
825
826                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
827                         ifp->if_capabilities |= IFCAP_TSO4;
828                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
829                         ifp->if_capabilities |= IFCAP_TSO6;
830                 if (ifp->if_capabilities & IFCAP_TSO)
831                         ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
832
833                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
834                         sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
835         }
836
837         if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
838                 ifp->if_capabilities |= IFCAP_RXCSUM;
839
840                 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
841                     virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
842                         ifp->if_capabilities |= IFCAP_LRO;
843         }
844
845         if (ifp->if_capabilities & IFCAP_HWCSUM) {
846                 /*
847                  * VirtIO does not support VLAN tagging, but we can fake
848                  * it by inserting and removing the 802.1Q header during
849                  * transmit and receive. We are then able to do checksum
850                  * offloading of VLAN frames.
851                  */
852                 ifp->if_capabilities |=
853                         IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
854         }
855
856         ifp->if_capenable = ifp->if_capabilities;
857
858         /*
859          * Capabilities after here are not enabled by default.
860          */
861
862         if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
863                 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
864
865                 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
866                     vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
867                 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
868                     vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
869         }
870
871         return (0);
872 }
873
874 static void
875 vtnet_set_hwaddr(struct vtnet_softc *sc)
876 {
877         device_t dev;
878
879         dev = sc->vtnet_dev;
880
881         if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
882             (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
883                 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
884                         device_printf(dev, "unable to set MAC address\n");
885         } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
886                 virtio_write_device_config(dev,
887                     offsetof(struct virtio_net_config, mac),
888                     sc->vtnet_hwaddr, ETHER_ADDR_LEN);
889         }
890 }
891
892 static void
893 vtnet_get_hwaddr(struct vtnet_softc *sc)
894 {
895         device_t dev;
896
897         dev = sc->vtnet_dev;
898
899         if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
900                 /*
901                  * Generate a random locally administered unicast address.
902                  *
903                  * It would be nice to generate the same MAC address across
904                  * reboots, but it seems all the hosts currently available
905                  * support the MAC feature, so this isn't too important.
906                  */
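		/*
		 * Note: 0xB2 below has the locally administered bit (0x02) set
		 * and the multicast bit (0x01) clear, so the generated address
		 * is a valid unicast address.
		 */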
907                 sc->vtnet_hwaddr[0] = 0xB2;
908                 karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
909                 vtnet_set_hwaddr(sc);
910                 return;
911         }
912
913         virtio_read_device_config(dev,
914             offsetof(struct virtio_net_config, mac),
915             sc->vtnet_hwaddr, ETHER_ADDR_LEN);
916 }
917
918 static int
919 vtnet_is_link_up(struct vtnet_softc *sc)
920 {
921         device_t dev;
922         struct ifnet *ifp;
923         uint16_t status;
924
925         dev = sc->vtnet_dev;
926         ifp = sc->vtnet_ifp;
927
928         ASSERT_SERIALIZED(&sc->vtnet_slz);
929
930         status = virtio_read_dev_config_2(dev,
931                         offsetof(struct virtio_net_config, status));
932
933         return ((status & VIRTIO_NET_S_LINK_UP) != 0);
934 }
935
936 static void
937 vtnet_update_link_status(struct vtnet_softc *sc)
938 {
939         device_t dev;
940         struct ifnet *ifp;
941         struct ifaltq_subque *ifsq;
942         int link;
943
944         dev = sc->vtnet_dev;
945         ifp = sc->vtnet_ifp;
946         ifsq = ifq_get_subq_default(&ifp->if_snd);
947
948         link = vtnet_is_link_up(sc);
949
950         if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
951                 sc->vtnet_flags |= VTNET_FLAG_LINK;
952                 if (bootverbose)
953                         device_printf(dev, "Link is up\n");
954                 ifp->if_link_state = LINK_STATE_UP;
955                 if_link_state_change(ifp);
956                 if (!ifsq_is_empty(ifsq))
957                         vtnet_start_locked(ifp, ifsq);
958         } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
959                 sc->vtnet_flags &= ~VTNET_FLAG_LINK;
960                 if (bootverbose)
961                         device_printf(dev, "Link is down\n");
962
963                 ifp->if_link_state = LINK_STATE_DOWN;
964                 if_link_state_change(ifp);
965         }
966 }
967
968 #if 0
969 static void
970 vtnet_watchdog(struct vtnet_softc *sc)
971 {
972         struct ifnet *ifp;
973
974         ifp = sc->vtnet_ifp;
975
976 #ifdef VTNET_TX_INTR_MODERATION
977         vtnet_txeof(sc);
978 #endif
979
980         if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
981                 return;
982
983         if_printf(ifp, "watchdog timeout -- resetting\n");
984 #ifdef VTNET_DEBUG
985         virtqueue_dump(sc->vtnet_tx_vq);
986 #endif
987         ifp->if_oerrors++;
988         ifp->if_flags &= ~IFF_RUNNING;
989         vtnet_init_locked(sc);
990 }
991 #endif
992
993 static void
994 vtnet_config_change_task(void *arg, int pending)
995 {
996         struct vtnet_softc *sc;
997
998         sc = arg;
999
1000         lwkt_serialize_enter(&sc->vtnet_slz);
1001         vtnet_update_link_status(sc);
1002         lwkt_serialize_exit(&sc->vtnet_slz);
1003 }
1004
1005 static int
1006 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1007 {
1008         struct vtnet_softc *sc;
1009         struct ifreq *ifr;
1010         int reinit, mask, error;
1011
1012         sc = ifp->if_softc;
1013         ifr = (struct ifreq *) data;
1014         reinit = 0;
1015         error = 0;
1016
1017         switch (cmd) {
1018         case SIOCSIFMTU:
1019                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
1020                         error = EINVAL;
1021                 else if (ifp->if_mtu != ifr->ifr_mtu) {
1022                         lwkt_serialize_enter(&sc->vtnet_slz);
1023                         error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1024                         lwkt_serialize_exit(&sc->vtnet_slz);
1025                 }
1026                 break;
1027
1028         case SIOCSIFFLAGS:
1029                 lwkt_serialize_enter(&sc->vtnet_slz);
1030                 if ((ifp->if_flags & IFF_UP) == 0) {
1031                         if (ifp->if_flags & IFF_RUNNING)
1032                                 vtnet_stop(sc);
1033                 } else if (ifp->if_flags & IFF_RUNNING) {
1034                         if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1035                             (IFF_PROMISC | IFF_ALLMULTI)) {
1036                                 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1037                                         vtnet_rx_filter(sc);
1038                                 else
1039                                         error = ENOTSUP;
1040                         }
1041                 } else
1042                         vtnet_init_locked(sc);
1043
1044                 if (error == 0)
1045                         sc->vtnet_if_flags = ifp->if_flags;
1046                 lwkt_serialize_exit(&sc->vtnet_slz);
1047                 break;
1048
1049         case SIOCADDMULTI:
1050         case SIOCDELMULTI:
1051                 lwkt_serialize_enter(&sc->vtnet_slz);
1052                 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
1053                     (ifp->if_flags & IFF_RUNNING))
1054                         vtnet_rx_filter_mac(sc);
1055                 lwkt_serialize_exit(&sc->vtnet_slz);
1056                 break;
1057
1058         case SIOCSIFMEDIA:
1059         case SIOCGIFMEDIA:
1060                 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
1061                 break;
1062
1063         case SIOCSIFCAP:
1064                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1065
1066                 lwkt_serialize_enter(&sc->vtnet_slz);
1067
1068                 if (mask & IFCAP_TXCSUM) {
1069                         ifp->if_capenable ^= IFCAP_TXCSUM;
1070                         if (ifp->if_capenable & IFCAP_TXCSUM)
1071                                 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
1072                         else
1073                                 ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
1074                 }
1075
1076                 if (mask & IFCAP_TSO4) {
1077                         ifp->if_capenable ^= IFCAP_TSO4;
1078                         if (ifp->if_capenable & IFCAP_TSO4)
1079                                 ifp->if_hwassist |= CSUM_TSO;
1080                         else
1081                                 ifp->if_hwassist &= ~CSUM_TSO;
1082                 }
1083
1084                 if (mask & IFCAP_RXCSUM) {
1085                         ifp->if_capenable ^= IFCAP_RXCSUM;
1086                         reinit = 1;
1087                 }
1088
1089                 if (mask & IFCAP_LRO) {
1090                         ifp->if_capenable ^= IFCAP_LRO;
1091                         reinit = 1;
1092                 }
1093
1094                 if (mask & IFCAP_VLAN_HWFILTER) {
1095                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1096                         reinit = 1;
1097                 }
1098
1099                 if (mask & IFCAP_VLAN_HWTSO)
1100                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1101
1102                 if (mask & IFCAP_VLAN_HWTAGGING)
1103                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1104
1105                 if (reinit && (ifp->if_flags & IFF_RUNNING)) {
1106                         ifp->if_flags &= ~IFF_RUNNING;
1107                         vtnet_init_locked(sc);
1108                 }
1109                 //VLAN_CAPABILITIES(ifp);
1110
1111                 lwkt_serialize_exit(&sc->vtnet_slz);
1112                 break;
1113
1114         default:
1115                 error = ether_ioctl(ifp, cmd, data);
1116                 break;
1117         }
1118
1119         return (error);
1120 }
1121
1122 static int
1123 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1124 {
1125         struct ifnet *ifp;
1126         int new_frame_size, clsize;
1127
1128         ifp = sc->vtnet_ifp;
1129
1130         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1131                 new_frame_size = sizeof(struct vtnet_rx_header) +
1132                     sizeof(struct ether_vlan_header) + new_mtu;
1133
1134                 if (new_frame_size > MJUM9BYTES)
1135                         return (EINVAL);
1136
1137                 if (new_frame_size <= MCLBYTES)
1138                         clsize = MCLBYTES;
1139                 else
1140                         clsize = MJUM9BYTES;
1141         } else {
1142                 new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
1143                     sizeof(struct ether_vlan_header) + new_mtu;
1144
1145                 if (new_frame_size <= MCLBYTES)
1146                         clsize = MCLBYTES;
1147                 else
1148                         clsize = MJUMPAGESIZE;
1149         }
1150
1151         sc->vtnet_rx_mbuf_size = clsize;
1152         sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
1153         KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
1154             ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));
1155
1156         ifp->if_mtu = new_mtu;
1157
1158         if (ifp->if_flags & IFF_RUNNING) {
1159                 ifp->if_flags &= ~IFF_RUNNING;
1160                 vtnet_init_locked(sc);
1161         }
1162
1163         return (0);
1164 }
1165
1166 static int
1167 vtnet_init_rx_vq(struct vtnet_softc *sc)
1168 {
1169         struct virtqueue *vq;
1170         int nbufs, error;
1171
1172         vq = sc->vtnet_rx_vq;
1173         nbufs = 0;
1174         error = ENOSPC;
1175
1176         while (!virtqueue_full(vq)) {
1177                 if ((error = vtnet_newbuf(sc)) != 0)
1178                         break;
1179                 nbufs++;
1180         }
1181
1182         if (nbufs > 0) {
1183                 virtqueue_notify(vq, &sc->vtnet_slz);
1184
1185                 /*
1186                  * EMSGSIZE signifies the virtqueue did not have enough
1187                  * entries available to hold the last mbuf. This is not
1188                  * an error. We should not get ENOSPC since we check if
1189                  * the virtqueue is full before attempting to add a
1190                  * buffer.
1191                  */
1192                 if (error == EMSGSIZE)
1193                         error = 0;
1194         }
1195
1196         return (error);
1197 }
1198
1199 static void
1200 vtnet_free_rx_mbufs(struct vtnet_softc *sc)
1201 {
1202         struct virtqueue *vq;
1203         struct mbuf *m;
1204         int last;
1205
1206         vq = sc->vtnet_rx_vq;
1207         last = 0;
1208
1209         while ((m = virtqueue_drain(vq, &last)) != NULL)
1210                 m_freem(m);
1211
1212         KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
1213 }
1214
1215 static void
1216 vtnet_free_tx_mbufs(struct vtnet_softc *sc)
1217 {
1218         struct virtqueue *vq;
1219         struct vtnet_tx_header *txhdr;
1220         int last;
1221
1222         vq = sc->vtnet_tx_vq;
1223         last = 0;
1224
1225         while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1226                 m_freem(txhdr->vth_mbuf);
1227         }
1228
1229         KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
1230 }
1231
1232 static void
1233 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
1234 {
1235         /*
1236          * The control virtqueue is only polled, therefore
1237          * it should already be empty.
1238          */
1239         KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
1240                 ("Ctrl Vq not empty"));
1241 }
1242
1243 static struct mbuf *
1244 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1245 {
1246         struct mbuf *m_head, *m_tail, *m;
1247         int i, clsize;
1248
1249         clsize = sc->vtnet_rx_mbuf_size;
1250
1251         /* Use m_getcl() instead of m_getjcl(); see the if_mxge.c comment at line 2398. */
1252         //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
1253         m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1254         if (m_head == NULL)
1255                 goto fail;
1256
1257         m_head->m_len = clsize;
1258         m_tail = m_head;
1259
1260         if (nbufs > 1) {
1261                 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1262                         ("chained Rx mbuf requested without LRO_NOMRG"));
1263
1264                 for (i = 0; i < nbufs - 1; i++) {
1265                         //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
1266                         m = m_getcl(M_NOWAIT, MT_DATA, 0);
1267                         if (m == NULL)
1268                                 goto fail;
1269
1270                         m->m_len = clsize;
1271                         m_tail->m_next = m;
1272                         m_tail = m;
1273                 }
1274         }
1275
1276         if (m_tailp != NULL)
1277                 *m_tailp = m_tail;
1278
1279         return (m_head);
1280
1281 fail:
1282         sc->vtnet_stats.mbuf_alloc_failed++;
1283         m_freem(m_head);
1284
1285         return (NULL);
1286 }
1287
1288 static int
1289 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
1290 {
1291         struct mbuf *m, *m_prev;
1292         struct mbuf *m_new, *m_tail;
1293         int len, clsize, nreplace, error;
1294
1295         m = m0;
1296         m_prev = NULL;
1297         len = len0;
1298
1299         m_tail = NULL;
1300         clsize = sc->vtnet_rx_mbuf_size;
1301         nreplace = 0;
1302
1303         if (m->m_next != NULL)
1304                 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1305                     ("chained Rx mbuf without LRO_NOMRG"));
1306
1307         /*
1308          * Since LRO_NOMRG mbuf chains are so large, we want to avoid
1309          * allocating an entire chain for each received frame. When
1310          * the received frame's length is less than that of the chain,
1311          * the unused mbufs are reassigned to the new chain.
1312          */
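	/*
	 * For example, assuming MCLBYTES (2048 byte) clusters: a LRO_NOMRG
	 * chain holds 33 mbufs, but a typical 1500 byte frame only uses the
	 * first cluster, so the 32 unused mbufs are reassigned to the new
	 * chain below instead of being freed and reallocated.
	 */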
1313         while (len > 0) {
1314                 /*
1315                  * Something is seriously wrong if we received
1316                  * a frame larger than the mbuf chain. Drop it.
1317                  */
1318                 if (m == NULL) {
1319                         sc->vtnet_stats.rx_frame_too_large++;
1320                         return (EMSGSIZE);
1321                 }
1322
1323                 KASSERT(m->m_len == clsize,
1324                     ("mbuf length not expected cluster size: %d",
1325                     m->m_len));
1326
1327                 m->m_len = MIN(m->m_len, len);
1328                 len -= m->m_len;
1329
1330                 m_prev = m;
1331                 m = m->m_next;
1332                 nreplace++;
1333         }
1334
1335         KASSERT(m_prev != NULL, ("m_prev == NULL"));
1336         KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
1337                 ("too many replacement mbufs: %d/%d", nreplace,
1338                 sc->vtnet_rx_mbuf_count));
1339
1340         m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
1341         if (m_new == NULL) {
1342                 m_prev->m_len = clsize;
1343                 return (ENOBUFS);
1344         }
1345
1346         /*
1347          * Move unused mbufs, if any, from the original chain
1348          * onto the end of the new chain.
1349          */
1350         if (m_prev->m_next != NULL) {
1351                 m_tail->m_next = m_prev->m_next;
1352                 m_prev->m_next = NULL;
1353         }
1354
1355         error = vtnet_enqueue_rxbuf(sc, m_new);
1356         if (error) {
1357                 /*
1358                  * BAD! We could not enqueue the replacement mbuf chain. We
1359                  * must restore the m0 chain to the original state if it was
1360                  * modified so we can subsequently discard it.
1361                  *
1362                  * NOTE: The replacement is supposed to be an identical copy
1363                  * to the one just dequeued so this is an unexpected error.
1364                  */
1365                 sc->vtnet_stats.rx_enq_replacement_failed++;
1366
1367                 if (m_tail->m_next != NULL) {
1368                         m_prev->m_next = m_tail->m_next;
1369                         m_tail->m_next = NULL;
1370                 }
1371
1372                 m_prev->m_len = clsize;
1373                 m_freem(m_new);
1374         }
1375
1376         return (error);
1377 }
1378
1379 static int
1380 vtnet_newbuf(struct vtnet_softc *sc)
1381 {
1382         struct mbuf *m;
1383         int error;
1384
1385         m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
1386         if (m == NULL)
1387                 return (ENOBUFS);
1388
1389         error = vtnet_enqueue_rxbuf(sc, m);
1390         if (error)
1391                 m_freem(m);
1392
1393         return (error);
1394 }
1395
1396 static void
1397 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
1398 {
1399         struct virtqueue *vq;
1400         struct mbuf *m;
1401
1402         vq = sc->vtnet_rx_vq;
1403
1404         while (--nbufs > 0) {
1405                 if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
1406                         break;
1407                 vtnet_discard_rxbuf(sc, m);
1408         }
1409 }
1410
1411 static void
1412 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1413 {
1414         int error;
1415
1416         /*
1417          * Requeue the discarded mbuf. This should always be
1418          * successful since it was just dequeued.
1419          */
1420         error = vtnet_enqueue_rxbuf(sc, m);
1421         KASSERT(error == 0, ("cannot requeue discarded mbuf"));
1422 }
1423
1424 static int
1425 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1426 {
1427         struct sglist sg;
1428         struct sglist_seg segs[VTNET_MAX_RX_SEGS];
1429         struct vtnet_rx_header *rxhdr;
1430         struct virtio_net_hdr *hdr;
1431         uint8_t *mdata;
1432         int offset, error;
1433
1434         ASSERT_SERIALIZED(&sc->vtnet_slz);
1435         if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
1436                 KASSERT(m->m_next == NULL, ("chained Rx mbuf"));
1437
1438         sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
1439
1440         mdata = mtod(m, uint8_t *);
1441         offset = 0;
1442
1443         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1444                 rxhdr = (struct vtnet_rx_header *) mdata;
1445                 hdr = &rxhdr->vrh_hdr;
1446                 offset += sizeof(struct vtnet_rx_header);
1447
1448                 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
1449                 KASSERT(error == 0, ("cannot add header to sglist"));
1450         }
1451
1452         error = sglist_append(&sg, mdata + offset, m->m_len - offset);
1453         if (error)
1454                 return (error);
1455
1456         if (m->m_next != NULL) {
1457                 error = sglist_append_mbuf(&sg, m->m_next);
1458                 if (error)
1459                         return (error);
1460         }
1461
1462         return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
1463 }
1464
1465 static void
1466 vtnet_vlan_tag_remove(struct mbuf *m)
1467 {
1468         struct ether_vlan_header *evl;
1469
1470         evl = mtod(m, struct ether_vlan_header *);
1471
1472         m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
1473         m->m_flags |= M_VLANTAG;
1474
1475         /* Strip the 802.1Q header. */
1476         bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
1477             ETHER_HDR_LEN - ETHER_TYPE_LEN);
1478         m_adj(m, ETHER_VLAN_ENCAP_LEN);
1479 }
1480
1481 /*
1482  * Alternative method of doing receive checksum offloading. Rather
1483  * than parsing the received frame down to the IP header, use the
1484  * csum_offset to determine which CSUM_* flags are appropriate. We
1485  * can get by with doing this only because the checksum offsets are
1486  * unique for the things we care about.
1487  */
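/*
 * Concretely: offsetof(struct udphdr, uh_sum) is 6 while
 * offsetof(struct tcphdr, th_sum) is 16, so csum_offset alone is enough to
 * tell the UDP and TCP cases below apart.
 */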
1488 static int
1489 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
1490     struct virtio_net_hdr *hdr)
1491 {
1492         struct ether_header *eh;
1493         struct ether_vlan_header *evh;
1494         struct udphdr *udp;
1495         int csum_len;
1496         uint16_t eth_type;
1497
1498         csum_len = hdr->csum_start + hdr->csum_offset;
1499
1500         if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
1501                 return (1);
1502         if (m->m_len < csum_len)
1503                 return (1);
1504
1505         eh = mtod(m, struct ether_header *);
1506         eth_type = ntohs(eh->ether_type);
1507         if (eth_type == ETHERTYPE_VLAN) {
1508                 evh = mtod(m, struct ether_vlan_header *);
1509                 eth_type = ntohs(evh->evl_proto);
1510         }
1511
1512         if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
1513                 sc->vtnet_stats.rx_csum_bad_ethtype++;
1514                 return (1);
1515         }
1516
1517         /* Use the offset to determine the appropriate CSUM_* flags. */
1518         switch (hdr->csum_offset) {
1519         case offsetof(struct udphdr, uh_sum):
1520                 if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
1521                         return (1);
1522                 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
1523                 if (udp->uh_sum == 0)
1524                         return (0);
1525
1526                 /* FALLTHROUGH */
1527
1528         case offsetof(struct tcphdr, th_sum):
1529                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1530                 m->m_pkthdr.csum_data = 0xFFFF;
1531                 break;
1532
1533         default:
1534                 sc->vtnet_stats.rx_csum_bad_offset++;
1535                 return (1);
1536         }
1537
1538         sc->vtnet_stats.rx_csum_offloaded++;
1539
1540         return (0);
1541 }
1542
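/*
 * Dequeue the remaining buffers of a mergeable receive frame and chain
 * them onto m_head, posting a replacement buffer for each one consumed.
 * On failure the partially assembled chain is freed and the error counted.
 */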
1543 static int
1544 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
1545 {
1546         struct ifnet *ifp;
1547         struct virtqueue *vq;
1548         struct mbuf *m, *m_tail;
1549         int len;
1550
1551         ifp = sc->vtnet_ifp;
1552         vq = sc->vtnet_rx_vq;
1553         m_tail = m_head;
1554
1555         while (--nbufs > 0) {
1556                 m = virtqueue_dequeue(vq, &len);
1557                 if (m == NULL) {
1558                         ifp->if_ierrors++;
1559                         goto fail;
1560                 }
1561
1562                 if (vtnet_newbuf(sc) != 0) {
1563                         ifp->if_iqdrops++;
1564                         vtnet_discard_rxbuf(sc, m);
1565                         if (nbufs > 1)
1566                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1567                         goto fail;
1568                 }
1569
1570                 if (m->m_len < len)
1571                         len = m->m_len;
1572
1573                 m->m_len = len;
1574                 m->m_flags &= ~M_PKTHDR;
1575
1576                 m_head->m_pkthdr.len += len;
1577                 m_tail->m_next = m;
1578                 m_tail = m;
1579         }
1580
1581         return (0);
1582
1583 fail:
1584         sc->vtnet_stats.rx_mergeable_failed++;
1585         m_freem(m_head);
1586
1587         return (1);
1588 }
1589
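/*
 * Main receive loop: dequeue up to 'count' frames from the Rx virtqueue,
 * strip the VirtIO header, handle VLAN tag and checksum offload, and pass
 * each frame up to the network stack. Returns EAGAIN if the processing
 * limit was exhausted, 0 otherwise.
 */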
1590 static int
1591 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
1592 {
1593         struct virtio_net_hdr lhdr;
1594         struct ifnet *ifp;
1595         struct virtqueue *vq;
1596         struct mbuf *m;
1597         struct ether_header *eh;
1598         struct virtio_net_hdr *hdr;
1599         struct virtio_net_hdr_mrg_rxbuf *mhdr;
1600         int len, deq, nbufs, adjsz, rx_npkts;
1601
1602         ifp = sc->vtnet_ifp;
1603         vq = sc->vtnet_rx_vq;
1604         hdr = &lhdr;
1605         deq = 0;
1606         rx_npkts = 0;
1607
1608         ASSERT_SERIALIZED(&sc->vtnet_slz);
1609
1610         while (--count >= 0) {
1611                 m = virtqueue_dequeue(vq, &len);
1612                 if (m == NULL)
1613                         break;
1614                 deq++;
1615
1616                 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1617                         ifp->if_ierrors++;
1618                         vtnet_discard_rxbuf(sc, m);
1619                         continue;
1620                 }
1621
1622                 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1623                         nbufs = 1;
1624                         adjsz = sizeof(struct vtnet_rx_header);
1625                         /*
1626                          * Account for our pad between the header and
1627                          * the actual start of the frame.
1628                          */
1629                         len += VTNET_RX_HEADER_PAD;
1630                 } else {
1631                         mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1632                         nbufs = mhdr->num_buffers;
1633                         adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1634                 }
1635
1636                 if (vtnet_replace_rxbuf(sc, m, len) != 0) {
1637                         ifp->if_iqdrops++;
1638                         vtnet_discard_rxbuf(sc, m);
1639                         if (nbufs > 1)
1640                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1641                         continue;
1642                 }
1643
1644                 m->m_pkthdr.len = len;
1645                 m->m_pkthdr.rcvif = ifp;
1646                 m->m_pkthdr.csum_flags = 0;
1647
1648                 if (nbufs > 1) {
1649                         if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
1650                                 continue;
1651                 }
1652
1653                 ifp->if_ipackets++;
1654
1655                 /*
1656                  * Save a copy of the header before we strip it. For both
1657                  * mergeable and non-mergeable buffers, the VirtIO header
1658                  * is placed first in the mbuf's data. We no longer need
1659                  * num_buffers, so a plain virtio_net_hdr always suffices.
1660                  */
1661                 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1662                 m_adj(m, adjsz);
1663
1664                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1665                         eh = mtod(m, struct ether_header *);
1666                         if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1667                                 vtnet_vlan_tag_remove(m);
1668
1669                                 /*
1670                                  * With the 802.1Q header removed, update the
1671                                  * checksum starting location accordingly.
1672                                  */
1673                                 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1674                                         hdr->csum_start -=
1675                                             ETHER_VLAN_ENCAP_LEN;
1676                         }
1677                 }
1678
1679                 if (ifp->if_capenable & IFCAP_RXCSUM &&
1680                     hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1681                         if (vtnet_rx_csum(sc, m, hdr) != 0)
1682                                 sc->vtnet_stats.rx_csum_failed++;
1683                 }
1684
1685                 lwkt_serialize_exit(&sc->vtnet_slz);
1686                 rx_npkts++;
1687                 ifp->if_input(ifp, m, NULL, -1);
1688                 lwkt_serialize_enter(&sc->vtnet_slz);
1689
1690                 /*
1691                  * The interface may have been stopped while we were
1692                  * passing the packet up the network stack.
1693                  */
1694                 if ((ifp->if_flags & IFF_RUNNING) == 0)
1695                         break;
1696         }
1697
1698         virtqueue_notify(vq, &sc->vtnet_slz);
1699
1700         if (rx_npktsp != NULL)
1701                 *rx_npktsp = rx_npkts;
1702
1703         return (count > 0 ? 0 : EAGAIN);
1704 }
1705
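/*
 * Receive interrupt task: drain the Rx virtqueue up to the process limit,
 * then re-enable the interrupt; if more work is pending, or frames arrive
 * while re-enabling, loop and process them again.
 */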
1706 static void
1707 vtnet_rx_intr_task(void *arg)
1708 {
1709         struct vtnet_softc *sc;
1710         struct ifnet *ifp;
1711         int more;
1712
1713         sc = arg;
1714         ifp = sc->vtnet_ifp;
1715
1716 next:
1717 //      lwkt_serialize_enter(&sc->vtnet_slz);
1718
1719         if ((ifp->if_flags & IFF_RUNNING) == 0) {
1720                 vtnet_enable_rx_intr(sc);
1721 //              lwkt_serialize_exit(&sc->vtnet_slz);
1722                 return;
1723         }
1724
1725         more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
1726         if (!more && vtnet_enable_rx_intr(sc) != 0) {
1727                 vtnet_disable_rx_intr(sc);
1728                 more = 1;
1729         }
1730
1731 //      lwkt_serialize_exit(&sc->vtnet_slz);
1732
1733         if (more) {
1734                 sc->vtnet_stats.rx_task_rescheduled++;
1735                 goto next;
1736         }
1737 }
1738
1739 static int
1740 vtnet_rx_vq_intr(void *xsc)
1741 {
1742         struct vtnet_softc *sc;
1743
1744         sc = xsc;
1745
1746         vtnet_disable_rx_intr(sc);
1747         vtnet_rx_intr_task(sc);
1748
1749         return (1);
1750 }
1751
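/*
 * Reclaim completed transmit descriptors, freeing the associated mbufs and
 * clearing the OACTIVE and watchdog state once the virtqueue drains.
 */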
1752 static void
1753 vtnet_txeof(struct vtnet_softc *sc)
1754 {
1755         struct virtqueue *vq;
1756         struct ifnet *ifp;
1757         struct vtnet_tx_header *txhdr;
1758         int deq;
1759
1760         vq = sc->vtnet_tx_vq;
1761         ifp = sc->vtnet_ifp;
1762         deq = 0;
1763
1764         ASSERT_SERIALIZED(&sc->vtnet_slz);
1765
1766         while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
1767                 deq++;
1768                 ifp->if_opackets++;
1769                 m_freem(txhdr->vth_mbuf);
1770         }
1771
1772         if (deq > 0) {
1773                 ifq_clr_oactive(&ifp->if_snd);
1774                 if (virtqueue_empty(vq))
1775                         sc->vtnet_watchdog_timer = 0;
1776         }
1777 }
1778
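/*
 * Fill in the VirtIO header for checksum or TSO offload of an outgoing
 * frame, parsing just far enough into the Ethernet/IP/TCP headers to find
 * the checksum start and segment size. May pull up or free the mbuf.
 */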
1779 static struct mbuf *
1780 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
1781     struct virtio_net_hdr *hdr)
1782 {
1783         struct ifnet *ifp;
1784         struct ether_header *eh;
1785         struct ether_vlan_header *evh;
1786         struct ip *ip;
1787         struct ip6_hdr *ip6;
1788         struct tcphdr *tcp;
1789         int ip_offset;
1790         uint16_t eth_type, csum_start;
1791         uint8_t ip_proto, gso_type;
1792
1793         ifp = sc->vtnet_ifp;
1794         M_ASSERTPKTHDR(m);
1795
1796         ip_offset = sizeof(struct ether_header);
1797         if (m->m_len < ip_offset) {
1798                 if ((m = m_pullup(m, ip_offset)) == NULL)
1799                         return (NULL);
1800         }
1801
1802         eh = mtod(m, struct ether_header *);
1803         eth_type = ntohs(eh->ether_type);
1804         if (eth_type == ETHERTYPE_VLAN) {
1805                 ip_offset = sizeof(struct ether_vlan_header);
1806                 if (m->m_len < ip_offset) {
1807                         if ((m = m_pullup(m, ip_offset)) == NULL)
1808                                 return (NULL);
1809                 }
1810                 evh = mtod(m, struct ether_vlan_header *);
1811                 eth_type = ntohs(evh->evl_proto);
1812         }
1813
1814         switch (eth_type) {
1815         case ETHERTYPE_IP:
1816                 if (m->m_len < ip_offset + sizeof(struct ip)) {
1817                         m = m_pullup(m, ip_offset + sizeof(struct ip));
1818                         if (m == NULL)
1819                                 return (NULL);
1820                 }
1821
1822                 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
1823                 ip_proto = ip->ip_p;
1824                 csum_start = ip_offset + (ip->ip_hl << 2);
1825                 gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1826                 break;
1827
1828         case ETHERTYPE_IPV6:
1829                 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
1830                         m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
1831                         if (m == NULL)
1832                                 return (NULL);
1833                 }
1834
1835                 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
1836                 /*
1837                  * XXX Assume no extension headers are present. Presently,
1838                  * this will always be true in the case of TSO, and FreeBSD
1839                  * does not perform checksum offloading of IPv6 yet.
1840                  */
1841                 ip_proto = ip6->ip6_nxt;
1842                 csum_start = ip_offset + sizeof(struct ip6_hdr);
1843                 gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1844                 break;
1845
1846         default:
1847                 return (m);
1848         }
1849
1850         if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
1851                 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
1852                 hdr->csum_start = csum_start;
1853                 hdr->csum_offset = m->m_pkthdr.csum_data;
1854
1855                 sc->vtnet_stats.tx_csum_offloaded++;
1856         }
1857
1858         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1859                 if (ip_proto != IPPROTO_TCP)
1860                         return (m);
1861
1862                 if (m->m_len < csum_start + sizeof(struct tcphdr)) {
1863                         m = m_pullup(m, csum_start + sizeof(struct tcphdr));
1864                         if (m == NULL)
1865                                 return (NULL);
1866                 }
1867
1868                 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
1869                 hdr->gso_type = gso_type;
1870                 hdr->hdr_len = csum_start + (tcp->th_off << 2);
1871                 hdr->gso_size = m->m_pkthdr.tso_segsz;
1872
1873                 if (tcp->th_flags & TH_CWR) {
1874                         /*
1875                          * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
1876                          * ECN support is only configurable globally with the
1877                          * net.inet.tcp.ecn.enable sysctl knob.
1878                          */
1879                         if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1880                                 if_printf(ifp, "TSO with ECN not supported "
1881                                     "by host\n");
1882                                 m_freem(m);
1883                                 return (NULL);
1884                         }
1885
1886                         hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1887                 }
1888
1889                 sc->vtnet_stats.tx_tso_offloaded++;
1890         }
1891
1892         return (m);
1893 }
1894
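/*
 * Build the scatter/gather list for a transmit header and mbuf chain and
 * enqueue it on the Tx virtqueue, defragmenting the chain once if it has
 * too many segments.
 */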
1895 static int
1896 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
1897     struct vtnet_tx_header *txhdr)
1898 {
1899         struct sglist sg;
1900         struct sglist_seg segs[VTNET_MAX_TX_SEGS];
1901         struct virtqueue *vq;
1902         struct mbuf *m;
1903         int collapsed, error;
1904
1905         vq = sc->vtnet_tx_vq;
1906         m = *m_head;
1907         collapsed = 0;
1908
1909         sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
1910         error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
1911         KASSERT(error == 0 && sg.sg_nseg == 1,
1912             ("cannot add header to sglist"));
1913
1914 again:
1915         error = sglist_append_mbuf(&sg, m);
1916         if (error) {
1917                 if (collapsed)
1918                         goto fail;
1919
1920                 //m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
1921                 m = m_defrag(m, M_NOWAIT);
1922                 if (m == NULL)
1923                         goto fail;
1924
1925                 *m_head = m;
1926                 collapsed = 1;
1927                 goto again;
1928         }
1929
1930         txhdr->vth_mbuf = m;
1931
1932         return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));
1933
1934 fail:
1935         m_freem(*m_head);
1936         *m_head = NULL;
1937
1938         return (ENOBUFS);
1939 }
1940
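/*
 * Insert an 802.1Q header into the frame using the VLAN tag stored in the
 * mbuf packet header; the mbuf is duplicated first if it is not writable.
 */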
1941 static struct mbuf *
1942 vtnet_vlan_tag_insert(struct mbuf *m)
1943 {
1944         struct mbuf *n;
1945         struct ether_vlan_header *evl;
1946
1947         if (M_WRITABLE(m) == 0) {
1948                 n = m_dup(m, M_NOWAIT);
1949                 m_freem(m);
1950                 if ((m = n) == NULL)
1951                         return (NULL);
1952         }
1953
1954         M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1955         if (m == NULL)
1956                 return (NULL);
1957         if (m->m_len < sizeof(struct ether_vlan_header)) {
1958                 m = m_pullup(m, sizeof(struct ether_vlan_header));
1959                 if (m == NULL)
1960                         return (NULL);
1961         }
1962
1963         /* Insert 802.1Q header into the existing Ethernet header. */
1964         evl = mtod(m, struct ether_vlan_header *);
1965         bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
1966               (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1967         evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1968         evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
1969         m->m_flags &= ~M_VLANTAG;
1970
1971         return (m);
1972 }
1973
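/*
 * Prepare a single outgoing frame: software-insert any VLAN tag, apply
 * checksum/TSO offload, and hand the result to vtnet_enqueue_txbuf().
 */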
1974 static int
1975 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
1976 {
1977         struct vtnet_tx_header *txhdr;
1978         struct virtio_net_hdr *hdr;
1979         struct mbuf *m;
1980         int error;
1981
1982         txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx];
1983         memset(txhdr, 0, sizeof(struct vtnet_tx_header));
1984
1985         /*
1986          * Always use the non-mergeable header to simplify things. When
1987          * the mergeable feature is negotiated, the num_buffers field
1988          * must be set to zero. We use vtnet_hdr_size later to enqueue
1989          * the correct header size to the host.
1990          */
1991         hdr = &txhdr->vth_uhdr.hdr;
1992         m = *m_head;
1993
1994         error = ENOBUFS;
1995
1996         if (m->m_flags & M_VLANTAG) {
1997                 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1998                 m = vtnet_vlan_tag_insert(m);
1999                 if ((*m_head = m) == NULL)
2000                         goto fail;
2001                 m->m_flags &= ~M_VLANTAG;
2002         }
2003
2004         if (m->m_pkthdr.csum_flags != 0) {
2005                 m = vtnet_tx_offload(sc, m, hdr);
2006                 if ((*m_head = m) == NULL)
2007                         goto fail;
2008         }
2009
2010         error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
2011         if (error == 0)
2012                 sc->vtnet_txhdridx =
2013                     (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1);
2014 fail:
2015         return (error);
2016 }
2017
2018 static void
2019 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2020 {
2021         struct vtnet_softc *sc;
2022
2023         sc = ifp->if_softc;
2024
2025         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2026         lwkt_serialize_enter(&sc->vtnet_slz);
2027         vtnet_start_locked(ifp, ifsq);
2028         lwkt_serialize_exit(&sc->vtnet_slz);
2029 }
2030
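/*
 * Dequeue frames from the interface send queue and enqueue them on the Tx
 * virtqueue until either queue is exhausted, then notify the host.
 */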
2031 static void
2032 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2033 {
2034         struct vtnet_softc *sc;
2035         struct virtqueue *vq;
2036         struct mbuf *m0;
2037         int enq;
2038
2039         sc = ifp->if_softc;
2040         vq = sc->vtnet_tx_vq;
2041         enq = 0;
2042
2043         ASSERT_SERIALIZED(&sc->vtnet_slz);
2044
2045         if ((ifp->if_flags & IFF_RUNNING) == 0 ||
2046             (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
2047                 return;
2048
2049 #ifdef VTNET_TX_INTR_MODERATION
2050         if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
2051                 vtnet_txeof(sc);
2052 #endif
2053
2054         while (!ifsq_is_empty(ifsq)) {
2055                 if (virtqueue_full(vq)) {
2056                         ifq_set_oactive(&ifp->if_snd);
2057                         break;
2058                 }
2059
2060                 m0 = ifq_dequeue(&ifp->if_snd);
2061                 if (m0 == NULL)
2062                         break;
2063
2064                 if (vtnet_encap(sc, &m0) != 0) {
2065                         if (m0 == NULL)
2066                                 break;
2067                         ifq_prepend(&ifp->if_snd, m0);
2068                         ifq_set_oactive(&ifp->if_snd);
2069                         break;
2070                 }
2071
2072                 enq++;
2073                 ETHER_BPF_MTAP(ifp, m0);
2074         }
2075
2076         if (enq > 0) {
2077                 virtqueue_notify(vq, &sc->vtnet_slz);
2078                 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
2079         }
2080 }
2081
2082 static void
2083 vtnet_tick(void *xsc)
2084 {
2085         struct vtnet_softc *sc;
2086
2087         sc = xsc;
2088
2089 #if 0
2090         ASSERT_SERIALIZED(&sc->vtnet_slz);
2091 #ifdef VTNET_DEBUG
2092         virtqueue_dump(sc->vtnet_rx_vq);
2093         virtqueue_dump(sc->vtnet_tx_vq);
2094 #endif
2095
2096         vtnet_watchdog(sc);
2097         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2098 #endif
2099 }
2100
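/*
 * Transmit interrupt task: reclaim completed transmits, restart the send
 * queue if frames are waiting, and re-enable the Tx interrupt, looping if
 * more completions raced in while doing so.
 */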
2101 static void
2102 vtnet_tx_intr_task(void *arg)
2103 {
2104         struct vtnet_softc *sc;
2105         struct ifnet *ifp;
2106         struct ifaltq_subque *ifsq;
2107
2108         sc = arg;
2109         ifp = sc->vtnet_ifp;
2110         ifsq = ifq_get_subq_default(&ifp->if_snd);
2111
2112 next:
2113 //      lwkt_serialize_enter(&sc->vtnet_slz);
2114
2115         if ((ifp->if_flags & IFF_RUNNING) == 0) {
2116                 vtnet_enable_tx_intr(sc);
2117 //              lwkt_serialize_exit(&sc->vtnet_slz);
2118                 return;
2119         }
2120
2121         vtnet_txeof(sc);
2122
2123         if (!ifsq_is_empty(ifsq))
2124                 vtnet_start_locked(ifp, ifsq);
2125
2126         if (vtnet_enable_tx_intr(sc) != 0) {
2127                 vtnet_disable_tx_intr(sc);
2128                 sc->vtnet_stats.tx_task_rescheduled++;
2129 //              lwkt_serialize_exit(&sc->vtnet_slz);
2130                 goto next;
2131         }
2132
2133 //      lwkt_serialize_exit(&sc->vtnet_slz);
2134 }
2135
2136 static int
2137 vtnet_tx_vq_intr(void *xsc)
2138 {
2139         struct vtnet_softc *sc;
2140
2141         sc = xsc;
2142
2143         vtnet_disable_tx_intr(sc);
2144         vtnet_tx_intr_task(sc);
2145
2146         return (1);
2147 }
2148
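/*
 * Stop the interface: cancel the tick callout, disable the virtqueue
 * interrupts, reset the host adapter, and release all queued Rx and Tx
 * mbufs.
 */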
2149 static void
2150 vtnet_stop(struct vtnet_softc *sc)
2151 {
2152         device_t dev;
2153         struct ifnet *ifp;
2154
2155         dev = sc->vtnet_dev;
2156         ifp = sc->vtnet_ifp;
2157
2158         ASSERT_SERIALIZED(&sc->vtnet_slz);
2159
2160         sc->vtnet_watchdog_timer = 0;
2161         callout_stop(&sc->vtnet_tick_ch);
2162         ifq_clr_oactive(&ifp->if_snd);
2163         ifp->if_flags &= ~(IFF_RUNNING);
2164
2165         vtnet_disable_rx_intr(sc);
2166         vtnet_disable_tx_intr(sc);
2167
2168         /*
2169          * Stop the host VirtIO adapter. Note this will reset the host
2170          * adapter's state back to the pre-initialized state, so in
2171          * order to make the device usable again, we must drive it
2172          * through virtio_reinit() and virtio_reinit_complete().
2173          */
2174         virtio_stop(dev);
2175
2176         sc->vtnet_flags &= ~VTNET_FLAG_LINK;
2177
2178         vtnet_free_rx_mbufs(sc);
2179         vtnet_free_tx_mbufs(sc);
2180 }
2181
2182 static int
2183 vtnet_virtio_reinit(struct vtnet_softc *sc)
2184 {
2185         device_t dev;
2186         struct ifnet *ifp;
2187         uint64_t features;
2188         int error;
2189
2190         dev = sc->vtnet_dev;
2191         ifp = sc->vtnet_ifp;
2192         features = sc->vtnet_features;
2193
2194         /*
2195          * Re-negotiate with the host, removing any disabled receive
2196          * features. Transmit features are disabled only on our side
2197          * via if_capenable and if_hwassist.
2198          */
2199
2200         if (ifp->if_capabilities & IFCAP_RXCSUM) {
2201                 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2202                         features &= ~VIRTIO_NET_F_GUEST_CSUM;
2203         }
2204
2205         if (ifp->if_capabilities & IFCAP_LRO) {
2206                 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2207                         features &= ~VTNET_LRO_FEATURES;
2208         }
2209
2210         if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2211                 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2212                         features &= ~VIRTIO_NET_F_CTRL_VLAN;
2213         }
2214
2215         error = virtio_reinit(dev, features);
2216         if (error)
2217                 device_printf(dev, "virtio reinit error %d\n", error);
2218
2219         return (error);
2220 }
2221
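/*
 * Bring the interface up: reset and renegotiate features with the host,
 * reprogram the MAC address and offload settings, repopulate the Rx
 * virtqueue, and restore the receive filters before marking the interface
 * running.
 */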
2222 static void
2223 vtnet_init_locked(struct vtnet_softc *sc)
2224 {
2225         device_t dev;
2226         struct ifnet *ifp;
2227         int error;
2228
2229         dev = sc->vtnet_dev;
2230         ifp = sc->vtnet_ifp;
2231
2232         ASSERT_SERIALIZED(&sc->vtnet_slz);
2233
2234         if (ifp->if_flags & IFF_RUNNING)
2235                 return;
2236
2237         /* Stop host's adapter, cancel any pending I/O. */
2238         vtnet_stop(sc);
2239
2240         /* Reinitialize the host device. */
2241         error = vtnet_virtio_reinit(sc);
2242         if (error) {
2243                 device_printf(dev,
2244                     "reinitialization failed, stopping device...\n");
2245                 vtnet_stop(sc);
2246                 return;
2247         }
2248
2249         /* Update host with assigned MAC address. */
2250         bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2251         vtnet_set_hwaddr(sc);
2252
2253         ifp->if_hwassist = 0;
2254         if (ifp->if_capenable & IFCAP_TXCSUM)
2255                 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2256         if (ifp->if_capenable & IFCAP_TSO4)
2257                 ifp->if_hwassist |= CSUM_TSO;
2258
2259         error = vtnet_init_rx_vq(sc);
2260         if (error) {
2261                 device_printf(dev,
2262                     "cannot allocate mbufs for Rx virtqueue\n");
2263                 vtnet_stop(sc);
2264                 return;
2265         }
2266
2267         if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2268                 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2269                         /* Restore promiscuous and all-multicast modes. */
2270                         vtnet_rx_filter(sc);
2271
2272                         /* Restore filtered MAC addresses. */
2273                         vtnet_rx_filter_mac(sc);
2274                 }
2275
2276                 /* Restore VLAN filters. */
2277                 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2278                         vtnet_rx_filter_vlan(sc);
2279         }
2280
2281         /* Re-enable the Rx and Tx virtqueue interrupts. */
2282         vtnet_enable_rx_intr(sc);
2283         vtnet_enable_tx_intr(sc);
2284
2285
2286         ifp->if_flags |= IFF_RUNNING;
2287         ifq_clr_oactive(&ifp->if_snd);
2288
2289         virtio_reinit_complete(dev);
2290
2291         vtnet_update_link_status(sc);
2292         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2293 }
2294
2295 static void
2296 vtnet_init(void *xsc)
2297 {
2298         struct vtnet_softc *sc;
2299
2300         sc = xsc;
2301
2302         lwkt_serialize_enter(&sc->vtnet_slz);
2303         vtnet_init_locked(sc);
2304         lwkt_serialize_exit(&sc->vtnet_slz);
2305 }
2306
2307 static void
2308 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2309     struct sglist *sg, int readable, int writable)
2310 {
2311         struct virtqueue *vq;
2312         void *c;
2313
2314         vq = sc->vtnet_ctrl_vq;
2315
2316         ASSERT_SERIALIZED(&sc->vtnet_slz);
2317         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2318             ("no control virtqueue"));
2319         KASSERT(virtqueue_empty(vq),
2320             ("control command already enqueued"));
2321
2322         if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2323                 return;
2324
2325         virtqueue_notify(vq, &sc->vtnet_slz);
2326
2327         /*
2328          * Poll until the command is complete. Previously, we would
2329          * sleep until the control virtqueue interrupt handler woke
2330          * us up, but dropping the serializer leads to serialization
2331          * difficulties.
2332          *
2333          * Furthermore, it appears QEMU/KVM only allocates three MSIX
2334          * vectors. Two of those vectors are needed for the Rx and Tx
2335          * virtqueues. We do not support sharing both a virtqueue and
2336          * the configuration-changed notification on one MSIX vector.
2337          */
2338         c = virtqueue_poll(vq, NULL);
2339         KASSERT(c == cookie, ("unexpected control command response"));
2340 }
2341
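/*
 * Program the primary MAC address into the host via the control virtqueue.
 */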
2342 static int
2343 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2344 {
2345         struct {
2346                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2347                 uint8_t pad1;
2348                 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2349                 uint8_t pad2;
2350                 uint8_t ack;
2351         } s;
2352         struct sglist_seg segs[3];
2353         struct sglist sg;
2354         int error;
2355
2356         s.hdr.class = VIRTIO_NET_CTRL_MAC;
2357         s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2358         s.ack = VIRTIO_NET_ERR;
2359
2360         /* Copy the MAC address into physically contiguous memory. */
2361         memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2362
2363         sglist_init(&sg, 3, segs);
2364         error = 0;
2365         error |= sglist_append(&sg, &s.hdr,
2366             sizeof(struct virtio_net_ctrl_hdr));
2367         error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2368         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2369         KASSERT(error == 0 && sg.sg_nseg == 3,
2370             ("%s: error %d adding set MAC msg to sglist", __func__, error));
2371
2372         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2373
2374         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2375 }
2376
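/*
 * Synchronize the host's promiscuous and all-multicast settings with the
 * current interface flags.
 */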
2377 static void
2378 vtnet_rx_filter(struct vtnet_softc *sc)
2379 {
2380         device_t dev;
2381         struct ifnet *ifp;
2382
2383         dev = sc->vtnet_dev;
2384         ifp = sc->vtnet_ifp;
2385
2386         ASSERT_SERIALIZED(&sc->vtnet_slz);
2387         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2388             ("CTRL_RX feature not negotiated"));
2389
2390         if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2391                 device_printf(dev, "cannot %s promiscuous mode\n",
2392                     ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
2393
2394         if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2395                 device_printf(dev, "cannot %s all-multicast mode\n",
2396                     ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
2397 }
2398
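/*
 * Issue a VIRTIO_NET_CTRL_RX command (promiscuous or all-multicast on/off)
 * over the control virtqueue and return the host's acknowledgement.
 */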
2399 static int
2400 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2401 {
2402         struct sglist_seg segs[3];
2403         struct sglist sg;
2404         struct {
2405                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2406                 uint8_t pad1;
2407                 uint8_t onoff;
2408                 uint8_t pad2;
2409                 uint8_t ack;
2410         } s;
2411         int error;
2412
2413         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2414             ("%s: CTRL_RX feature not negotiated", __func__));
2415
2416         s.hdr.class = VIRTIO_NET_CTRL_RX;
2417         s.hdr.cmd = cmd;
2418         s.onoff = !!on;
2419         s.ack = VIRTIO_NET_ERR;
2420
2421         sglist_init(&sg, 3, segs);
2422         error = 0;
2423         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2424         error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2425         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2426         KASSERT(error == 0 && sg.sg_nseg == 3,
2427             ("%s: error %d adding Rx message to sglist", __func__, error));
2428
2429         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2430
2431         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2432 }
2433
2434 static int
2435 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2436 {
2437
2438         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2439 }
2440
2441 static int
2442 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2443 {
2444
2445         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2446 }
2447
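/*
 * Upload the unicast and multicast MAC filter tables to the host, falling
 * back to promiscuous or all-multicast mode if either table overflows.
 */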
2448 static void
2449 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2450 {
2451         struct virtio_net_ctrl_hdr hdr __aligned(2);
2452         struct vtnet_mac_filter *filter;
2453         struct sglist_seg segs[4];
2454         struct sglist sg;
2455         struct ifnet *ifp;
2456         struct ifaddr *ifa;
2457         struct ifaddr_container *ifac;
2458         struct ifmultiaddr *ifma;
2459         int ucnt, mcnt, promisc, allmulti, error;
2460         uint8_t ack;
2461
2462         ifp = sc->vtnet_ifp;
2463         ucnt = 0;
2464         mcnt = 0;
2465         promisc = 0;
2466         allmulti = 0;
2467
2468         ASSERT_SERIALIZED(&sc->vtnet_slz);
2469         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2470             ("%s: CTRL_RX feature not negotiated", __func__));
2471
2472         /* Use the MAC filtering table allocated in vtnet_attach. */
2473         filter = sc->vtnet_macfilter;
2474         memset(filter, 0, sizeof(struct vtnet_mac_filter));
2475
2476         /* Unicast MAC addresses: */
2477         //if_addr_rlock(ifp);
2478         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2479                 ifa = ifac->ifa;
2480                 if (ifa->ifa_addr->sa_family != AF_LINK)
2481                         continue;
2482                 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2483                     sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2484                         continue;
2485                 else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2486                         promisc = 1;
2487                         break;
2488                 }
2489
2490                 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2491                     &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2492                 ucnt++;
2493         }
2494         //if_addr_runlock(ifp);
2495
2496         if (promisc != 0) {
2497                 filter->vmf_unicast.nentries = 0;
2498                 if_printf(ifp, "more than %d MAC addresses assigned, "
2499                     "falling back to promiscuous mode\n",
2500                     VTNET_MAX_MAC_ENTRIES);
2501         } else
2502                 filter->vmf_unicast.nentries = ucnt;
2503
2504         /* Multicast MAC addresses: */
2505         //if_maddr_rlock(ifp);
2506         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2507                 if (ifma->ifma_addr->sa_family != AF_LINK)
2508                         continue;
2509                 else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2510                         allmulti = 1;
2511                         break;
2512                 }
2513
2514                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2515                     &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2516                 mcnt++;
2517         }
2518         //if_maddr_runlock(ifp);
2519
2520         if (allmulti != 0) {
2521                 filter->vmf_multicast.nentries = 0;
2522                 if_printf(ifp, "more than %d multicast MAC addresses "
2523                     "assigned, falling back to all-multicast mode\n",
2524                     VTNET_MAX_MAC_ENTRIES);
2525         } else
2526                 filter->vmf_multicast.nentries = mcnt;
2527
2528         if (promisc != 0 && allmulti != 0)
2529                 goto out;
2530
2531         hdr.class = VIRTIO_NET_CTRL_MAC;
2532         hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2533         ack = VIRTIO_NET_ERR;
2534
2535         sglist_init(&sg, 4, segs);
2536         error = 0;
2537         error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2538         error |= sglist_append(&sg, &filter->vmf_unicast,
2539             sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2540         error |= sglist_append(&sg, &filter->vmf_multicast,
2541             sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2542         error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2543         KASSERT(error == 0 && sg.sg_nseg == 4,
2544             ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2545
2546         vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2547
2548         if (ack != VIRTIO_NET_OK)
2549                 if_printf(ifp, "error setting host MAC filter table\n");
2550
2551 out:
2552         if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2553                 if_printf(ifp, "cannot enable promiscuous mode\n");
2554         if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2555                 if_printf(ifp, "cannot enable all-multicast mode\n");
2556 }
2557
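/*
 * Add or delete a single VLAN tag in the host's VLAN filter via the
 * control virtqueue.
 */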
2558 static int
2559 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2560 {
2561         struct sglist_seg segs[3];
2562         struct sglist sg;
2563         struct {
2564                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2565                 uint8_t pad1;
2566                 uint16_t tag;
2567                 uint8_t pad2;
2568                 uint8_t ack;
2569         } s;
2570         int error;
2571
2572         s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2573         s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2574         s.tag = tag;
2575         s.ack = VIRTIO_NET_ERR;
2576
2577         sglist_init(&sg, 3, segs);
2578         error = 0;
2579         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2580         error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2581         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2582         KASSERT(error == 0 && sg.sg_nseg == 3,
2583             ("%s: error %d adding VLAN message to sglist", __func__, error));
2584
2585         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2586
2587         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2588 }
2589
2590 static void
2591 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2592 {
2593         uint32_t w;
2594         uint16_t tag;
2595         int i, bit, nvlans;
2596
2597         ASSERT_SERIALIZED(&sc->vtnet_slz);
2598         KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2599             ("%s: VLAN_FILTER feature not negotiated", __func__));
2600
2601         nvlans = sc->vtnet_nvlans;
2602
2603         /* Enable the filter for each configured VLAN. */
2604         for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2605                 w = sc->vtnet_vlan_shadow[i];
2606                 while ((bit = ffs(w) - 1) != -1) {
2607                         w &= ~(1 << bit);
2608                         tag = sizeof(w) * CHAR_BIT * i + bit;
2609                         nvlans--;
2610
2611                         if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2612                                 device_printf(sc->vtnet_dev,
2613                                     "cannot enable VLAN %d filter\n", tag);
2614                         }
2615                 }
2616         }
2617
2618         KASSERT(nvlans == 0, ("VLAN count incorrect"));
2619 }
2620
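/*
 * Track a VLAN registration in the shadow table and, when hardware VLAN
 * filtering is enabled, mirror the change into the host filter.
 */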
2621 static void
2622 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2623 {
2624         struct ifnet *ifp;
2625         int idx, bit;
2626
2627         ifp = sc->vtnet_ifp;
2628         idx = (tag >> 5) & 0x7F;
2629         bit = tag & 0x1F;
2630
2631         if (tag == 0 || tag > 4095)
2632                 return;
2633
2634         lwkt_serialize_enter(&sc->vtnet_slz);
2635
2636         /* Update shadow VLAN table. */
2637         if (add) {
2638                 sc->vtnet_nvlans++;
2639                 sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2640         } else {
2641                 sc->vtnet_nvlans--;
2642                 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2643         }
2644
2645         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2646             vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2647                 device_printf(sc->vtnet_dev,
2648                     "cannot %s VLAN %d %s the host filter table\n",
2649                     add ? "add" : "remove", tag, add ? "to" : "from");
2650         }
2651
2652         lwkt_serialize_exit(&sc->vtnet_slz);
2653 }
2654
2655 static void
2656 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2657 {
2658
2659         if (ifp->if_softc != arg)
2660                 return;
2661
2662         vtnet_update_vlan_filter(arg, 1, tag);
2663 }
2664
2665 static void
2666 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2667 {
2668
2669         if (ifp->if_softc != arg)
2670                 return;
2671
2672         vtnet_update_vlan_filter(arg, 0, tag);
2673 }
2674
2675 static int
2676 vtnet_ifmedia_upd(struct ifnet *ifp)
2677 {
2678         struct vtnet_softc *sc;
2679         struct ifmedia *ifm;
2680
2681         sc = ifp->if_softc;
2682         ifm = &sc->vtnet_media;
2683
2684         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2685                 return (EINVAL);
2686
2687         return (0);
2688 }
2689
2690 static void
2691 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2692 {
2693         struct vtnet_softc *sc;
2694
2695         sc = ifp->if_softc;
2696
2697         ifmr->ifm_status = IFM_AVALID;
2698         ifmr->ifm_active = IFM_ETHER;
2699
2700         lwkt_serialize_enter(&sc->vtnet_slz);
2701         if (vtnet_is_link_up(sc) != 0) {
2702                 ifmr->ifm_status |= IFM_ACTIVE;
2703                 ifmr->ifm_active |= VTNET_MEDIATYPE;
2704         } else
2705                 ifmr->ifm_active |= IFM_NONE;
2706         lwkt_serialize_exit(&sc->vtnet_slz);
2707 }
2708
2709 static void
2710 vtnet_add_statistics(struct vtnet_softc *sc)
2711 {
2712         device_t dev;
2713         struct vtnet_statistics *stats;
2714         struct sysctl_ctx_list *ctx;
2715         struct sysctl_oid *tree;
2716         struct sysctl_oid_list *child;
2717
2718         dev = sc->vtnet_dev;
2719         stats = &sc->vtnet_stats;
2720         ctx = device_get_sysctl_ctx(dev);
2721         tree = device_get_sysctl_tree(dev);
2722         child = SYSCTL_CHILDREN(tree);
2723
2724         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2725             CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2726             "Mbuf cluster allocation failures");
2727
2728         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2729             CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2730             "Received frame larger than the mbuf chain");
2731         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2732             CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2733             "Enqueuing the replacement receive mbuf failed");
2734         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2735             CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2736             "Mergeable buffers receive failures");
2737         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2738             CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2739             "Received checksum offloaded buffer with unsupported "
2740             "Ethernet type");
2741         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2742             CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2743             "Received checksum offloaded buffer with incorrect IP protocol");
2744         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2745             CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2746             "Received checksum offloaded buffer with incorrect offset");
2747         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2748             CTLFLAG_RD, &stats->rx_csum_failed, 0,
2749             "Received buffer checksum offload failed");
2750         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2751             CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2752             "Received buffer checksum offload succeeded");
2753         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2754             CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2755             "Times the receive interrupt task rescheduled itself");
2756
2757         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2758             CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2759             "Aborted transmit of checksum offloaded buffer with unknown "
2760             "Ethernet type");
2761         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2762             CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2763             "Aborted transmit of TSO buffer with unknown Ethernet type");
2764         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2765             CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2766             "Offloaded checksum of transmitted buffer");
2767         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2768             CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2769             "Segmentation offload of transmitted buffer");
2770         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2771             CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2772             "Times the transmit interrupt task rescheduled itself");
2773 }
2774
2775 static int
2776 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2777 {
2778
2779         return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2780 }
2781
2782 static void
2783 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2784 {
2785
2786         virtqueue_disable_intr(sc->vtnet_rx_vq);
2787 }
2788
2789 static int
2790 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2791 {
2792
2793 #ifdef VTNET_TX_INTR_MODERATION
2794         return (0);
2795 #else
2796         return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2797 #endif
2798 }
2799
2800 static void
2801 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2802 {
2803
2804         virtqueue_disable_intr(sc->vtnet_tx_vq);
2805 }