2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/serialize.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
45 #include <machine/md_var.h>
46 #include <machine/cothread.h>
48 #include <net/ethernet.h>
51 #include <net/if_arp.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/vlan/if_vlan_ether.h>
56 #include <netinet/in_var.h>
59 #include <net/tap/if_tap.h>
67 #define VKE_DEVNAME "vke"
69 #define VKE_CHUNK 8 /* number of mbufs to queue before interrupting */
/*
 * Ring index helper: sc_ringsize is a power of two (see LOW_POW_2 sizing
 * in vke_attach), so masking the monotonically increasing index with
 * (sc_ringsize - 1) wraps it into the ring.
 */
71 #define NETFIFOINDEX(u, sc) ((u) & ((sc)->sc_ringsize - 1))
/* Cothread lifecycle states used for the cotd_tx_exit/cotd_rx_exit handshake. */
73 #define VKE_COTD_RUN 0
74 #define VKE_COTD_EXIT 1
75 #define VKE_COTD_DEAD 2
/* NOTE(review): pointer typedef hides the indirection; kept for file style. */
82 typedef struct vke_fifo *fifo_t;
84 /* Default value for a long time */
85 #define VKE_DEFAULT_RINGSIZE 256
/* Tunable cap on the ring size; 0 selects the nmbclusters-based heuristic. */
86 static int vke_max_ringsize = 0;
87 TUNABLE_INT("hw.vke.max_ringsize", &vke_max_ringsize);
/* Largest power of two <= n (fls() == find last set bit). */
89 #define LOW_POW_2(n) (1 << (fls(n) - 1))
/* Ring of TX mbufs already written out by the cothread, pending m_freem(). */
106 fifo_t sc_txfifo_done;
/* Per-interface sysctl context and tree (created under hw.<ifname> in vke_attach). */
115 struct sysctl_ctx_list sc_sysctl_ctx;
116 struct sysctl_oid *sc_sysctl_tree;
118 int sc_tap_unit; /* unit of backend tap(4) */
119 in_addr_t sc_addr; /* address */
120 in_addr_t sc_mask; /* netmask */
/* Media state reported/changed through the ifmedia ioctls. */
122 struct ifmedia sc_media;
/* ifnet entry points */
125 static void vke_start(struct ifnet *, struct ifaltq_subque *);
126 static void vke_init(void *);
127 static int vke_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
/* ifmedia callbacks */
129 static int vke_media_change(struct ifnet *);
130 static void vke_media_status(struct ifnet *, struct ifmediareq *);
/* attach/teardown, address setup, and the TX/RX cothread bodies + interrupts */
132 static int vke_attach(const struct vknetif_info *, int);
133 static int vke_stop(struct vke_softc *);
134 static int vke_init_addr(struct ifnet *, in_addr_t, in_addr_t);
135 static void vke_tx_intr(cothread_t cotd);
136 static void vke_tx_thread(cothread_t cotd);
137 static void vke_rx_intr(cothread_t cotd);
138 static void vke_rx_thread(cothread_t cotd);
/* single-producer/single-consumer ring helpers shared with the cothreads */
140 static int vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m);
141 static struct mbuf *vke_txfifo_dequeue(struct vke_softc *sc);
143 static int vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m);
144 static struct mbuf * vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm);
146 static struct mbuf *vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *nm);
147 static struct mbuf *vke_rxfifo_sniff(struct vke_softc *sc);
/*
 * Driver-load hook: attach one vke interface per netif configured on the
 * vkernel command line (NetifInfo[0..NetifNum-1]).
 */
150 vke_sysinit(void *arg __unused)
154 KASSERT(NetifNum <= VKNETIF_MAX, ("too many netifs: %d", NetifNum));
157 for (i = 0; i < NetifNum; ++i) {
158 if (vke_attach(&NetifInfo[i], unit) == 0)
162 SYSINIT(vke, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, vke_sysinit, NULL);
165 * vke_txfifo_done_enqueue() - Add an mbuf to the transmit done fifo. Since
166 * the cothread cannot free transmit mbufs after processing we put them on
167 * the done fifo so the kernel can free them.
170 vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m)
172 fifo_t fifo = sc->sc_txfifo_done;
/* Spin while the ring is full: windex+1 catching rindex means no free slot. */
174 while (NETFIFOINDEX(fifo->windex + 1, sc) ==
175 NETFIFOINDEX(fifo->rindex, sc)) {
/* Store into the current write slot; windex is presumably advanced after
 * this store (advance not visible in this view — confirm against source). */
178 fifo->array[NETFIFOINDEX(fifo->windex, sc)] = m;
186 * vke_txfifo_done_dequeue() - Remove an mbuf from the transmit done fifo.
189 vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm)
191 fifo_t fifo = sc->sc_txfifo_done;
/* Empty when read and write indices coincide. */
194 if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
/* Pull the mbuf at the read slot and replace it with the caller-supplied
 * nm (NULL from vke_tx_intr); rindex advance not visible in this view. */
198 m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
199 fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = nm;
206 * vke_txfifo_enqueue() - Add an mbuf to the transmit fifo.
209 vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m)
211 fifo_t fifo = sc->sc_txfifo;
/* Ring full: presumably returns -1 here (callers in vke_start test != -1);
 * the return itself is not visible in this view. */
213 if (NETFIFOINDEX(fifo->windex + 1, sc) ==
214 NETFIFOINDEX(fifo->rindex, sc)) {
/* Store into the write slot; windex advance follows (not visible here). */
218 fifo->array[NETFIFOINDEX(fifo->windex, sc)] = m;
226 * vke_txfifo_dequeue() - Return next mbuf on the transmit fifo if one
230 vke_txfifo_dequeue(struct vke_softc *sc)
232 fifo_t fifo = sc->sc_txfifo;
/* Empty ring: read index has caught up with the write index. */
235 if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
/* Take the mbuf and clear the slot so vke_stop won't double-free it. */
239 m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
240 fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = NULL;
/* Predicate: true when the TX fifo has no pending mbufs. */
248 vke_txfifo_empty(struct vke_softc *sc)
250 fifo_t fifo = sc->sc_txfifo;
252 if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
258 * vke_rxfifo_dequeue() - Return next mbuf on the receice fifo if one
259 * exists replacing it with newm which should point to a newly allocated
263 vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *newm)
265 fifo_t fifo = sc->sc_rxfifo;
/* Empty ring: nothing received yet. */
268 if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
/* Swap the received mbuf out for the replacement cluster so the RX
 * cothread always finds a buffer in every slot it writes into. */
272 m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
273 fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = newm;
281 * Return the next mbuf if available but do NOT remove it from the FIFO.
284 vke_rxfifo_sniff(struct vke_softc *sc)
286 fifo_t fifo = sc->sc_rxfifo;
/* Peek only: indices are not advanced, the caller dequeues separately. */
289 if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
293 m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
/*
 * if_init handler: mark the interface running, allocate the three
 * mbuf-pointer rings (TX, TX-done, RX), pre-load the RX ring with
 * clusters, and spin up the TX/RX cothreads.  Runs with the ifnet
 * serializer held.
 */
301 struct vke_softc *sc = xsc;
302 struct ifnet *ifp = &sc->arpcom.ac_if;
303 size_t ringsize = sc->sc_ringsize * sizeof(struct mbuf *);
306 ASSERT_SERIALIZED(ifp->if_serializer);
310 ifp->if_flags |= IFF_RUNNING;
311 ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));
314 * Allocate memory for FIFO structures and mbufs.
316 sc->sc_txfifo = kmalloc(sizeof(*sc->sc_txfifo),
317 M_DEVBUF, M_WAITOK | M_ZERO);
318 sc->sc_txfifo_done = kmalloc(sizeof(*sc->sc_txfifo_done),
319 M_DEVBUF, M_WAITOK | M_ZERO);
320 sc->sc_rxfifo = kmalloc(sizeof(*sc->sc_rxfifo),
321 M_DEVBUF, M_WAITOK | M_ZERO);
322 sc->sc_txfifo->array = kmalloc(ringsize,
323 M_DEVBUF, M_WAITOK | M_ZERO);
324 sc->sc_txfifo_done->array = kmalloc(ringsize,
325 M_DEVBUF, M_WAITOK | M_ZERO);
326 sc->sc_rxfifo->array = kmalloc(ringsize,
327 M_DEVBUF, M_WAITOK | M_ZERO);
/* RX slots start populated with empty clusters for the cothread to fill. */
329 for (i = 0; i < sc->sc_ringsize; i++) {
330 sc->sc_rxfifo->array[i] = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
331 sc->sc_txfifo->array[i] = NULL;
332 sc->sc_txfifo_done->array[i] = NULL;
/* Flags must be RUN before the cothreads start polling them. */
335 sc->cotd_tx_exit = sc->cotd_rx_exit = VKE_COTD_RUN;
336 sc->cotd_tx = cothread_create(vke_tx_thread, vke_tx_intr, sc, "vke_tx");
337 sc->cotd_rx = cothread_create(vke_rx_thread, vke_rx_intr, sc, "vke_rx");
/* Apply the pre-configured address from the vkernel command line, if any. */
339 if (sc->sc_addr != 0) {
340 in_addr_t addr, mask;
346 * Make sure vkernel assigned
347 * address will not be added
353 vke_init_addr(ifp, addr, mask);
359 * Called from kernel.
361 * NOTE: We can't make any kernel callbacks while holding cothread lock
362 * because the cothread lock is not governed by the kernel scheduler
363 * (so mplock, tokens, etc will not be released).
366 vke_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
368 struct vke_softc *sc = ifp->if_softc;
370 cothread_t cotd = sc->cotd_tx;
373 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
374 ASSERT_SERIALIZED(ifp->if_serializer);
/* Nothing to do if the interface is down or TX is already saturated. */
376 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
/* Drain the send queue into the TX ring; -1 from enqueue means ring full. */
380 while ((m = ifsq_dequeue(ifsq)) != NULL) {
381 if (vke_txfifo_enqueue(sc, m) != -1) {
382 ETHER_BPF_MTAP(ifp, m);
/* Kick the TX cothread every VKE_CHUNK packets to bound latency. */
383 if (count++ == VKE_CHUNK) {
384 cothread_lock(cotd, 0);
385 cothread_signal(cotd);
386 cothread_unlock(cotd, 0);
/* Final kick for any partial chunk left when the queue drained. */
394 cothread_lock(cotd, 0);
395 cothread_signal(cotd);
396 cothread_unlock(cotd, 0);
/*
 * Interface ioctl handler: flag changes (up/down), media ioctls, status
 * text, and address configuration; everything else falls through to
 * ether_ioctl().  Runs with the ifnet serializer held.
 */
401 vke_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
403 struct vke_softc *sc = ifp->if_softc;
404 struct ifreq *ifr = (struct ifreq *)data;
407 ASSERT_SERIALIZED(ifp->if_serializer);
/* IFF_UP transition: start if not running, otherwise stop when downed. */
411 if (ifp->if_flags & IFF_UP) {
412 if ((ifp->if_flags & IFF_RUNNING) == 0)
415 if (ifp->if_flags & IFF_RUNNING)
421 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
423 case SIOCGIFSTATUS: {
424 struct ifstat *ifs = (struct ifstat *)data;
/* Append backing-tap info to whatever status text is already there. */
427 len = strlen(ifs->ascii);
428 if (len < sizeof(ifs->ascii)) {
429 if (sc->sc_tap_unit >= 0) {
430 ksnprintf(ifs->ascii + len,
431 sizeof(ifs->ascii) - len,
432 "\tBacked by tap%d\n",
/* SIOCSIFADDR with an AF_INET address overrides the command-line config. */
439 if (((struct ifaddr *)data)->ifa_addr->sa_family == AF_INET) {
441 * If we are explicitly requested to change address,
442 * we should invalidate address/netmask passed in
443 * from vkernel command line.
450 error = ether_ioctl(ifp, cmd, data);
/*
 * Stop the interface: clear RUNNING, ask both cothreads to exit (EXIT
 * handshake under the cothread lock, then delete), free every mbuf still
 * sitting in the rings, and release the ring memory.  Runs with the
 * ifnet serializer held.
 */
457 vke_stop(struct vke_softc *sc)
459 struct ifnet *ifp = &sc->arpcom.ac_if;
462 ASSERT_SERIALIZED(ifp->if_serializer);
464 ifp->if_flags &= ~IFF_RUNNING;
465 ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));
/* Request TX cothread exit and wake it so it notices the flag. */
469 cothread_lock(sc->cotd_tx, 0);
470 if (sc->cotd_tx_exit == VKE_COTD_RUN)
471 sc->cotd_tx_exit = VKE_COTD_EXIT;
472 cothread_signal(sc->cotd_tx);
473 cothread_unlock(sc->cotd_tx, 0);
474 cothread_delete(&sc->cotd_tx);
/* Same shutdown handshake for the RX cothread. */
477 cothread_lock(sc->cotd_rx, 0);
478 if (sc->cotd_rx_exit == VKE_COTD_RUN)
479 sc->cotd_rx_exit = VKE_COTD_EXIT;
480 cothread_signal(sc->cotd_rx);
481 cothread_unlock(sc->cotd_rx, 0);
482 cothread_delete(&sc->cotd_rx);
/* With the cothreads gone, free any mbufs still parked in the rings. */
485 for (i = 0; i < sc->sc_ringsize; i++) {
486 if (sc->sc_rxfifo && sc->sc_rxfifo->array[i]) {
487 m_freem(sc->sc_rxfifo->array[i]);
488 sc->sc_rxfifo->array[i] = NULL;
490 if (sc->sc_txfifo && sc->sc_txfifo->array[i]) {
491 m_freem(sc->sc_txfifo->array[i]);
492 sc->sc_txfifo->array[i] = NULL;
494 if (sc->sc_txfifo_done && sc->sc_txfifo_done->array[i]) {
495 m_freem(sc->sc_txfifo_done->array[i]);
496 sc->sc_txfifo_done->array[i] = NULL;
/* Free ring arrays then the fifo headers; NULL the pointers so a
 * re-init starts clean.  NOTE(review): the sc_txfifo/sc_rxfifo NULL
 * guards presumably exist on lines not visible here — confirm. */
501 if (sc->sc_txfifo->array)
502 kfree(sc->sc_txfifo->array, M_DEVBUF);
503 kfree(sc->sc_txfifo, M_DEVBUF);
504 sc->sc_txfifo = NULL;
507 if (sc->sc_txfifo_done) {
508 if (sc->sc_txfifo_done->array)
509 kfree(sc->sc_txfifo_done->array, M_DEVBUF);
510 kfree(sc->sc_txfifo_done, M_DEVBUF);
511 sc->sc_txfifo_done = NULL;
515 if (sc->sc_rxfifo->array)
516 kfree(sc->sc_rxfifo->array, M_DEVBUF);
517 kfree(sc->sc_rxfifo, M_DEVBUF);
518 sc->sc_rxfifo = NULL;
527 * vke_rx_intr() is the interrupt function for the receive cothread.
530 vke_rx_intr(cothread_t cotd)
534 struct vke_softc *sc = cotd->arg;
535 struct ifnet *ifp = &sc->arpcom.ac_if;
/* NOTE(review): file-scope lifetime chunk counter shared across all vke
 * instances/invocations; only used to pace cothread signals. */
536 static int count = 0;
538 ifnet_serialize_all(ifp);
539 cothread_lock(cotd, 0);
/* Bail out if the interface is tearing down. */
541 if (sc->cotd_rx_exit != VKE_COTD_RUN) {
542 cothread_unlock(cotd, 0);
543 ifnet_deserialize_all(ifp);
/* Fold the counter the cothread accumulated into the ifnet stats here,
 * in kernel context (the cothread has no per-cpu globaldata). */
546 if (sc->cotd_ipackets) {
547 IFNET_STAT_INC(ifp, ipackets, 1);
548 sc->cotd_ipackets = 0;
550 cothread_unlock(cotd, 0);
/* Hand each received mbuf up the stack, replacing its ring slot with a
 * fresh cluster; if allocation fails the packet is dropped (dequeue with
 * the same mbuf — handling of that path is partially outside this view). */
552 while ((m = vke_rxfifo_sniff(sc)) != NULL) {
553 nm = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
555 vke_rxfifo_dequeue(sc, nm);
556 ifp->if_input(ifp, m, NULL, -1);
/* Signal the cothread every VKE_CHUNK packets so it can refill. */
557 if (count++ == VKE_CHUNK) {
558 cothread_lock(cotd, 0);
559 cothread_signal(cotd);
560 cothread_unlock(cotd, 0);
564 vke_rxfifo_dequeue(sc, m);
/* Final signal for any remainder below a full chunk. */
569 cothread_lock(cotd, 0);
570 cothread_signal(cotd);
571 cothread_unlock(cotd, 0);
573 ifnet_deserialize_all(ifp);
577 * vke_tx_intr() is the interrupt function for the transmit cothread.
578 * Calls vke_start() to handle processing transmit mbufs.
581 vke_tx_intr(cothread_t cotd)
583 struct vke_softc *sc = cotd->arg;
584 struct ifnet *ifp = &sc->arpcom.ac_if;
587 ifnet_serialize_all(ifp);
588 cothread_lock(cotd, 0);
/* Interface is stopping: do nothing further. */
589 if (sc->cotd_tx_exit != VKE_COTD_RUN) {
590 cothread_unlock(cotd, 0);
591 ifnet_deserialize_all(ifp);
/* Migrate packet/error counters accumulated by the cothread into the
 * ifnet stats from kernel context. */
594 if (sc->cotd_opackets) {
595 IFNET_STAT_INC(ifp, opackets, 1);
596 sc->cotd_opackets = 0;
598 if (sc->cotd_oerrors) {
599 IFNET_STAT_INC(ifp, oerrors, 1);
600 sc->cotd_oerrors = 0;
602 cothread_unlock(cotd, 0);
605 * Free TX mbufs that have been processed before starting new
606 * ones going to be pipeline friendly.
608 while ((m = vke_txfifo_done_dequeue(sc, NULL)) != NULL) {
612 if ((ifp->if_flags & IFF_RUNNING) == 0)
615 ifnet_deserialize_all(ifp);
619 * vke_rx_thread() is the body of the receive cothread.
621 * WARNING! THIS IS A COTHREAD WHICH HAS NO PER-CPU GLOBALDATA!!!!!
624 vke_rx_thread(cothread_t cotd)
627 struct vke_softc *sc = cotd->arg;
628 struct ifnet *ifp = &sc->arpcom.ac_if;
629 fifo_t fifo = sc->sc_rxfifo;
636 /* Select timeout cannot be infinite since we need to check for
637 * the exit flag sc->cotd_rx_exit.
645 while (sc->cotd_rx_exit == VKE_COTD_RUN) {
647 * Wait for the RX FIFO to be loaded with
/* Ring full: no free slot to read into until the kernel side drains. */
650 if (NETFIFOINDEX(fifo->windex + 1, sc) ==
651 NETFIFOINDEX(fifo->rindex, sc)) {
657 * Load data into the rx fifo
/* Every ring slot must hold a pre-allocated cluster; a NULL here is an
 * invariant violation and the store through address 0 below forces a
 * deliberate crash for debugging. */
660 m = fifo->array[NETFIFOINDEX(fifo->windex, sc)];
663 VKE_DEVNAME "%d: NULL rxring mbuf\n",
665 *(volatile int *)0 = 1;
/* Read one frame from the tap fd straight into the cluster. */
667 n = read(sc->sc_fd, mtod(m, void *), MCLBYTES);
669 /* no mycpu in cothread */
670 /*IFNET_STAT_INC(ifp, ipackets, 1);*/
672 m->m_pkthdr.rcvif = ifp;
673 m->m_pkthdr.len = m->m_len = n;
/* Interrupt the kernel side every VKE_CHUNK frames. */
676 if (count++ == VKE_CHUNK) {
/* Block in select() with a finite timeout so the exit flag is re-checked. */
685 FD_SET(sc->sc_fd, &fdset);
686 r = select(sc->sc_fd + 1, &fdset, NULL, NULL, &tv);
689 VKE_DEVNAME "%d: select failed for "
690 "TAP device\n", sc->sc_unit);
/* Mark the cothread dead so vke_stop knows it is gone. */
696 sc->cotd_rx_exit = VKE_COTD_DEAD;
700 * vke_tx_thread() is the body of the transmit cothread.
702 * WARNING! THIS IS A COTHREAD WHICH HAS NO PER-CPU GLOBALDATA!!!!!
705 vke_tx_thread(cothread_t cotd)
708 struct vke_softc *sc = cotd->arg;
709 /*struct ifnet *ifp = &sc->arpcom.ac_if;*/
712 while (sc->cotd_tx_exit == VKE_COTD_RUN) {
714 * Write outgoing packets to the TAP interface
716 m = vke_txfifo_dequeue(sc);
/* Linearize the mbuf chain into sc_txbuf (one cluster max) before the
 * write(); oversized packets are presumably dropped (path not visible). */
718 if (m->m_pkthdr.len <= MCLBYTES) {
719 m_copydata(m, 0, m->m_pkthdr.len, sc->sc_txbuf);
720 sc->sc_txbuf_len = m->m_pkthdr.len;
722 if (write(sc->sc_fd, sc->sc_txbuf,
723 sc->sc_txbuf_len) < 0) {
724 /* no mycpu in cothread */
725 /*IFNET_STAT_INC(ifp, oerrors, 1);*/
728 /* no mycpu in cothread */
729 /*IFNET_STAT_INC(ifp, opackets, 1);*/
/* Interrupt the kernel every VKE_CHUNK packets so done mbufs get freed. */
733 if (count++ == VKE_CHUNK) {
/* Done with the mbuf: park it for the kernel side to m_freem(). */
737 vke_txfifo_done_enqueue(sc, m);
/* Sleep (cothread wait, lock mode 1) only once the TX ring is empty. */
743 cothread_lock(cotd, 1);
744 if (vke_txfifo_empty(sc))
746 cothread_unlock(cotd, 1);
/* Mark the cothread dead so vke_stop knows it is gone. */
750 sc->cotd_tx_exit = VKE_COTD_DEAD;
/*
 * Attach one vke interface backed by the given vknetif_info (tap fd or
 * virtual socket): pick a MAC address, size the rings, create the sysctl
 * node, configure the ifnet/ifmedia state, and ether_ifattach().
 * Returns 0 on success (error paths partially outside this view).
 */
754 vke_attach(const struct vknetif_info *info, int unit)
756 struct vke_softc *sc;
758 struct tapinfo tapinfo;
759 uint8_t enaddr[ETHER_ADDR_LEN];
763 KKASSERT(info->tap_fd >= 0);
770 bcopy(info->enaddr, enaddr, ETHER_ADDR_LEN);
773 * This is only a TAP device if tap_unit is non-zero. If
774 * connecting to a virtual socket we generate a unique MAC.
776 * WARNING: enaddr[0] bit 0 is the multicast bit, when
777 * randomizing enaddr[] just leave the first
778 * two bytes 00 00 for now.
780 bzero(enaddr, sizeof(enaddr));
/* tap(4) backend: query the tap's MTU/baudrate and its MAC address. */
781 if (info->tap_unit >= 0) {
782 if (ioctl(fd, TAPGIFINFO, &tapinfo) < 0) {
783 kprintf(VKE_DEVNAME "%d: ioctl(TAPGIFINFO) "
784 "failed: %s\n", unit, strerror(errno));
788 if (ioctl(fd, SIOCGIFADDR, enaddr) < 0) {
789 kprintf(VKE_DEVNAME "%d: ioctl(SIOCGIFADDR) "
790 "failed: %s\n", unit, strerror(errno));
/* Virtual-socket backend: randomize bytes 2-5 (urandom, fall back to
 * the pid) leaving bytes 0-1 zero so the multicast bit stays clear. */
794 int fd = open("/dev/urandom", O_RDONLY);
796 read(fd, enaddr + 2, 4);
799 enaddr[4] = (int)getpid() >> 8;
800 enaddr[5] = (int)getpid() & 255;
805 if (ETHER_IS_MULTICAST(enaddr)) {
806 kprintf(VKE_DEVNAME "%d: illegal MULTICAST ether mac!\n", unit);
810 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
/* One cluster-sized staging buffer for linearizing TX mbufs. */
812 sc->sc_txbuf = kmalloc(MCLBYTES, M_DEVBUF, M_WAITOK);
815 sc->sc_tap_unit = info->tap_unit;
816 sc->sc_addr = info->netif_addr;
817 sc->sc_mask = info->netif_mask;
/* Ring sizing: default heuristic shares nmbclusters across all netifs
 * (two rings' worth each), capped at VKE_DEFAULT_RINGSIZE; a tunable
 * >= VKE_CHUNK overrides.  Result is always a power of two, as
 * NETFIFOINDEX requires. */
819 if (vke_max_ringsize == 0) {
820 nmbufs = nmbclusters / (NetifNum * 2);
821 sc->sc_ringsize = LOW_POW_2(nmbufs);
822 if (sc->sc_ringsize > VKE_DEFAULT_RINGSIZE)
823 sc->sc_ringsize = VKE_DEFAULT_RINGSIZE;
824 } else if (vke_max_ringsize >= VKE_CHUNK) { /* Tunable specified */
825 sc->sc_ringsize = LOW_POW_2(vke_max_ringsize);
827 sc->sc_ringsize = LOW_POW_2(VKE_CHUNK);
830 ifp = &sc->arpcom.ac_if;
831 if_initname(ifp, VKE_DEVNAME, sc->sc_unit);
833 /* NB: after if_initname() */
834 sysctl_ctx_init(&sc->sc_sysctl_ctx);
835 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
836 SYSCTL_STATIC_CHILDREN(_hw),
837 OID_AUTO, ifp->if_xname,
/* Sysctl failure is non-fatal; only the tap_unit leaf is lost. */
839 if (sc->sc_sysctl_tree == NULL) {
840 kprintf(VKE_DEVNAME "%d: can't add sysctl node\n", unit);
842 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
843 SYSCTL_CHILDREN(sc->sc_sysctl_tree),
844 OID_AUTO, "tap_unit",
845 CTLFLAG_RD, &sc->sc_tap_unit, 0,
846 "Backend tap(4) unit");
/* Wire up ifnet callbacks; MTU/baudrate come from the backing tap. */
850 ifp->if_ioctl = vke_ioctl;
851 ifp->if_start = vke_start;
852 ifp->if_init = vke_init;
853 ifp->if_mtu = tapinfo.mtu;
854 ifp->if_baudrate = tapinfo.baudrate;
855 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
856 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
857 ifq_set_ready(&ifp->if_snd);
859 ifmedia_init(&sc->sc_media, 0, vke_media_change, vke_media_status);
860 /* We support as many media types as we please for
861 debugging purposes */
862 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
863 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
864 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_2, 0, NULL);
865 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_5, 0, NULL);
866 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
867 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
868 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_FX, 0, NULL);
869 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_T4, 0, NULL);
870 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_VG, 0, NULL);
871 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_T2, 0, NULL);
872 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
873 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_STP, 0, NULL);
874 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_FL, 0, NULL);
875 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
876 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
877 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
878 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
879 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_HPNA_1, 0, NULL);
880 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
881 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
882 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
883 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_SX, 0, NULL);
884 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
885 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
886 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
887 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
888 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
889 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
890 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
891 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
893 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
/* Virtual link is always up. */
895 ifp->if_link_state = LINK_STATE_UP;
897 ether_ifattach(ifp, enaddr, NULL);
899 if (bootverbose && sc->sc_addr != 0) {
900 if_printf(ifp, "pre-configured "
901 "address 0x%08x, netmask 0x%08x, %d mbuf clusters\n",
902 ntohl(sc->sc_addr), ntohl(sc->sc_mask), sc->sc_ringsize);
/*
 * Configure the pre-assigned IPv4 address/netmask (network byte order)
 * on the interface via in_control(SIOCAIFADDR).  Runs with the ifnet
 * serializer held; it is dropped around in_control() which re-acquires
 * it internally.
 */
909 vke_init_addr(struct ifnet *ifp, in_addr_t addr, in_addr_t mask)
911 struct ifaliasreq ifra;
912 struct sockaddr_in *sin;
915 ASSERT_SERIALIZED(ifp->if_serializer);
918 if_printf(ifp, "add pre-configured "
919 "address 0x%08x, netmask 0x%08x\n",
920 ntohl(addr), ntohl(mask));
923 bzero(&ifra, sizeof(ifra));
925 /* NB: no need to set ifaliasreq.ifra_name */
/* Fill in the address sockaddr. */
927 sin = (struct sockaddr_in *)&ifra.ifra_addr;
928 sin->sin_family = AF_INET;
929 sin->sin_len = sizeof(*sin);
930 sin->sin_addr.s_addr = addr;
/* Fill in the netmask sockaddr (family not required for masks). */
933 sin = (struct sockaddr_in *)&ifra.ifra_mask;
934 sin->sin_len = sizeof(*sin);
935 sin->sin_addr.s_addr = mask;
939 * Temporarily release serializer, in_control() will hold
940 * it again before calling ifnet.if_ioctl().
942 ifnet_deserialize_all(ifp);
943 ret = in_control(SIOCAIFADDR, (caddr_t)&ifra, ifp, NULL);
944 ifnet_serialize_all(ifp);
/* ifmedia change callback; body not visible in this view — presumably a
 * no-op accepting any media, since the link is virtual.  Confirm. */
949 static int vke_media_change(struct ifnet *ifp)
/*
 * ifmedia status callback: the virtual link is always valid and active.
 * If the selected media is bare IFM_ETHER (no subtype, i.e. autoselect
 * as configured in vke_attach), report 1000baseT full-duplex; otherwise
 * report whatever specific media the user selected.
 */
955 static void vke_media_status(struct ifnet *ifp, struct ifmediareq *imr)
957 struct vke_softc *sc = (struct vke_softc *)ifp->if_softc;
959 imr->ifm_status = IFM_AVALID;
960 imr->ifm_status |= IFM_ACTIVE;
962 if(sc->sc_media.ifm_cur) {
963 if(sc->sc_media.ifm_cur->ifm_media == IFM_ETHER) {
964 imr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
966 imr->ifm_active = sc->sc_media.ifm_cur->ifm_media;
/* No current media record: fall back to the same 1000baseT-FDX report. */
969 imr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;