/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/cothread.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ifq_var.h>

#include <netinet/in_var.h>

#include <net/tap/if_tap.h>

/* Userland headers for the cothread side (open/read/write/select/getpid). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define VKE_DEVNAME		"vke"

#define VKE_CHUNK	8	/* number of mbufs to queue before interrupting */

#define NETFIFOINDEX(u, sc)	((u) & ((sc)->sc_ringsize - 1))

#define VKE_COTD_RUN	0
#define VKE_COTD_EXIT	1
#define VKE_COTD_DEAD	2

struct vke_fifo {
	struct mbuf	**array;
	int		rindex;
	int		windex;
};
typedef struct vke_fifo *fifo_t;

/* Default value for a long time */
#define VKE_DEFAULT_RINGSIZE	256
static int vke_max_ringsize = 0;
TUNABLE_INT("hw.vke.max_ringsize", &vke_max_ringsize);

#define LOW_POW_2(n)	(1 << (fls(n) - 1))

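/*
 * Note: NETFIFOINDEX() masks with (sc_ringsize - 1), so the ring size must
 * always be a power of two.  LOW_POW_2(n) rounds n down to the largest
 * power of two <= n (e.g. LOW_POW_2(300) == 256), which is how vke_attach()
 * guarantees that property for both the default and the tunable path.
 */
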
struct vke_softc {
	struct arpcom		arpcom;
	int			sc_fd;
	int			sc_unit;

	cothread_t		cotd_tx;
	cothread_t		cotd_rx;

	int			cotd_tx_exit;
	int			cotd_rx_exit;

	void			*sc_txbuf;
	int			sc_txbuf_len;

	fifo_t			sc_txfifo;
	fifo_t			sc_txfifo_done;
	fifo_t			sc_rxfifo;

	int			sc_ringsize;

	long			cotd_ipackets;
	long			cotd_oerrors;
	long			cotd_opackets;

	struct sysctl_ctx_list	sc_sysctl_ctx;
	struct sysctl_oid	*sc_sysctl_tree;

	int			sc_tap_unit;	/* unit of backend tap(4) */
	in_addr_t		sc_addr;	/* address */
	in_addr_t		sc_mask;	/* netmask */
};

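/*
 * The cotd_ipackets/cotd_opackets/cotd_oerrors counters are incremented by
 * the cothreads (which cannot touch the per-cpu ifnet statistics) and are
 * folded into the real ifnet counters by vke_rx_intr()/vke_tx_intr() in
 * kernel context.
 */
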
static void	vke_start(struct ifnet *, struct ifaltq_subque *);
static void	vke_init(void *);
static int	vke_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vke_attach(const struct vknetif_info *, int);
static int	vke_stop(struct vke_softc *);
static int	vke_init_addr(struct ifnet *, in_addr_t, in_addr_t);
static void	vke_tx_intr(cothread_t cotd);
static void	vke_tx_thread(cothread_t cotd);
static void	vke_rx_intr(cothread_t cotd);
static void	vke_rx_thread(cothread_t cotd);

static int vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m);
static struct mbuf *vke_txfifo_dequeue(struct vke_softc *sc);

static int vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m);
static struct mbuf *vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm);

static struct mbuf *vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *nm);
static struct mbuf *vke_rxfifo_sniff(struct vke_softc *sc);

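/*
 * FIFO convention (inferred from the accessors below): each ring is
 * single-producer/single-consumer.  Only the producer advances windex and
 * only the consumer advances rindex, with cpu_sfence()/cpu_lfence() pairs
 * ordering the array store/load against the index update, so no lock is
 * required between the kernel and the cothread sharing a ring.
 */
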
static void
vke_sysinit(void *arg __unused)
{
	int i, unit;

	KASSERT(NetifNum <= VKNETIF_MAX, ("too many netifs: %d", NetifNum));

	unit = 0;
	for (i = 0; i < NetifNum; ++i) {
		if (vke_attach(&NetifInfo[i], unit) == 0)
			++unit;
	}
}
SYSINIT(vke, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, vke_sysinit, NULL);

/*
 * vke_txfifo_done_enqueue() - Add an mbuf to the transmit done fifo.  Since
 * the cothread cannot free transmit mbufs after processing we put them on
 * the done fifo so the kernel can free them.
 */
static int
vke_txfifo_done_enqueue(struct vke_softc *sc, struct mbuf *m)
{
	fifo_t fifo = sc->sc_txfifo_done;

	while (NETFIFOINDEX(fifo->windex + 1, sc) == NETFIFOINDEX(fifo->rindex, sc)) {
		usleep(20000);
	}

	fifo->array[NETFIFOINDEX(fifo->windex, sc)] = m;
	cpu_sfence();
	++fifo->windex;
	return (0);
}

/*
 * vke_txfifo_done_dequeue() - Remove an mbuf from the transmit done fifo.
 */
static struct mbuf *
vke_txfifo_done_dequeue(struct vke_softc *sc, struct mbuf *nm)
{
	fifo_t fifo = sc->sc_txfifo_done;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
	fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = nm;
	cpu_lfence();
	++fifo->rindex;
	return (m);
}

/*
 * vke_txfifo_enqueue() - Add an mbuf to the transmit fifo.  Returns -1 if
 * the fifo is full, 0 on success.
 */
static int
vke_txfifo_enqueue(struct vke_softc *sc, struct mbuf *m)
{
	fifo_t fifo = sc->sc_txfifo;

	if (NETFIFOINDEX(fifo->windex + 1, sc) == NETFIFOINDEX(fifo->rindex, sc))
		return (-1);

	fifo->array[NETFIFOINDEX(fifo->windex, sc)] = m;
	cpu_sfence();
	++fifo->windex;
	return (0);
}

/*
 * vke_txfifo_dequeue() - Return next mbuf on the transmit fifo if one
 * exists.
 */
static struct mbuf *
vke_txfifo_dequeue(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_txfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
	fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = NULL;
	cpu_lfence();
	++fifo->rindex;
	return (m);
}

/*
 * vke_txfifo_empty() - Return 1 if the transmit fifo is empty.
 */
static int
vke_txfifo_empty(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_txfifo;

	if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
		return (1);
	return (0);
}

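/*
 * Design note: vke_txfifo_enqueue() fails (-1) when the ring is full and the
 * caller simply drops the packet, while vke_txfifo_done_enqueue() spins until
 * space appears, because an mbuf that never reaches the done ring could not
 * be freed by the kernel and would be leaked.
 */
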
/*
 * vke_rxfifo_dequeue() - Return next mbuf on the receive fifo if one
 * exists, replacing it with newm which should point to a newly allocated
 * mbuf.
 */
static struct mbuf *
vke_rxfifo_dequeue(struct vke_softc *sc, struct mbuf *newm)
{
	fifo_t fifo = sc->sc_rxfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
	fifo->array[NETFIFOINDEX(fifo->rindex, sc)] = newm;
	cpu_lfence();
	++fifo->rindex;
	return (m);
}

/*
 * vke_rxfifo_sniff() - Return the next mbuf if available but do NOT remove
 * it from the FIFO.
 */
static struct mbuf *
vke_rxfifo_sniff(struct vke_softc *sc)
{
	fifo_t fifo = sc->sc_rxfifo;
	struct mbuf *m;

	if (NETFIFOINDEX(fifo->rindex, sc) == NETFIFOINDEX(fifo->windex, sc))
		return (NULL);

	m = fifo->array[NETFIFOINDEX(fifo->rindex, sc)];
	cpu_lfence();
	return (m);
}

static void
vke_init(void *xsc)
{
	struct vke_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	size_t ringsize = sc->sc_ringsize * sizeof(struct mbuf *);
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	vke_stop(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));

	/*
	 * Allocate memory for FIFO structures and mbufs.
	 */
	sc->sc_txfifo = kmalloc(sizeof(*sc->sc_txfifo),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_txfifo_done = kmalloc(sizeof(*sc->sc_txfifo_done),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_rxfifo = kmalloc(sizeof(*sc->sc_rxfifo),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_txfifo->array = kmalloc(ringsize, M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_txfifo_done->array = kmalloc(ringsize, M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_rxfifo->array = kmalloc(ringsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Pre-load the RX ring with empty clusters for the cothread to fill. */
	for (i = 0; i < sc->sc_ringsize; i++) {
		sc->sc_rxfifo->array[i] = m_getcl(MB_WAIT, MT_DATA, M_PKTHDR);
		sc->sc_txfifo->array[i] = NULL;
		sc->sc_txfifo_done->array[i] = NULL;
	}

	sc->cotd_tx_exit = sc->cotd_rx_exit = VKE_COTD_RUN;
	sc->cotd_tx = cothread_create(vke_tx_thread, vke_tx_intr, sc, "vke_tx");
	sc->cotd_rx = cothread_create(vke_rx_thread, vke_rx_intr, sc, "vke_rx");

	if (sc->sc_addr != 0) {
		in_addr_t addr, mask;

		addr = sc->sc_addr;
		mask = sc->sc_mask;

		/*
		 * Make sure the vkernel assigned address will not be added
		 * again.
		 */
		sc->sc_addr = 0;
		sc->sc_mask = 0;

		vke_init_addr(ifp, addr, mask);
	}
}

/*
 * Called from kernel.
 *
 * NOTE: We can't make any kernel callbacks while holding cothread lock
 *	 because the cothread lock is not governed by the kernel scheduler
 *	 (so mplock, tokens, etc will not be released).
 */
static void
vke_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vke_softc *sc = ifp->if_softc;
	struct mbuf *m;
	cothread_t cotd = sc->cotd_tx;
	int count;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	count = 0;
	while ((m = ifsq_dequeue(ifsq)) != NULL) {
		if (vke_txfifo_enqueue(sc, m) != -1) {
			if (count++ == VKE_CHUNK) {
				cothread_lock(cotd, 0);
				cothread_signal(cotd);
				cothread_unlock(cotd, 0);
				count = 0;
			}
		} else {
			m_freem(m);
		}
	}
	if (count) {
		cothread_lock(cotd, 0);
		cothread_signal(cotd);
		cothread_unlock(cotd, 0);
	}
}

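/*
 * The VKE_CHUNK batching above signals the transmit cothread once every
 * VKE_CHUNK packets (plus once more for any tail), rather than per packet,
 * to keep the number of cothread wakeups low.
 */
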
static int
vke_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vke_softc *sc = ifp->if_softc;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0)
				vke_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vke_stop(sc);
		}
		break;
	case SIOCGIFSTATUS: {
		struct ifstat *ifs = (struct ifstat *)data;
		int len;

		len = strlen(ifs->ascii);
		if (len < sizeof(ifs->ascii)) {
			if (sc->sc_tap_unit >= 0)
				ksnprintf(ifs->ascii + len, sizeof(ifs->ascii) - len,
				    "\tBacked by tap%d\n", sc->sc_tap_unit);
		}
		break;
	}
	case SIOCSIFADDR:
		if (((struct ifaddr *)data)->ifa_addr->sa_family == AF_INET) {
			/*
			 * If we are explicitly requested to change address,
			 * we should invalidate address/netmask passed in
			 * from vkernel command line.
			 */
			sc->sc_addr = 0;
			sc->sc_mask = 0;
		}
		/* FALL THROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
vke_stop(struct vke_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));

	/* Ask both cothreads to exit, then reap them. */
	if (sc->cotd_tx) {
		cothread_lock(sc->cotd_tx, 0);
		if (sc->cotd_tx_exit == VKE_COTD_RUN)
			sc->cotd_tx_exit = VKE_COTD_EXIT;
		cothread_signal(sc->cotd_tx);
		cothread_unlock(sc->cotd_tx, 0);
		cothread_delete(&sc->cotd_tx);
	}
	if (sc->cotd_rx) {
		cothread_lock(sc->cotd_rx, 0);
		if (sc->cotd_rx_exit == VKE_COTD_RUN)
			sc->cotd_rx_exit = VKE_COTD_EXIT;
		cothread_signal(sc->cotd_rx);
		cothread_unlock(sc->cotd_rx, 0);
		cothread_delete(&sc->cotd_rx);
	}

	/* Free any mbufs still sitting in the rings. */
	for (i = 0; i < sc->sc_ringsize; i++) {
		if (sc->sc_rxfifo && sc->sc_rxfifo->array[i]) {
			m_freem(sc->sc_rxfifo->array[i]);
			sc->sc_rxfifo->array[i] = NULL;
		}
		if (sc->sc_txfifo && sc->sc_txfifo->array[i]) {
			m_freem(sc->sc_txfifo->array[i]);
			sc->sc_txfifo->array[i] = NULL;
		}
		if (sc->sc_txfifo_done && sc->sc_txfifo_done->array[i]) {
			m_freem(sc->sc_txfifo_done->array[i]);
			sc->sc_txfifo_done->array[i] = NULL;
		}
	}

	if (sc->sc_txfifo) {
		if (sc->sc_txfifo->array)
			kfree(sc->sc_txfifo->array, M_DEVBUF);
		kfree(sc->sc_txfifo, M_DEVBUF);
		sc->sc_txfifo = NULL;
	}

	if (sc->sc_txfifo_done) {
		if (sc->sc_txfifo_done->array)
			kfree(sc->sc_txfifo_done->array, M_DEVBUF);
		kfree(sc->sc_txfifo_done, M_DEVBUF);
		sc->sc_txfifo_done = NULL;
	}

	if (sc->sc_rxfifo) {
		if (sc->sc_rxfifo->array)
			kfree(sc->sc_rxfifo->array, M_DEVBUF);
		kfree(sc->sc_rxfifo, M_DEVBUF);
		sc->sc_rxfifo = NULL;
	}

	return 0;
}

/*
 * vke_rx_intr() is the interrupt function for the receive cothread.
 */
static void
vke_rx_intr(cothread_t cotd)
{
	struct mbuf *m;
	struct mbuf *nm;
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	static int count = 0;

	ifnet_serialize_all(ifp);
	cothread_lock(cotd, 0);

	if (sc->cotd_rx_exit != VKE_COTD_RUN) {
		cothread_unlock(cotd, 0);
		ifnet_deserialize_all(ifp);
		return;
	}
	if (sc->cotd_ipackets) {
		IFNET_STAT_INC(ifp, ipackets, 1);
		sc->cotd_ipackets = 0;
	}
	cothread_unlock(cotd, 0);

	while ((m = vke_rxfifo_sniff(sc)) != NULL) {
		nm = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (nm) {
			/* Replace the consumed cluster and pass the packet up. */
			vke_rxfifo_dequeue(sc, nm);
			ifp->if_input(ifp, m, NULL, -1);
			if (count++ == VKE_CHUNK) {
				cothread_lock(cotd, 0);
				cothread_signal(cotd);
				cothread_unlock(cotd, 0);
				count = 0;
			}
		} else {
			/* Allocation failed, drop the packet and recycle m. */
			vke_rxfifo_dequeue(sc, m);
		}
	}

	if (count) {
		cothread_lock(cotd, 0);
		cothread_signal(cotd);
		cothread_unlock(cotd, 0);
	}
	ifnet_deserialize_all(ifp);
}

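/*
 * Note the sniff/dequeue two-step above: a packet is only removed from the
 * RX ring once a replacement cluster has been allocated, so an allocation
 * failure just recycles the original mbuf as the replacement and drops that
 * packet instead of shrinking the ring.
 */
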
/*
 * vke_tx_intr() is the interrupt function for the transmit cothread.
 * Calls vke_start() to handle processing transmit mbufs.
 */
static void
vke_tx_intr(cothread_t cotd)
{
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;

	ifnet_serialize_all(ifp);
	cothread_lock(cotd, 0);
	if (sc->cotd_tx_exit != VKE_COTD_RUN) {
		cothread_unlock(cotd, 0);
		ifnet_deserialize_all(ifp);
		return;
	}
	if (sc->cotd_opackets) {
		IFNET_STAT_INC(ifp, opackets, 1);
		sc->cotd_opackets = 0;
	}
	if (sc->cotd_oerrors) {
		IFNET_STAT_INC(ifp, oerrors, 1);
		sc->cotd_oerrors = 0;
	}
	cothread_unlock(cotd, 0);

	/*
	 * Free TX mbufs that have been processed before starting new
	 * ones, to be pipeline friendly.
	 */
	while ((m = vke_txfifo_done_dequeue(sc, NULL)) != NULL) {
		m_freem(m);
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		if_devstart(ifp);

	ifnet_deserialize_all(ifp);
}

/*
 * vke_rx_thread() is the body of the receive cothread.
 *
 * WARNING!  THIS IS A COTHREAD WHICH HAS NO PER-CPU GLOBALDATA!!!!!
 */
static void
vke_rx_thread(cothread_t cotd)
{
	struct mbuf *m;
	struct vke_softc *sc = cotd->arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	fifo_t fifo = sc->sc_rxfifo;
	fd_set fdset;
	struct timeval tv;
	int count;
	int n;

	/*
	 * Select timeout cannot be infinite since we need to check for
	 * the exit flag sc->cotd_rx_exit.
	 */
	tv.tv_sec = 0;
	tv.tv_usec = 500000;

	FD_ZERO(&fdset);
	count = 0;

	while (sc->cotd_rx_exit == VKE_COTD_RUN) {
		/*
		 * Wait for the RX FIFO to be loaded with
		 * empty mbufs.
		 */
		if (NETFIFOINDEX(fifo->windex + 1, sc) ==
		    NETFIFOINDEX(fifo->rindex, sc)) {
			usleep(20000);
			continue;
		}

		/*
		 * Load data into the rx fifo
		 */
		m = fifo->array[NETFIFOINDEX(fifo->windex, sc)];
		n = read(sc->sc_fd, mtod(m, void *), MCLBYTES);
		if (n > 0) {
			/* no mycpu in cothread */
			/*IFNET_STAT_INC(ifp, ipackets, 1);*/
			++sc->cotd_ipackets;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = n;
			cpu_sfence();
			++fifo->windex;
			if (count++ == VKE_CHUNK) {
				cothread_intr(cotd);
				count = 0;
			}
		} else {
			if (count) {
				cothread_intr(cotd);
				count = 0;
			}
			FD_SET(sc->sc_fd, &fdset);

			if (select(sc->sc_fd + 1, &fdset, NULL, NULL, &tv) == -1) {
				fprintf(stderr,
					VKE_DEVNAME "%d: select failed for "
					"TAP device\n", sc->sc_unit);
			}
		}
	}

	sc->cotd_rx_exit = VKE_COTD_DEAD;
}

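/*
 * A tap(4) read() returns at most one ethernet frame, so a single mbuf
 * cluster (MCLBYTES) per ring slot serves as the receive buffer and
 * m_len/m_pkthdr.len are simply set to the byte count returned.
 */
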
/*
 * vke_tx_thread() is the body of the transmit cothread.
 *
 * WARNING!  THIS IS A COTHREAD WHICH HAS NO PER-CPU GLOBALDATA!!!!!
 */
static void
vke_tx_thread(cothread_t cotd)
{
	struct mbuf *m;
	struct vke_softc *sc = cotd->arg;
	/*struct ifnet *ifp = &sc->arpcom.ac_if;*/
	int count = 0;

	while (sc->cotd_tx_exit == VKE_COTD_RUN) {
		/*
		 * Write outgoing packets to the TAP interface
		 */
		m = vke_txfifo_dequeue(sc);
		if (m) {
			if (m->m_pkthdr.len <= MCLBYTES) {
				m_copydata(m, 0, m->m_pkthdr.len, sc->sc_txbuf);
				sc->sc_txbuf_len = m->m_pkthdr.len;

				if (write(sc->sc_fd, sc->sc_txbuf,
					  sc->sc_txbuf_len) < 0) {
					/* no mycpu in cothread */
					/*IFNET_STAT_INC(ifp, oerrors, 1);*/
					++sc->cotd_oerrors;
				} else {
					/* no mycpu in cothread */
					/*IFNET_STAT_INC(ifp, opackets, 1);*/
					++sc->cotd_opackets;
				}
			}
			if (count++ == VKE_CHUNK) {
				cothread_intr(cotd);
				count = 0;
			}
			vke_txfifo_done_enqueue(sc, m);
		} else {
			if (count) {
				cothread_intr(cotd);
				count = 0;
			}
			cothread_lock(cotd, 1);
			if (vke_txfifo_empty(sc))
				cothread_wait(cotd);
			cothread_unlock(cotd, 1);
		}
	}

	sc->cotd_tx_exit = VKE_COTD_DEAD;
}

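/*
 * Outgoing mbuf chains are linearized into sc_txbuf with m_copydata() before
 * the write(), since the frame is handed to the tap file descriptor as one
 * contiguous buffer; chains longer than MCLBYTES are silently skipped and
 * only recycled through the done ring.
 */
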
static int
vke_attach(const struct vknetif_info *info, int unit)
{
	struct vke_softc *sc;
	struct ifnet *ifp;
	struct tapinfo tapinfo;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int nmbufs;
	int fd;

	KKASSERT(info->tap_fd >= 0);
	fd = info->tap_fd;

	if (info->enaddr) {
		/*
		 * enaddr is supplied
		 */
		bcopy(info->enaddr, enaddr, ETHER_ADDR_LEN);
	} else {
		/*
		 * This is only a TAP device if tap_unit is non-negative.  If
		 * connecting to a virtual socket we generate a unique MAC.
		 *
		 * WARNING: enaddr[0] bit 0 is the multicast bit, when
		 *          randomizing enaddr[] just leave the first
		 *          two bytes 00 00 for now.
		 */
		bzero(enaddr, sizeof(enaddr));
		if (info->tap_unit >= 0) {
			if (ioctl(fd, TAPGIFINFO, &tapinfo) < 0) {
				kprintf(VKE_DEVNAME "%d: ioctl(TAPGIFINFO) "
					"failed: %s\n", unit, strerror(errno));
				return ENXIO;
			}

			if (ioctl(fd, SIOCGIFADDR, enaddr) < 0) {
				kprintf(VKE_DEVNAME "%d: ioctl(SIOCGIFADDR) "
					"failed: %s\n", unit, strerror(errno));
				return ENXIO;
			}
		} else {
			int fd = open("/dev/urandom", O_RDONLY);

			if (fd >= 0) {
				read(fd, enaddr + 2, 4);
				close(fd);
			}
			enaddr[4] = (int)getpid() >> 8;
			enaddr[5] = (int)getpid() & 255;
		}
	}

	if (ETHER_IS_MULTICAST(enaddr)) {
		kprintf(VKE_DEVNAME "%d: illegal MULTICAST ether mac!\n", unit);
		return ENXIO;
	}

	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_txbuf = kmalloc(MCLBYTES, M_DEVBUF, M_WAITOK);
	sc->sc_fd = fd;
	sc->sc_unit = unit;
	sc->sc_tap_unit = info->tap_unit;
	sc->sc_addr = info->netif_addr;
	sc->sc_mask = info->netif_mask;

	if (vke_max_ringsize == 0) {
		nmbufs = nmbclusters / (NetifNum * 2);
		sc->sc_ringsize = LOW_POW_2(nmbufs);
		if (sc->sc_ringsize > VKE_DEFAULT_RINGSIZE)
			sc->sc_ringsize = VKE_DEFAULT_RINGSIZE;
	} else if (vke_max_ringsize >= VKE_CHUNK) {	/* Tunable specified */
		sc->sc_ringsize = LOW_POW_2(vke_max_ringsize);
	} else {
		sc->sc_ringsize = LOW_POW_2(VKE_CHUNK);
	}

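	/*
	 * Sizing example (illustrative): with nmbclusters = 4096 and two
	 * configured netifs, nmbufs = 4096 / (2 * 2) = 1024 and
	 * LOW_POW_2(1024) = 1024, which is then clamped down to
	 * VKE_DEFAULT_RINGSIZE (256).
	 */
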
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, VKE_DEVNAME, sc->sc_unit);

	/* NB: after if_initname() */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, ifp->if_xname,
	    CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		kprintf(VKE_DEVNAME "%d: can't add sysctl node\n", unit);
	} else {
		SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sc_sysctl_tree),
		    OID_AUTO, "tap_unit",
		    CTLFLAG_RD, &sc->sc_tap_unit, 0,
		    "Backend tap(4) unit");
	}

	ifp->if_softc = sc;
	ifp->if_ioctl = vke_ioctl;
	ifp->if_start = vke_start;
	ifp->if_init = vke_init;
	ifp->if_mtu = tapinfo.mtu;
	ifp->if_baudrate = tapinfo.baudrate;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, enaddr, NULL);

	if (bootverbose && sc->sc_addr != 0) {
		if_printf(ifp, "pre-configured "
		    "address 0x%08x, netmask 0x%08x, %d mbuf clusters\n",
		    ntohl(sc->sc_addr), ntohl(sc->sc_mask), sc->sc_ringsize);
	}

	return 0;
}

static int
vke_init_addr(struct ifnet *ifp, in_addr_t addr, in_addr_t mask)
{
	struct ifaliasreq ifra;
	struct sockaddr_in *sin;
	int ret;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (bootverbose) {
		if_printf(ifp, "add pre-configured "
		    "address 0x%08x, netmask 0x%08x\n",
		    ntohl(addr), ntohl(mask));
	}

	bzero(&ifra, sizeof(ifra));

	/* NB: no need to set ifaliasreq.ifra_name */

	sin = (struct sockaddr_in *)&ifra.ifra_addr;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr.s_addr = addr;

	if (mask != 0) {
		sin = (struct sockaddr_in *)&ifra.ifra_mask;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr.s_addr = mask;
	}

	/*
	 * Temporarily release serializer, in_control() will hold
	 * it again before calling ifnet.if_ioctl().
	 */
	ifnet_deserialize_all(ifp);
	ret = in_control(NULL, SIOCAIFADDR, (caddr_t)&ifra, ifp, NULL);
	ifnet_serialize_all(ifp);

	return ret;
}