sys/net: add more interface groups related functions
[dragonfly.git] / sys / net / if.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1980, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)if.c 8.3 (Berkeley) 1/4/94
34 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35 * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
36 */
37
38#include "opt_compat.h"
39#include "opt_inet6.h"
40#include "opt_inet.h"
41#include "opt_polling.h"
42#include "opt_ifpoll.h"
43
44#include <sys/param.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/systm.h>
48#include <sys/proc.h>
49#include <sys/priv.h>
50#include <sys/protosw.h>
51#include <sys/socket.h>
52#include <sys/socketvar.h>
53#include <sys/socketops.h>
54#include <sys/protosw.h>
55#include <sys/kernel.h>
56#include <sys/ktr.h>
57#include <sys/sockio.h>
58#include <sys/syslog.h>
59#include <sys/sysctl.h>
60#include <sys/domain.h>
61#include <sys/thread.h>
62#include <sys/thread2.h>
63#include <sys/serialize.h>
64#include <sys/msgport2.h>
65#include <sys/bus.h>
66
67#include <net/if.h>
68#include <net/if_arp.h>
69#include <net/if_dl.h>
70#include <net/if_types.h>
71#include <net/if_var.h>
72#include <net/ifq_var.h>
73#include <net/radix.h>
74#include <net/route.h>
75#include <net/if_clone.h>
76#include <net/netisr.h>
77#include <net/netmsg2.h>
78
79#include <machine/atomic.h>
80#include <machine/stdarg.h>
81#include <machine/smp.h>
82
83#if defined(INET) || defined(INET6)
84/*XXX*/
85#include <netinet/in.h>
86#include <netinet/in_var.h>
87#include <netinet/if_ether.h>
88#ifdef INET6
89#include <netinet6/in6_var.h>
90#include <netinet6/in6_ifattach.h>
91#endif
92#endif
93
94#if defined(COMPAT_43)
95#include <emulation/43bsd/43bsd_socket.h>
96#endif /* COMPAT_43 */
97
/*
 * Message used to add/remove an ifaddr on an ifnet via the netisr
 * message ports (see the per-CPU ifnet threads set up by ifnetinit).
 */
struct netmsg_ifaddr {
	struct netmsg	netmsg;		/* embedded netmsg header */
	struct ifaddr	*ifa;		/* address being operated on */
	struct ifnet	*ifp;		/* interface owning the address */
	int		tail;		/* presumably: insert at list tail -- confirm at use site */
};
104
105/*
106 * System initialization
107 */
108static void if_attachdomain(void *);
109static void if_attachdomain1(struct ifnet *);
110static int ifconf(u_long, caddr_t, struct ucred *);
111static void ifinit(void *);
112static void ifnetinit(void *);
113static void if_slowtimo(void *);
114static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
115static int if_rtdel(struct radix_node *, void *);
116
117#ifdef INET6
118/*
119 * XXX: declare here to avoid to include many inet6 related files..
120 * should be more generalized?
121 */
122extern void nd6_setmtu(struct ifnet *);
123#endif
124
SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)

/* Per-interface-type companion allocate/free hooks, indexed by type */
static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");

/* Default send queue length applied in ifinit() when the driver set none */
int ifqmaxlen = IFQ_MAXLEN;
struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet);

/* In ifq_dispatch(), try to do direct ifnet.if_start first */
static int ifq_dispatch_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
	   &ifq_dispatch_schedonly, 0, "");

/* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
static int ifq_dispatch_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
	   &ifq_dispatch_schednochk, 0, "");

/* In if_devstart(), try to do direct ifnet.if_start first */
static int if_devstart_schedonly = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
	   &if_devstart_schedonly, 0, "");

/* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
static int if_devstart_schednochk = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
	   &if_devstart_schednochk, 0, "");

#ifdef SMP
/* Schedule ifnet.if_start on the current CPU */
static int if_start_oncpu_sched = 0;
SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
	   &if_start_oncpu_sched, 0, "");
#endif

/* Drives the periodic if_slowtimo() callback */
struct callout if_slowtimo_timer;

/* if_index / ifindex2ifnet: dense index -> ifnet map, grown in if_attach() */
int if_index = 0;
struct ifnet **ifindex2ifnet = NULL;
static struct thread ifnet_threads[MAXCPU];
static int ifnet_mpsafe_thread = NETMSG_SERVICE_MPSAFE;

/* KTR tracing of ifq enqueue/dequeue events */
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARG_SIZE	(sizeof(void *))
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

/* KTR tracing of if_start scheduling decisions */
#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARG_SIZE	(sizeof(void *))
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
#endif
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)

/* Head of the list of all interface groups in the system */
TAILQ_HEAD(, ifg_group) ifg_head;
207
/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
/* ARGSUSED*/
/*
 * System startup (SI_SUB_PROTO_IF): fix up drivers that did not set a
 * send queue length and kick off the slow-timeout watchdog cycle.
 */
void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init(&if_slowtimo_timer);

	crit_enter();
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		/* Supply the default queue length if the driver forgot */
		if (ifp->if_snd.ifq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
			ifp->if_snd.ifq_maxlen = ifqmaxlen;
		}
	}
	crit_exit();

	/* Prime the periodic watchdog; it re-arms itself */
	if_slowtimo(0);
}
233
234static int
235if_start_cpuid(struct ifnet *ifp)
236{
237 return ifp->if_cpuid;
238}
239
#ifdef DEVICE_POLLING
/*
 * if_start CPU selector when polling(4) is compiled in: prefer the
 * polling CPU if the interface is currently registered for polling
 * (if_poll_cpuid >= 0), otherwise fall back to the configured CPU.
 */
static int
if_start_cpuid_poll(struct ifnet *ifp)
{
	if (ifp->if_poll_cpuid < 0)
		return ifp->if_cpuid;
	return ifp->if_poll_cpuid;
}
#endif
252
/*
 * Send this CPU's if_start netmsg for the given ifnet to the local
 * ifnet thread's message port, unless the message is still in flight.
 */
static void
if_start_ipifunc(void *arg)
{
	struct ifnet *ifp = arg;
	struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;

	crit_enter();
	/* MSGF_DONE means the previous send was replied; safe to resend */
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
	crit_exit();
}
264
/*
 * Schedule ifnet.if_start on ifnet's CPU
 *
 * On SMP, the target is either the current CPU (if the
 * if_start_oncpu_sched sysctl is set) or the CPU reported by
 * ifp->if_start_cpuid(); a remote target is reached via IPI.
 * NOTE: the trailing if_start_ipifunc() call is the body of the
 * #ifdef'ed "else" above it -- do not reindent/brace carelessly.
 */
static void
if_start_schedule(struct ifnet *ifp)
{
#ifdef SMP
	int cpu;

	if (if_start_oncpu_sched)
		cpu = mycpuid;
	else
		cpu = ifp->if_start_cpuid(ifp);

	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
	else
#endif
	if_start_ipifunc(ifp);
}
285
/*
 * NOTE:
 * This function will release ifnet.if_start interlock,
 * if ifnet.if_start does not need to be scheduled
 *
 * Returns 1 if the caller should (re)schedule ifnet.if_start,
 * 0 if the interlock (ifq->altq_started) has been released.
 */
static __inline int
if_start_need_schedule(struct ifaltq *ifq, int running)
{
	if (!running || ifq_is_empty(ifq)
#ifdef ALTQ
	    || ifq->altq_tbr != NULL
#endif
	) {
		ALTQ_LOCK(ifq);
		/*
		 * ifnet.if_start interlock is released, if:
		 * 1) Hardware can not take any packets, due to
		 *    o  interface is marked down
		 *    o  hardware queue is full (IFF_OACTIVE)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    when hardware queue is ready
		 * 2) There is not packet in the ifnet.if_snd.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start
		 */
		if (!running || !ifq_data_ready(ifq)) {
			ifq->altq_started = 0;
			ALTQ_UNLOCK(ifq);
			return 0;
		}
		ALTQ_UNLOCK(ifq);
	}
	return 1;
}
324
/*
 * Netmsg handler behind the per-CPU if_start messages: run
 * ifnet.if_start for the ifnet carried in the message, chasing a CPU
 * change or rescheduling on this CPU as necessary.
 */
static void
if_start_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	crit_enter();
	lwkt_replymsg(lmsg, 0);	/* reply ASAP */
	crit_exit();

#ifdef SMP
	if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
		/*
		 * If the ifnet is still up, we need to
		 * chase its CPU change.
		 */
		if (ifp->if_flags & IFF_UP) {
			logifstart(chase_sched, ifp);
			if_start_schedule(ifp);
			return;
		} else {
			goto check;
		}
	}
#endif

	if (ifp->if_flags & IFF_UP) {
		ifnet_serialize_tx(ifp); /* XXX try? */
		if ((ifp->if_flags & IFF_OACTIVE) == 0) {
			logifstart(run, ifp);
			ifp->if_start(ifp);
			/* "running": if_start left the hw queue non-full */
			if ((ifp->if_flags &
			     (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
				running = 1;
		}
		ifnet_deserialize_tx(ifp);
	}
#ifdef SMP
check:
#endif
	if (if_start_need_schedule(ifq, running)) {
		crit_enter();
		if (lmsg->ms_flags & MSGF_DONE) {	/* XXX necessary? */
			logifstart(sched, ifp);
			lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
		}
		crit_exit();
	}
}
376
/*
 * Device driver ifnet.if_start helper function
 *
 * Acquires the if_start interlock (ifq->altq_started), runs
 * ifnet.if_start directly (unless the *_schedonly sysctl forces
 * scheduling), then reschedules if more data remains.
 * Caller must hold the TX serializer.
 */
void
if_devstart(struct ifnet *ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0;

	ASSERT_IFNET_SERIALIZED_TX(ifp);

	ALTQ_LOCK(ifq);
	/* Bail if if_start is already interlocked or there is nothing to send */
	if (ifq->altq_started || !ifq_data_ready(ifq)) {
		logifstart(avoid, ifp);
		ALTQ_UNLOCK(ifq);
		return;
	}
	ifq->altq_started = 1;
	ALTQ_UNLOCK(ifq);

	if (if_devstart_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuit the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return;
	}

	logifstart(run, ifp);
	ifp->if_start(ifp);

	/* "running": if_start left the hw queue non-full */
	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
		running = 1;

	if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data need to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}
}
421
422static void
423if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
424{
425 lwkt_serialize_enter(ifp->if_serializer);
426}
427
428static void
429if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
430{
431 lwkt_serialize_exit(ifp->if_serializer);
432}
433
434static int
435if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
436{
437 return lwkt_serialize_try(ifp->if_serializer);
438}
439
#ifdef INVARIANTS
/*
 * Default if_serialize_assert method: verify the caller's claim about
 * whether the interface's serializer is currently held.
 */
static void
if_default_serialize_assert(struct ifnet *ifp,
			    enum ifnet_serialize slz __unused,
			    boolean_t serialized)
{
	if (!serialized) {
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
		return;
	}
	ASSERT_SERIALIZED(ifp->if_serializer);
}
#endif
452
/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.  If non-NULL access to the interface
 * may be MPSAFE.
 *
 * Sets up serialize methods, per-CPU if_start netmsgs, address lists,
 * the if_index -> ifnet mapping, the AF_LINK ifaddr carrying the
 * interface name, and the classic send queue; finally announces the
 * arrival via eventhandler/devctl/routing socket.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize, ifasize;
	int namelen, masklen;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	int i;

	static int if_indexlim = 8;	/* current ifindex2ifnet capacity */

	/*
	 * Drivers supply either a complete set of serialize methods,
	 * or none at all, in which case the defaults driven by
	 * if_serializer are installed.
	 */
	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
			ifp->if_tryserialize != NULL &&
			ifp->if_serialize_assert != NULL,
			("serialize functions are partially setup\n"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
			("both serialize functions and default serializer "
			 "are supplied\n"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
			ifp->if_tryserialize == NULL &&
			ifp->if_serialize_assert == NULL,
			("serialize functions are partially setup\n"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}

	ifp->if_start_cpuid = if_start_cpuid;
	ifp->if_cpuid = 0;

#ifdef DEVICE_POLLING
	/* Device is not in polling mode by default */
	ifp->if_poll_cpuid = -1;
	if (ifp->if_poll != NULL)
		ifp->if_start_cpuid = if_start_cpuid_poll;
#endif

	/* One if_start netmsg per CPU, each pointing back at this ifp */
	ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
				     M_LWKTMSG, M_WAITOK);
	for (i = 0; i < ncpus; ++i) {
		netmsg_init(&ifp->if_start_nmsg[i], NULL, &netisr_adone_rport,
			    0, if_start_dispatch);
		ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
	}

	TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
	ifp->if_index = ++if_index;

	/*
	 * XXX -
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
				    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);

	TAILQ_INIT(&ifp->if_prefixhead);
	TAILQ_INIT(&ifp->if_multiaddrs);
	getmicrotime(&ifp->if_lastchange);

	/* Grow the if_index -> ifnet map by doubling when exhausted */
	if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		if_indexlim <<= 1;

		/* grow ifindex2ifnet */
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet) {
			bcopy(ifindex2ifnet, q, n/2);
			kfree(ifindex2ifnet, M_IFADDR);
		}
		ifindex2ifnet = q;
	}

	ifindex2ifnet[if_index] = ifp;

	/*
	 * create a Link Level name for this device
	 */
	namelen = strlen(ifp->if_xname);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
	masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = ROUNDUP(socksize);
	/* One allocation: ifaddr + AF_LINK sockaddr + its netmask */
	ifasize = sizeof(struct ifaddr) + 2 * socksize;
	ifa = ifa_create(ifasize, M_WAITOK);
	sdl = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_ifp = ifp;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	/* Netmask: all-ones over the name portion of the sockaddr_dl */
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);

	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

	/* Initialize the send queue; classic FIFO discipline by default */
	ifq = &ifp->if_snd;
	ifq->altq_type = 0;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_ifp = ifp;
	ifq->altq_started = 0;
	ifq->altq_prepended = NULL;
	ALTQ_LOCK_INIT(ifq);
	ifq_set_classic(ifq);

	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	/* Announce the interface. */
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}
617
618static void
619if_attachdomain(void *dummy)
620{
621 struct ifnet *ifp;
622
623 crit_enter();
624 TAILQ_FOREACH(ifp, &ifnet, if_list)
625 if_attachdomain1(ifp);
626 crit_exit();
627}
628SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
629 if_attachdomain, NULL);
630
/*
 * Attach address-family dependent data (dom_ifattach hooks) to a
 * single interface, after zeroing its if_afdata slots.
 */
static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	crit_enter();

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
				(*dp->dom_ifattach)(ifp);
	crit_exit();
}
646
/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct ifaddr_container *ifac, *next;

	/* MUTABLE iteration: entries are unlinked/destroyed as we go */
	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
			      ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			/* Ask in_control() to do the full SIOCDIFADDR teardown */
			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
				       NULL) == 0)
				continue;
			/* in_control() failed; fall through to generic unlink */
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
	}
}
704
/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 *
 * Tears down polling registrations, the send queue, all addresses,
 * per-family state, every route referencing the interface (walking
 * each CPU's routing tables), the if_index mapping, and finally the
 * ifnet list entry and per-interface allocations.
 */
void
if_detach(struct ifnet *ifp)
{
	struct radix_node_head *rnh;
	int i;
	int cpu, origcpu;
	struct domain *dp;

	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

	/*
	 * Remove routes and flush queues.
	 */
	crit_enter();
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		ether_poll_deregister(ifp);
#endif
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif
	if_down(ifp);

#ifdef ALTQ
	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		/* Only the AF_LINK ifaddr may remain at this point */
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
			("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
			("there are still ifaddrs left on if_addrheads"));
	}

#ifdef INET
	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and bark it.
	 */
	in6_ifdetach(ifp);
#endif

	/*
	 * Delete all remaining routes using this interface
	 * Unfortuneatly the only way to do this is to slog through
	 * the entire routing table looking for routes which point
	 * to this interface...oh well...
	 */
	origcpu = mycpuid;
	for (cpu = 0; cpu < ncpus2; cpu++) {
		/* Routing tables are per-CPU; walk each on its own CPU */
		lwkt_migratecpu(cpu);
		for (i = 1; i <= AF_MAX; i++) {
			if ((rnh = rt_tables[cpu][i]) == NULL)
				continue;
			rnh->rnh_walktree(rnh, if_rtdel, ifp);
		}
	}
	lwkt_migratecpu(origcpu);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
				ifp->if_afdata[dp->dom_family]);

	/*
	 * Remove interface from ifindex2ifp[] and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;

	TAILQ_REMOVE(&ifnet, ifp, if_link);
	kfree(ifp->if_addrheads, M_IFADDR);
	kfree(ifp->if_start_nmsg, M_LWKTMSG);
	crit_exit();
}
814
815/*
816 * Create interface group without members
817 */
818struct ifg_group *
819if_creategroup(const char *groupname)
820{
821 struct ifg_group *ifg = NULL;
822
823 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
824 M_TEMP, M_NOWAIT)) == NULL)
825 return (NULL);
826
827 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
828 ifg->ifg_refcnt = 0;
829 ifg->ifg_carp_demoted = 0;
830 TAILQ_INIT(&ifg->ifg_members);
831#if NPF > 0
832 pfi_attach_ifgroup(ifg);
833#endif
834 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);
835
836 return (ifg);
837}
838
/*
 * Add a group to an interface
 *
 * Returns 0 on success, EINVAL for a name ending in a digit,
 * EEXIST if already a member, ENOMEM on allocation failure.
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_group *ifg = NULL;
	struct ifg_member *ifgm;

	/*
	 * Reject group names ending in a digit -- presumably so group
	 * names cannot collide with interface names like "em0"; TODO
	 * confirm against the ifgroup conventions.
	 */
	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
	    groupname[strlen(groupname) - 1] <= '9')
		return (EINVAL);

	/* Already a member of this group? */
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			return (EEXIST);

	if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);

	if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
		kfree(ifgl, M_TEMP);
		return (ENOMEM);
	}

	/* Look the group up; create it on first use */
	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, groupname))
			break;

	if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
		kfree(ifgl, M_TEMP);
		kfree(ifgm, M_TEMP);
		return (ENOMEM);
	}

	ifg->ifg_refcnt++;
	ifgl->ifgl_group = ifg;
	ifgm->ifgm_ifp = ifp;

	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
888
/*
 * Remove a group from an interface
 *
 * Returns 0 on success, ENOENT if the interface is not a member.
 * The group itself is destroyed when its last member leaves.
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;

	/* Find this interface's membership record for the group */
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL)
		return (ENOENT);

	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);

	/* Drop this ifp from the group's member list */
	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		kfree(ifgm, M_TEMP);
	}

	/* Last reference gone: unlink and destroy the group */
	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
#if NPF > 0
		pfi_detach_ifgroup(ifgl->ifgl_group);
#endif
		kfree(ifgl->ifgl_group, M_TEMP);
	}

	kfree(ifgl, M_TEMP);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
931
/*
 * Stores all groups from an interface in memory pointed
 * to by data
 *
 * data is a struct ifgroupreq.  With ifgr_len == 0 this is a size
 * query: ifgr_len is set to the space required.  Otherwise up to
 * ifgr_len bytes of ifg_req records are copied out to ifgr_groups.
 */
int
if_getgroup(caddr_t data, struct ifnet *ifp)
{
	int len, error;
	struct ifg_list *ifgl;
	struct ifg_req ifgrq, *ifgp;
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;

	if (ifgr->ifgr_len == 0) {
		/* Size query only */
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
			sizeof(ifgrq.ifgrq_group));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
967
/*
 * Stores all members of a group in memory pointed to by data
 *
 * data is a struct ifgroupreq naming the group in ifgr_name.
 * With ifgr_len == 0 this is a size query; otherwise member names
 * are copied out to ifgr_groups, bounded by ifgr_len.
 */
int
if_getgroupmembers(caddr_t data)
{
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
	struct ifg_group *ifg;
	struct ifg_member *ifgm;
	struct ifg_req ifgrq, *ifgp;
	int len, error;

	/* Locate the named group */
	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL)
		return (ENOENT);

	if (ifgr->ifgr_len == 0) {
		/* Size query only */
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
			sizeof(ifgrq.ifgrq_member));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
1009
1010/*
1011 * Delete Routes for a Network Interface
1012 *
1013 * Called for each routing entry via the rnh->rnh_walktree() call above
1014 * to delete all route entries referencing a detaching network interface.
1015 *
1016 * Arguments:
1017 * rn pointer to node in the routing table
1018 * arg argument passed to rnh->rnh_walktree() - detaching interface
1019 *
1020 * Returns:
1021 * 0 successful
1022 * errno failed - reason indicated
1023 *
1024 */
1025static int
1026if_rtdel(struct radix_node *rn, void *arg)
1027{
1028 struct rtentry *rt = (struct rtentry *)rn;
1029 struct ifnet *ifp = arg;
1030 int err;
1031
1032 if (rt->rt_ifp == ifp) {
1033
1034 /*
1035 * Protect (sorta) against walktree recursion problems
1036 * with cloned routes
1037 */
1038 if (!(rt->rt_flags & RTF_UP))
1039 return (0);
1040
1041 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1042 rt_mask(rt), rt->rt_flags,
1043 NULL);
1044 if (err) {
1045 log(LOG_WARNING, "if_rtdel: error %d\n", err);
1046 }
1047 }
1048
1049 return (0);
1050}
1051
/*
 * Locate an interface based on a complete address.
 *
 * Returns the matching ifaddr (exact address match, or broadcast
 * address match for broadcast-capable interfaces), or NULL.
 */
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			/* Exact match on the configured address */
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			/* Otherwise try the broadcast address, if any */
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}
1080/*
1081 * Locate the point to point interface with a given destination address.
1082 */
1083struct ifaddr *
1084ifa_ifwithdstaddr(struct sockaddr *addr)
1085{
1086 struct ifnet *ifp;
1087
1088 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1089 struct ifaddr_container *ifac;
1090
1091 if (!(ifp->if_flags & IFF_POINTOPOINT))
1092 continue;
1093
1094 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1095 struct ifaddr *ifa = ifac->ifa;
1096
1097 if (ifa->ifa_addr->sa_family != addr->sa_family)
1098 continue;
1099 if (ifa->ifa_dstaddr &&
1100 sa_equal(addr, ifa->ifa_dstaddr))
1101 return (ifa);
1102 }
1103 }
1104 return (NULL);
1105}
1106
/*
 * Find an interface on a specific network.  If many, choice
 * is most specific found.
 *
 * Returns the best-matching ifaddr, or NULL if no interface has an
 * address in addr's family whose masked network part matches addr.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifnet *ifp;
	struct ifaddr *ifa_maybe = NULL;	/* most specific match so far */
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		/* NOTE(review): a valid sdl_index short-circuits the scan. */
		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan though each interface, looking for ones that have
	 * addresses in this address family.
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			/*
			 * The "next" label is the continue-point for the
			 * byte-compare loop below as well.
			 */
			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						return (ifa);
					} else {
						continue;
					}
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit dissagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
					(char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search
				 * for an even better one.
				 */
				if (ifa_maybe == 0 ||
				    rn_refines((char *)ifa->ifa_netmask,
					       (char *)ifa_maybe->ifa_netmask))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}
1201
/*
 * Find an interface address specific to an interface best matching
 * a given address.
 *
 * Exact matches (or dstaddr matches for p2p interfaces) win; failing
 * that, the first address whose masked network part matches is
 * returned; failing that, the first address in addr's family.
 * Returns NULL when the family is out of range or nothing matches.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = 0;	/* fallback: first addr in family */
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (0);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == 0)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			/* No mask: only exact addr/dstaddr matches count. */
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			/* Byte-wise compare under the netmask. */
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}
1248
/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	/* Only rewrite the ifa on route addition, and only if sane. */
	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		/* Swap the route's ifa reference for the better match. */
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		/* Recurse into the new ifa's hook, guarding against self. */
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt, info);
	}
}
1273
1274/*
1275 * Mark an interface down and notify protocols of
1276 * the transition.
1277 * NOTE: must be called at splnet or eqivalent.
1278 */
1279void
1280if_unroute(struct ifnet *ifp, int flag, int fam)
1281{
1282 struct ifaddr_container *ifac;
1283
1284 ifp->if_flags &= ~flag;
1285 getmicrotime(&ifp->if_lastchange);
1286 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1287 struct ifaddr *ifa = ifac->ifa;
1288
1289 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1290 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1291 }
1292 ifq_purge(&ifp->if_snd);
1293 rt_ifmsg(ifp);
1294}
1295
1296/*
1297 * Mark an interface up and notify protocols of
1298 * the transition.
1299 * NOTE: must be called at splnet or eqivalent.
1300 */
1301void
1302if_route(struct ifnet *ifp, int flag, int fam)
1303{
1304 struct ifaddr_container *ifac;
1305
1306 ifq_purge(&ifp->if_snd);
1307 ifp->if_flags |= flag;
1308 getmicrotime(&ifp->if_lastchange);
1309 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1310 struct ifaddr *ifa = ifac->ifa;
1311
1312 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1313 kpfctlinput(PRC_IFUP, ifa->ifa_addr);
1314 }
1315 rt_ifmsg(ifp);
1316#ifdef INET6
1317 in6_if_up(ifp);
1318#endif
1319}
1320
/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	/* Synchronize with all net threads before returning. */
	netmsg_service_sync();
}
1336
/*
 * Mark an interface up and notify protocols of the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}
1347
1348/*
1349 * Process a link state change.
1350 * NOTE: must be called at splsoftnet or equivalent.
1351 */
1352void
1353if_link_state_change(struct ifnet *ifp)
1354{
1355 int link_state = ifp->if_link_state;
1356
1357 rt_ifmsg(ifp);
1358 devctl_notify("IFNET", ifp->if_xname,
1359 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1360}
1361
/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
static void
if_slowtimo(void *arg)
{
	struct ifnet *ifp;

	crit_enter();

	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		/* if_timer == 0 means the watchdog is disarmed. */
		if (ifp->if_timer == 0 || --ifp->if_timer)
			continue;
		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				++ifp->if_timer;
			}
		}
	}

	crit_exit();

	/* Re-arm ourselves; this callout runs IFNET_SLOWHZ times a second. */
	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}
1392
1393/*
1394 * Map interface name to
1395 * interface structure pointer.
1396 */
1397struct ifnet *
1398ifunit(const char *name)
1399{
1400 struct ifnet *ifp;
1401
1402 /*
1403 * Search all the interfaces for this name/number
1404 */
1405
1406 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1407 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
1408 break;
1409 }
1410 return (ifp);
1411}
1412
1413
1414/*
1415 * Map interface name in a sockaddr_dl to
1416 * interface structure pointer.
1417 */
1418struct ifnet *
1419if_withname(struct sockaddr *sa)
1420{
1421 char ifname[IFNAMSIZ+1];
1422 struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1423
1424 if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1425 (sdl->sdl_nlen > IFNAMSIZ) )
1426 return NULL;
1427
1428 /*
1429 * ifunit wants a null-terminated name. It may not be null-terminated
1430 * in the sockaddr. We don't want to change the caller's sockaddr,
1431 * and there might not be room to put the trailing null anyway, so we
1432 * make a local copy that we know we can null terminate safely.
1433 */
1434
1435 bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1436 ifname[sdl->sdl_nlen] = '\0';
1437 return ifunit(ifname);
1438}
1439
1440
/*
 * Interface ioctls.
 *
 * Dispatcher for all interface-level ioctl commands arriving via a
 * socket.  Commands that do not name a specific interface are handled
 * first; everything else resolves ifr_name to an ifnet and operates on
 * it.  Unknown commands are handed down to the protocol via
 * so_pru_control().  Privileged commands check PRIV_ROOT.
 */
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error;
	short oif_flags;
	int new_flags;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	/* Commands that operate on the interface list as a whole. */
	switch (cmd) {

	case SIOCGIFCONF:
	case OSIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	}
	ifr = (struct ifreq *)data;

	/* Cloning commands; the named interface need not exist yet. */
	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFCREATE2:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
			cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_destroy(ifr->ifr_name));

	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	}

	/* Everything below requires an existing interface. */
	ifp = ifunit(ifr->ifr_name);
	if (ifp == 0)
		return (ENXIO);
	switch (cmd) {

	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		/* if_flags is wider than ifr_flags; split across two shorts. */
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
#ifdef DEVICE_POLLING
		ifr->ifr_pollcpu = ifp->if_poll_cpuid;
#else
		ifr->ifr_pollcpu = -1;
#endif
		break;

	case SIOCSIFPOLLCPU:
#ifdef DEVICE_POLLING
		if ((ifp->if_flags & IFF_POLLING) == 0)
			ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
#endif
		break;

	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		/* Reassemble the 32-bit flag word from the two shorts. */
		new_flags = (ifr->ifr_flags & 0xffff) |
		    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
		    (new_flags & IFF_UP) == 0) {
			crit_enter();
			if_down(ifp);
			crit_exit();
		} else if (new_flags & IFF_UP &&
		    (ifp->if_flags & IFF_UP) == 0) {
			crit_enter();
			if_up(ifp);
			crit_exit();
		}

#ifdef DEVICE_POLLING
		if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
			if (new_flags & IFF_POLLING) {
				ether_poll_register(ifp);
			} else {
				ether_poll_deregister(ifp);
			}
		}
#endif
#ifdef IFPOLL_ENABLE
		if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
			if (new_flags & IFF_NPOLLING)
				ifpoll_register(ifp);
			else
				ifpoll_deregister(ifp);
		}
#endif

		/* Only user-settable bits may change. */
		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
			(new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		/* Let the driver react to the new flag settings. */
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		/* Refuse capabilities the hardware does not advertise. */
		if (ifr->ifr_reqcap & ~ifp->if_capabilities)
			return (EINVAL);
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error != 0)
			return (error);
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error != 0)
			return (error);
		if (new_name[0] == '\0')
			return (EINVAL);
		if (ifunit(new_name) != NULL)
			return (EEXIST);

		/* Detach/reattach around the rename so hooks see it. */
		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		/* XXX IFA_LOCK(ifa); */
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			    sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		/* Rebuild the netmask's all-ones name prefix to match. */
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		/* XXX IFA_UNLOCK(ifa) */

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;

	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return error;
		if (!ifp->if_ioctl)
			return EOPNOTSUPP;
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		return (error);

	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
			return (EINVAL);
		/* The driver applies the MTU; we just validate and notify. */
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0) {
			getmicrotime(&ifp->if_lastchange);
			rt_ifmsg(ifp);
		}
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		return (error);
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return EOPNOTSUPP;

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK)
			return EINVAL;

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;
			error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		return error;

	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFMEDIA:
	case SIOCSIFGENERIC:
		/* Privileged set-commands handled entirely by the driver. */
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		if (ifp->if_ioctl == 0)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		return error;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* FALLTHROUGH - the driver ioctl fills in the status */

	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFMEDIA:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		return (error);

	case SIOCSIFLLADDR:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			return (error);
		error = if_setlladdr(ifp,
		    ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
		EVENTHANDLER_INVOKE(iflladdr_event, ifp);
		return (error);

	default:
		/* Unknown command: pass it down to the protocol layer. */
		oif_flags = ifp->if_flags;
		if (so->so_proto == 0)
			return (EOPNOTSUPP);
#ifndef COMPAT_43
		error = so_pru_control(so, cmd, data, ifp);
#else
		{
			/*
			 * 4.3BSD compatibility: translate old-style ioctls
			 * and old sockaddr layouts before/after the call.
			 */
			int ocmd = cmd;

			switch (cmd) {

			case SIOCSIFDSTADDR:
			case SIOCSIFADDR:
			case SIOCSIFBRDADDR:
			case SIOCSIFNETMASK:
#if BYTE_ORDER != BIG_ENDIAN
				if (ifr->ifr_addr.sa_family == 0 &&
				    ifr->ifr_addr.sa_len < 16) {
					ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
					ifr->ifr_addr.sa_len = 16;
				}
#else
				if (ifr->ifr_addr.sa_len == 0)
					ifr->ifr_addr.sa_len = 16;
#endif
				break;

			case OSIOCGIFADDR:
				cmd = SIOCGIFADDR;
				break;

			case OSIOCGIFDSTADDR:
				cmd = SIOCGIFDSTADDR;
				break;

			case OSIOCGIFBRDADDR:
				cmd = SIOCGIFBRDADDR;
				break;

			case OSIOCGIFNETMASK:
				cmd = SIOCGIFNETMASK;
			}
			error = so_pru_control(so, cmd, data, ifp);
			switch (ocmd) {

			case OSIOCGIFADDR:
			case OSIOCGIFDSTADDR:
			case OSIOCGIFBRDADDR:
			case OSIOCGIFNETMASK:
				*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;

			}
		}
#endif /* COMPAT_43 */

		/* If the protocol brought the interface up, finish the job. */
		if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
			DELAY(100);/* XXX: temporary workaround for fxp issue*/
			if (ifp->if_flags & IFF_UP) {
				crit_enter();
				in6_if_up(ifp);
				crit_exit();
			}
#endif
		}
		return (error);

	}
	return (0);
}
1829
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(struct ifnet *ifp, int pswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags;

	oldflags = ifp->if_flags;
	if (ifp->if_flags & IFF_PPROMISC) {
		/* Do nothing if device is in permanently promiscuous mode */
		ifp->if_pcount += pswitch ? 1 : -1;
		return (0);
	}
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		/* Only the first reference actually flips the flag. */
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode enabled\n",
		    ifp->if_xname);
	} else {
		/* Only the last reference actually clears the flag. */
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		log(LOG_INFO, "%s: promiscuous mode disabled\n",
		    ifp->if_xname);
	}
	/* Push the new flag state down to the driver. */
	ifr.ifr_flags = ifp->if_flags;
	ifr.ifr_flagshigh = ifp->if_flags >> 16;
	ifnet_serialize_all(ifp);
	error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
	ifnet_deserialize_all(ifp);
	if (error == 0)
		rt_ifmsg(ifp);
	else
		ifp->if_flags = oldflags;	/* driver refused; roll back */
	return error;
}
1879
/*
 * Return interface configuration
 * of system.  List may be used
 * in later ioctl's (above) to get
 * other information.
 *
 * Copies one ifreq per interface address out to user space, growing
 * the record in place for sockaddrs longer than struct sockaddr.
 * ifc_len is updated to the number of bytes actually copied out.
 */
static int
ifconf(u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifconf *ifc = (struct ifconf *)data;
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct ifreq ifr, *ifrp;
	int space = ifc->ifc_len, error = 0;

	ifrp = ifc->ifc_req;
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct ifaddr_container *ifac;
		int addrs;

		/* Stop once the user buffer cannot hold another record. */
		if (space <= sizeof ifr)
			break;

		/*
		 * Zero the stack declared structure first to prevent
		 * memory disclosure.
		 */
		bzero(&ifr, sizeof(ifr));
		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			error = ENAMETOOLONG;
			break;
		}

		addrs = 0;
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (space <= sizeof ifr)
				break;
			sa = ifa->ifa_addr;
			/* Jails only see addresses they are allowed to. */
			if (cred->cr_prison &&
			    prison_if(cred, sa))
				continue;
			addrs++;
#ifdef COMPAT_43
			if (cmd == OSIOCGIFCONF) {
				/* Old-style sockaddr: family in sa_len slot. */
				struct osockaddr *osa =
					 (struct osockaddr *)&ifr.ifr_addr;
				ifr.ifr_addr = *sa;
				osa->sa_family = sa->sa_family;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else
#endif
			if (sa->sa_len <= sizeof(*sa)) {
				ifr.ifr_addr = *sa;
				error = copyout(&ifr, ifrp, sizeof ifr);
				ifrp++;
			} else {
				/*
				 * Oversized sockaddr: copy name and sockaddr
				 * separately; the record grows past ifr_addr.
				 */
				if (space < (sizeof ifr) + sa->sa_len -
					    sizeof(*sa))
					break;
				space -= sa->sa_len - sizeof(*sa);
				error = copyout(&ifr, ifrp,
						sizeof ifr.ifr_name);
				if (error == 0)
					error = copyout(sa, &ifrp->ifr_addr,
							sa->sa_len);
				ifrp = (struct ifreq *)
					(sa->sa_len + (caddr_t)&ifrp->ifr_addr);
			}
			if (error)
				break;
			space -= sizeof ifr;
		}
		if (error)
			break;
		/* Interface with no (visible) addresses still gets a record. */
		if (!addrs) {
			bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
			error = copyout(&ifr, ifrp, sizeof ifr);
			if (error)
				break;
			space -= sizeof ifr;
			ifrp++;
		}
	}
	ifc->ifc_len -= space;
	return (error);
}
1970
1971/*
1972 * Just like if_promisc(), but for all-multicast-reception mode.
1973 */
1974int
1975if_allmulti(struct ifnet *ifp, int onswitch)
1976{
1977 int error = 0;
1978 struct ifreq ifr;
1979
1980 crit_enter();
1981
1982 if (onswitch) {
1983 if (ifp->if_amcount++ == 0) {
1984 ifp->if_flags |= IFF_ALLMULTI;
1985 ifr.ifr_flags = ifp->if_flags;
1986 ifr.ifr_flagshigh = ifp->if_flags >> 16;
1987 ifnet_serialize_all(ifp);
1988 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1989 NULL);
1990 ifnet_deserialize_all(ifp);
1991 }
1992 } else {
1993 if (ifp->if_amcount > 1) {
1994 ifp->if_amcount--;
1995 } else {
1996 ifp->if_amcount = 0;
1997 ifp->if_flags &= ~IFF_ALLMULTI;
1998 ifr.ifr_flags = ifp->if_flags;
1999 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2000 ifnet_serialize_all(ifp);
2001 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2002 NULL);
2003 ifnet_deserialize_all(ifp);
2004 }
2005 }
2006
2007 crit_exit();
2008
2009 if (error == 0)
2010 rt_ifmsg(ifp);
2011 return error;
2012}
2013
/*
 * Add a multicast listenership to the interface in question.
 * The link layer provides a routine (if_resolvemulti) which converts
 * a network-level multicast address into the corresponding link-level
 * address; when it does, a companion AF_LINK membership record is
 * maintained alongside the network-level one.
 *
 * Returns 0 on success (setting *retifma if requested) or an errno
 * from the link layer's resolver.
 */
int
if_addmulti(
	struct ifnet *ifp,	/* interface to manipulate */
	struct sockaddr *sa,	/* address to add */
	struct ifmultiaddr **retifma)
{
	struct sockaddr *llsa, *dupsa;
	int error;
	struct ifmultiaddr *ifma;

	/*
	 * If the matching multicast address already exists
	 * then don't add a new one, just add a reference
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa_equal(sa, ifma->ifma_addr)) {
			ifma->ifma_refcount++;
			if (retifma)
				*retifma = ifma;
			return 0;
		}
	}

	/*
	 * Give the link layer a chance to accept/reject it, and also
	 * find out which AF_LINK address this maps to, if it isn't one
	 * already.
	 */
	if (ifp->if_resolvemulti) {
		ifnet_serialize_all(ifp);
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		ifnet_deserialize_all(ifp);
		if (error)
			return error;
	} else {
		llsa = 0;
	}

	/* Record keeps its own copy of the sockaddr. */
	MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
	MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
	bcopy(sa, dupsa, sa->sa_len);

	ifma->ifma_addr = dupsa;
	ifma->ifma_lladdr = llsa;
	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = 0;
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);

	/*
	 * Some network interfaces can scan the address list at
	 * interrupt time; lock them out.
	 */
	crit_enter();
	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
	crit_exit();
	if (retifma)
		*retifma = ifma;

	/* Add (or reference) the companion link-level membership. */
	if (llsa != 0) {
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (sa_equal(ifma->ifma_addr, llsa))
				break;
		}
		if (ifma) {
			ifma->ifma_refcount++;
		} else {
			MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
			       M_IFMADDR, M_WAITOK);
			MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
			       M_IFMADDR, M_WAITOK);
			bcopy(llsa, dupsa, llsa->sa_len);
			ifma->ifma_addr = dupsa;
			ifma->ifma_ifp = ifp;
			ifma->ifma_refcount = 1;
			crit_enter();
			TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
			crit_exit();
		}
	}
	/*
	 * We are certain we have added something, so call down to the
	 * interface to let them know about it.
	 */
	crit_enter();
	ifnet_serialize_all(ifp);
	if (ifp->if_ioctl)
		ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);
	crit_exit();

	return 0;
}
2111
/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 *
 * Dropping the last reference to a network-level membership also drops
 * one reference on the companion link-level membership created by
 * if_addmulti(), freeing it when that reference was the last one.
 */
int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == 0)
		return ENOENT;

	/* Not the last reference: just decrement and return. */
	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	rt_newmaddrmsg(RTM_DELMADDR, ifma);
	sa = ifma->ifma_lladdr;		/* remember companion lladdr, if any */
	crit_enter();
	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	/*
	 * Make sure the interface driver is notified
	 * in the case of a link layer mcast group being left.
	 */
	if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) {
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
		ifnet_deserialize_all(ifp);
	}
	crit_exit();
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(ifma, M_IFMADDR);
	if (sa == 0)
		return 0;

	/*
	 * Now look for the link-layer address which corresponds to
	 * this network address.  It had been squirreled away in
	 * ifma->ifma_lladdr for this purpose (so we don't have
	 * to call ifp->if_resolvemulti() again), and we saved that
	 * value in sa above.  If some nasty deleted the
	 * link-layer address out from underneath us, we can deal because
	 * the address we stored was is not the same as the one which was
	 * in the record for the link-layer address.  (So we don't complain
	 * in that case.)
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		if (sa_equal(sa, ifma->ifma_addr))
			break;
	if (ifma == 0)
		return 0;

	if (ifma->ifma_refcount > 1) {
		ifma->ifma_refcount--;
		return 0;
	}

	/* Last reference on the link-level record: remove and notify. */
	crit_enter();
	ifnet_serialize_all(ifp);
	TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
	ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
	ifnet_deserialize_all(ifp);
	crit_exit();
	kfree(ifma->ifma_addr, M_IFMADDR);
	kfree(sa, M_IFMADDR);
	kfree(ifma, M_IFMADDR);

	return 0;
}
2185
2186/*
2187 * Delete all multicast group membership for an interface.
2188 * Should be used to quickly flush all multicast filters.
2189 */
2190void
2191if_delallmulti(struct ifnet *ifp)
2192{
2193 struct ifmultiaddr *ifma;
2194 struct ifmultiaddr *next;
2195
2196 TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
2197 if_delmulti(ifp, ifma->ifma_addr);
2198}
2199
2200
/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 *
 * Returns 0 on success, EINVAL for a bad/mismatched length or missing
 * link-level sockaddr, ENODEV for unsupported interface types.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifreq ifr;

	sdl = IF_LLSOCKADDR(ifp);
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:			/* these types use struct arpcom */
	case IFT_XETHER:
	case IFT_L2VLAN:
		/* Update both the arpcom copy and the AF_LINK sockaddr. */
		bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	ifnet_serialize_all(ifp);
	if ((ifp->if_flags & IFF_UP) != 0) {
		struct ifaddr_container *ifac;

		/* Bounce the interface: down then up via SIOCSIFFLAGS. */
		ifp->if_flags &= ~IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);
		ifp->if_flags |= IFF_UP;
		ifr.ifr_flags = ifp->if_flags;
		ifr.ifr_flagshigh = ifp->if_flags >> 16;
		ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
			      NULL);
#ifdef INET
		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(ifp, ifa);
		}
#endif
	}
	ifnet_deserialize_all(ifp);
	return (0);
}
2264
2265struct ifmultiaddr *
2266ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
2267{
2268 struct ifmultiaddr *ifma;
2269
2270 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2271 if (sa_equal(ifma->ifma_addr, sa))
2272 break;
2273
2274 return ifma;
2275}
2276
2277/*
2278 * This function locates the first real ethernet MAC from a network
2279 * card and loads it into node, returning 0 on success or ENOENT if
2280 * no suitable interfaces were found. It is used by the uuid code to
2281 * generate a unique 6-byte number.
2282 */
2283int
2284if_getanyethermac(uint16_t *node, int minlen)
2285{
2286 struct ifnet *ifp;
2287 struct sockaddr_dl *sdl;
2288
2289 TAILQ_FOREACH(ifp, &ifnet, if_link) {
2290 if (ifp->if_type != IFT_ETHER)
2291 continue;
2292 sdl = IF_LLSOCKADDR(ifp);
2293 if (sdl->sdl_alen < minlen)
2294 continue;
2295 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2296 minlen);
2297 return(0);
2298 }
2299 return (ENOENT);
2300}
2301
2302/*
2303 * The name argument must be a pointer to storage which will last as
2304 * long as the interface does. For physical devices, the result of
2305 * device_get_name(dev) is a good choice and for pseudo-devices a
2306 * static string works well.
2307 */
2308void
2309if_initname(struct ifnet *ifp, const char *name, int unit)
2310{
2311 ifp->if_dname = name;
2312 ifp->if_dunit = unit;
2313 if (unit != IF_DUNIT_NONE)
2314 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2315 else
2316 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2317}
2318
/*
 * kprintf() wrapper which prefixes the message with the interface's
 * name ("xname: ").  Returns the total number of characters printed.
 */
int
if_printf(struct ifnet *ifp, const char *fmt, ...)
{
	__va_list ap;
	int retval;

	retval = kprintf("%s: ", ifp->if_xname);
	__va_start(ap, fmt);
	retval += kvprintf(fmt, ap);
	__va_end(ap);
	return (retval);
}
2331
2332struct ifnet *
2333if_alloc(uint8_t type)
2334{
2335 struct ifnet *ifp;
2336
2337 ifp = kmalloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
2338
2339 ifp->if_type = type;
2340
2341 if (if_com_alloc[type] != NULL) {
2342 ifp->if_l2com = if_com_alloc[type](type, ifp);
2343 if (ifp->if_l2com == NULL) {
2344 kfree(ifp, M_IFNET);
2345 return (NULL);
2346 }
2347 }
2348 return (ifp);
2349}
2350
/* Release an ifnet allocated with if_alloc(). */
void
if_free(struct ifnet *ifp)
{
	kfree(ifp, M_IFNET);
}
2356
/*
 * Install the classic (plain FIFO, non-ALTQ) handlers on an
 * interface send queue.
 */
void
ifq_set_classic(struct ifaltq *ifq)
{
	ifq->altq_enqueue = ifq_classic_enqueue;
	ifq->altq_dequeue = ifq_classic_dequeue;
	ifq->altq_request = ifq_classic_request;
}
2364
2365int
2366ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
2367 struct altq_pktattr *pa __unused)
2368{
2369 logifq(enqueue, ifq);
2370 if (IF_QFULL(ifq)) {
2371 m_freem(m);
2372 return(ENOBUFS);
2373 } else {
2374 IF_ENQUEUE(ifq, m);
2375 return(0);
2376 }
2377}
2378
2379struct mbuf *
2380ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
2381{
2382 struct mbuf *m;
2383
2384 switch (op) {
2385 case ALTDQ_POLL:
2386 IF_POLL(ifq, m);
2387 break;
2388 case ALTDQ_REMOVE:
2389 logifq(dequeue, ifq);
2390 IF_DEQUEUE(ifq, m);
2391 break;
2392 default:
2393 panic("unsupported ALTQ dequeue op: %d", op);
2394 }
2395 KKASSERT(mpolled == NULL || mpolled == m);
2396 return(m);
2397}
2398
2399int
2400ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
2401{
2402 switch (req) {
2403 case ALTRQ_PURGE:
2404 IF_DRAIN(ifq);
2405 break;
2406 default:
2407 panic("unsupported ALTQ request: %d", req);
2408 }
2409 return(0);
2410}
2411
/*
 * Enqueue an mbuf on the interface's send queue and kick
 * ifnet.if_start, preferring a direct call on this cpu and falling
 * back to scheduling if_start on the ifnet's owner cpu when the
 * tx serializer is contended.  Returns 0 on success or the enqueue
 * error (the mbuf is consumed either way by ifq_enqueue_locked()).
 */
int
ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
{
	struct ifaltq *ifq = &ifp->if_snd;
	int running = 0, error, start = 0;

	ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);

	ALTQ_LOCK(ifq);
	error = ifq_enqueue_locked(ifq, m, pa);
	if (error) {
		ALTQ_UNLOCK(ifq);
		return error;
	}
	if (!ifq->altq_started) {
		/*
		 * Hold the interlock of ifnet.if_start
		 */
		ifq->altq_started = 1;
		start = 1;
	}
	ALTQ_UNLOCK(ifq);

	/*
	 * NOTE(review): m is dereferenced here after it was enqueued
	 * above -- this presumably relies on no other cpu dequeueing
	 * and freeing it before these reads complete; confirm.
	 */
	ifp->if_obytes += m->m_pkthdr.len;
	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	if (!start) {
		/* Someone else holds the if_start interlock. */
		logifstart(avoid, ifp);
		return 0;
	}

	if (ifq_dispatch_schedonly) {
		/*
		 * Always schedule ifnet.if_start on ifnet's CPU,
		 * short circuit the rest of this function.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
		return 0;
	}

	/*
	 * Try to do direct ifnet.if_start first, if there is
	 * contention on ifnet's serializer, ifnet.if_start will
	 * be scheduled on ifnet's CPU.
	 */
	if (!ifnet_tryserialize_tx(ifp)) {
		/*
		 * ifnet serializer contention happened,
		 * ifnet.if_start is scheduled on ifnet's
		 * CPU, and we keep going.
		 */
		logifstart(contend_sched, ifp);
		if_start_schedule(ifp);
		return 0;
	}

	if ((ifp->if_flags & IFF_OACTIVE) == 0) {
		logifstart(run, ifp);
		ifp->if_start(ifp);
		if ((ifp->if_flags &
		    (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
			running = 1;
	}

	ifnet_deserialize_tx(ifp);

	if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
		/*
		 * More data need to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		logifstart(sched, ifp);
		if_start_schedule(ifp);
	}
	return 0;
}
2491
/*
 * Allocate a zeroed ifaddr of 'size' bytes (size must be at least
 * sizeof(struct ifaddr)) plus one ifaddr_container per cpu.  Each
 * container starts with a reference count of 1; the ifaddr is only
 * freed once every cpu has dropped its reference via ifac_free().
 */
void *
ifa_create(int size, int flags)
{
	struct ifaddr *ifa;
	int i;

	KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));

	ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
	if (ifa == NULL)
		return NULL;

	/*
	 * NOTE(review): the per-cpu container array is always
	 * allocated with M_WAITOK even when the caller passed
	 * non-blocking 'flags' -- confirm no caller depends on
	 * M_NOWAIT semantics here.
	 */
	ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
				      M_IFADDR, M_WAITOK | M_ZERO);
	ifa->ifa_ncnt = ncpus;
	for (i = 0; i < ncpus; ++i) {
		struct ifaddr_container *ifac = &ifa->ifa_containers[i];

		ifac->ifa_magic = IFA_CONTAINER_MAGIC;
		ifac->ifa = ifa;
		ifac->ifa_refcnt = 1;
	}
#ifdef IFADDR_DEBUG
	kprintf("alloc ifa %p %d\n", ifa, size);
#endif
	return ifa;
}
2519
/*
 * Tear down one cpu's ifaddr_container after its reference count has
 * dropped to zero.  When the last cpu's container is retired
 * (ifa_ncnt reaches zero) the container array and the ifaddr itself
 * are freed.
 */
void
ifac_free(struct ifaddr_container *ifac, int cpu_id)
{
	struct ifaddr *ifa = ifac->ifa;

	KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
	KKASSERT(ifac->ifa_refcnt == 0);
	KASSERT(ifac->ifa_listmask == 0,
		("ifa is still on %#x lists\n", ifac->ifa_listmask));

	/* Poison the magic so stale references are caught early. */
	ifac->ifa_magic = IFA_CONTAINER_DEAD;

#ifdef IFADDR_DEBUG_VERBOSE
	kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
#endif

	KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
		("invalid # of ifac, %d\n", ifa->ifa_ncnt));
	/* atomic_fetchadd_int returns the PRE-decrement value. */
	if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
#ifdef IFADDR_DEBUG
		kprintf("free ifa %p\n", ifa);
#endif
		kfree(ifa->ifa_containers, M_IFADDR);
		kfree(ifa, M_IFADDR);
	}
}
2546
2547static void
2548ifa_iflink_dispatch(struct netmsg *nmsg)
2549{
2550 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2551 struct ifaddr *ifa = msg->ifa;
2552 struct ifnet *ifp = msg->ifp;
2553 int cpu = mycpuid;
2554 struct ifaddr_container *ifac;
2555
2556 crit_enter();
2557
2558 ifac = &ifa->ifa_containers[cpu];
2559 ASSERT_IFAC_VALID(ifac);
2560 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
2561 ("ifaddr is on if_addrheads\n"));
2562
2563 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2564 if (msg->tail)
2565 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2566 else
2567 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
2568
2569 crit_exit();
2570
2571 ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2572}
2573
2574void
2575ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2576{
2577 struct netmsg_ifaddr msg;
2578
2579 netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2580 0, ifa_iflink_dispatch);
2581 msg.ifa = ifa;
2582 msg.ifp = ifp;
2583 msg.tail = tail;
2584
2585 ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2586}
2587
2588static void
2589ifa_ifunlink_dispatch(struct netmsg *nmsg)
2590{
2591 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2592 struct ifaddr *ifa = msg->ifa;
2593 struct ifnet *ifp = msg->ifp;
2594 int cpu = mycpuid;
2595 struct ifaddr_container *ifac;
2596
2597 crit_enter();
2598
2599 ifac = &ifa->ifa_containers[cpu];
2600 ASSERT_IFAC_VALID(ifac);
2601 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
2602 ("ifaddr is not on if_addrhead\n"));
2603
2604 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2605 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
2606
2607 crit_exit();
2608
2609 ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2610}
2611
2612void
2613ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2614{
2615 struct netmsg_ifaddr msg;
2616
2617 netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2618 0, ifa_ifunlink_dispatch);
2619 msg.ifa = ifa;
2620 msg.ifp = ifp;
2621
2622 ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2623}
2624
2625static void
2626ifa_destroy_dispatch(struct netmsg *nmsg)
2627{
2628 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2629
2630 IFAFREE(msg->ifa);
2631 ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2632}
2633
2634void
2635ifa_destroy(struct ifaddr *ifa)
2636{
2637 struct netmsg_ifaddr msg;
2638
2639 netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2640 0, ifa_destroy_dispatch);
2641 msg.ifa = ifa;
2642
2643 ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2644}
2645
2646struct lwkt_port *
2647ifnet_portfn(int cpu)
2648{
2649 return &ifnet_threads[cpu].td_msgport;
2650}
2651
2652void
2653ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2654{
2655 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2656
2657 if (next_cpu < ncpus)
2658 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2659 else
2660 lwkt_replymsg(lmsg, 0);
2661}
2662
2663int
2664ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2665{
2666 KKASSERT(cpu < ncpus);
2667 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
2668}
2669
2670void
2671ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2672{
2673 KKASSERT(cpu < ncpus);
2674 lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2675}
2676
2677static void
2678ifnetinit(void *dummy __unused)
2679{
2680 int i;
2681
2682 for (i = 0; i < ncpus; ++i) {
2683 struct thread *thr = &ifnet_threads[i];
2684
2685 lwkt_create(netmsg_service_loop, &ifnet_mpsafe_thread, NULL,
2686 thr, TDF_NETWORK | TDF_MPSAFE, i, "ifnet %d", i);
2687 netmsg_service_port_init(&thr->td_msgport);
2688 }
2689}
2690
2691struct ifnet *
2692ifnet_byindex(unsigned short idx)
2693{
2694 if (idx > if_index)
2695 return NULL;
2696 return ifindex2ifnet[idx];
2697}
2698
2699struct ifaddr *
2700ifaddr_byindex(unsigned short idx)
2701{
2702 struct ifnet *ifp;
2703
2704 ifp = ifnet_byindex(idx);
2705 if (!ifp)
2706 return NULL;
2707 return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
2708}
2709
/*
 * Register the per-interface-type allocation/free handlers used by
 * if_alloc()/if_free() to manage type specific l2com data.  A type
 * may only be registered once.
 */
void
if_register_com_alloc(u_char type,
		      if_com_alloc_t *a, if_com_free_t *f)
{

	KASSERT(if_com_alloc[type] == NULL,
	    ("if_register_com_alloc: %d already registered", type));
	KASSERT(if_com_free[type] == NULL,
	    ("if_register_com_alloc: %d free already registered", type));

	if_com_alloc[type] = a;
	if_com_free[type] = f;
}
2723
/*
 * Remove a previously registered per-interface-type alloc/free
 * handler pair.  The type must currently be registered.
 */
void
if_deregister_com_alloc(u_char type)
{

	KASSERT(if_com_alloc[type] != NULL,
	    ("if_deregister_com_alloc: %d not registered", type));
	KASSERT(if_com_free[type] != NULL,
	    ("if_deregister_com_alloc: %d free not registered", type));
	if_com_alloc[type] = NULL;
	if_com_free[type] = NULL;
}