1 /*
2  * Copyright (c) 1980, 1986, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by the University of
16  *      California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *      @(#)if.c        8.3 (Berkeley) 1/4/94
34  * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35  * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
36  */
37
38 #include "opt_compat.h"
39 #include "opt_inet6.h"
40 #include "opt_inet.h"
41 #include "opt_polling.h"
42 #include "opt_ifpoll.h"
43
44 #include <sys/param.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/systm.h>
48 #include <sys/proc.h>
49 #include <sys/priv.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/socketops.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/sockio.h>
58 #include <sys/syslog.h>
59 #include <sys/sysctl.h>
60 #include <sys/domain.h>
61 #include <sys/thread.h>
62 #include <sys/thread2.h>
63 #include <sys/serialize.h>
64 #include <sys/msgport2.h>
65 #include <sys/bus.h>
66
67 #include <net/if.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_types.h>
71 #include <net/if_var.h>
72 #include <net/ifq_var.h>
73 #include <net/radix.h>
74 #include <net/route.h>
75 #include <net/if_clone.h>
76 #include <net/netisr.h>
77 #include <net/netmsg2.h>
78
79 #include <machine/atomic.h>
80 #include <machine/stdarg.h>
81 #include <machine/smp.h>
82
83 #if defined(INET) || defined(INET6)
84 /*XXX*/
85 #include <netinet/in.h>
86 #include <netinet/in_var.h>
87 #include <netinet/if_ether.h>
88 #ifdef INET6
89 #include <netinet6/in6_var.h>
90 #include <netinet6/in6_ifattach.h>
91 #endif
92 #endif
93
94 #if defined(COMPAT_43)
95 #include <emulation/43bsd/43bsd_socket.h>
96 #endif /* COMPAT_43 */
97
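/*
 * Argument block for ifaddr-related netmsgs: the address being manipulated,
 * its interface, and a tail-insertion flag.
 */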
98 struct netmsg_ifaddr {
99         struct netmsg   netmsg;
100         struct ifaddr   *ifa;
101         struct ifnet    *ifp;
102         int             tail;
103 };
104
105 /*
106  * System initialization
107  */
108 static void     if_attachdomain(void *);
109 static void     if_attachdomain1(struct ifnet *);
110 static int      ifconf(u_long, caddr_t, struct ucred *);
111 static void     ifinit(void *);
112 static void     ifnetinit(void *);
113 static void     if_slowtimo(void *);
114 static void     link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
115 static int      if_rtdel(struct radix_node *, void *);
116
117 #ifdef INET6
118 /*
119  * XXX: declared here to avoid including many inet6-related files;
120  * should this be more generalized?
121  */
122 extern void     nd6_setmtu(struct ifnet *);
123 #endif
124
125 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
126 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
127
128 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
129 /* Must be after netisr_init */
130 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
131
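/* Per-interface-type (ifnet.if_type) common-structure alloc/free hooks. */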
132 static  if_com_alloc_t *if_com_alloc[256];
133 static  if_com_free_t *if_com_free[256];
134
135 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
136 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
137 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
138
139 int                     ifqmaxlen = IFQ_MAXLEN;
140 struct ifnethead        ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
141
142 /* In ifq_dispatch(), try to do direct ifnet.if_start first */
143 static int              ifq_dispatch_schedonly = 0;
144 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
145            &ifq_dispatch_schedonly, 0, "");
146
147 /* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
148 static int              ifq_dispatch_schednochk = 0;
149 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
150            &ifq_dispatch_schednochk, 0, "");
151
152 /* In if_devstart(), try to do direct ifnet.if_start first */
153 static int              if_devstart_schedonly = 0;
154 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
155            &if_devstart_schedonly, 0, "");
156
157 /* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
158 static int              if_devstart_schednochk = 0;
159 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
160            &if_devstart_schednochk, 0, "");
161
162 #ifdef SMP
163 /* Schedule ifnet.if_start on the current CPU */
164 static int              if_start_oncpu_sched = 0;
165 SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
166            &if_start_oncpu_sched, 0, "");
167 #endif
168
169 struct callout          if_slowtimo_timer;
170
171 int                     if_index = 0;
172 struct ifnet            **ifindex2ifnet = NULL;
173 static struct thread    ifnet_threads[MAXCPU];
174 static int              ifnet_mpsafe_thread = NETMSG_SERVICE_MPSAFE;
175
176 #define IFQ_KTR_STRING          "ifq=%p"
177 #define IFQ_KTR_ARG_SIZE        (sizeof(void *))
178 #ifndef KTR_IFQ
179 #define KTR_IFQ                 KTR_ALL
180 #endif
181 KTR_INFO_MASTER(ifq);
182 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
183 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
184 #define logifq(name, arg)       KTR_LOG(ifq_ ## name, arg)
185
186 #define IF_START_KTR_STRING     "ifp=%p"
187 #define IF_START_KTR_ARG_SIZE   (sizeof(void *))
188 #ifndef KTR_IF_START
189 #define KTR_IF_START            KTR_ALL
190 #endif
191 KTR_INFO_MASTER(if_start);
192 KTR_INFO(KTR_IF_START, if_start, run, 0,
193          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
194 KTR_INFO(KTR_IF_START, if_start, sched, 1,
195          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
196 KTR_INFO(KTR_IF_START, if_start, avoid, 2,
197          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
198 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
199          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
200 #ifdef SMP
201 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
202          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
203 #endif
204 #define logifstart(name, arg)   KTR_LOG(if_start_ ## name, arg)
205
206 /*
207  * Network interface utility routines.
208  *
209  * Routines with ifa_ifwith* names take sockaddr *'s as
210  * parameters.
211  */
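/*
 * Initialize the interface slow-timeout callout, give any driver that did
 * not set ifq_maxlen a default send queue length, and start the watchdog
 * timer.
 */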
212 /* ARGSUSED*/
213 void
214 ifinit(void *dummy)
215 {
216         struct ifnet *ifp;
217
218         callout_init(&if_slowtimo_timer);
219
220         crit_enter();
221         TAILQ_FOREACH(ifp, &ifnet, if_link) {
222                 if (ifp->if_snd.ifq_maxlen == 0) {
223                         if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
224                         ifp->if_snd.ifq_maxlen = ifqmaxlen;
225                 }
226         }
227         crit_exit();
228
229         if_slowtimo(0);
230 }
231
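/*
 * Default policy for choosing the CPU on which ifnet.if_start is scheduled.
 */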
232 static int
233 if_start_cpuid(struct ifnet *ifp)
234 {
235         return ifp->if_cpuid;
236 }
237
238 #ifdef DEVICE_POLLING
239 static int
240 if_start_cpuid_poll(struct ifnet *ifp)
241 {
242         int poll_cpuid = ifp->if_poll_cpuid;
243
244         if (poll_cpuid >= 0)
245                 return poll_cpuid;
246         else
247                 return ifp->if_cpuid;
248 }
249 #endif
250
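/*
 * Send this CPU's if_start netmsg to the local ifnet thread, unless the
 * message is already in flight.  Used both directly and as an IPI handler.
 */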
251 static void
252 if_start_ipifunc(void *arg)
253 {
254         struct ifnet *ifp = arg;
255         struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;
256
257         crit_enter();
258         if (lmsg->ms_flags & MSGF_DONE)
259                 lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
260         crit_exit();
261 }
262
263 /*
264  * Schedule ifnet.if_start on ifnet's CPU
265  */
266 static void
267 if_start_schedule(struct ifnet *ifp)
268 {
269 #ifdef SMP
270         int cpu;
271
272         if (if_start_oncpu_sched)
273                 cpu = mycpuid;
274         else
275                 cpu = ifp->if_start_cpuid(ifp);
276
277         if (cpu != mycpuid)
278                 lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
279         else
280 #endif
281         if_start_ipifunc(ifp);
282 }
283
284 /*
285  * NOTE:
286  * This function will release the ifnet.if_start interlock
287  * if ifnet.if_start does not need to be scheduled.
288  */
289 static __inline int
290 if_start_need_schedule(struct ifaltq *ifq, int running)
291 {
292         if (!running || ifq_is_empty(ifq)
293 #ifdef ALTQ
294             || ifq->altq_tbr != NULL
295 #endif
296         ) {
297                 ALTQ_LOCK(ifq);
298                 /*
299                  * ifnet.if_start interlock is released if:
300                  * 1) Hardware cannot take any packets, due to
301                  *    o  interface is marked down
302                  *    o  hardware queue is full (IFF_OACTIVE)
303                  *    In the second case, the hardware interrupt
304                  *    or polling(4) will call/schedule ifnet.if_start
305                  *    when the hardware queue is ready
306                  * 2) There are no packets in ifnet.if_snd.
307                  *    A further ifq_dispatch or ifq_handoff will call/
308                  *    schedule ifnet.if_start
309                  * 3) TBR is used and it does not allow further
310                  *    dequeueing.
311                  *    The TBR callout will call ifnet.if_start
312                  */
313                 if (!running || !ifq_data_ready(ifq)) {
314                         ifq->altq_started = 0;
315                         ALTQ_UNLOCK(ifq);
316                         return 0;
317                 }
318                 ALTQ_UNLOCK(ifq);
319         }
320         return 1;
321 }
322
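/*
 * Netmsg handler which actually runs ifnet.if_start on the ifnet's owner
 * CPU, rescheduling itself when more work remains or when the ifnet's CPU
 * binding has changed underneath us.
 */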
323 static void
324 if_start_dispatch(struct netmsg *nmsg)
325 {
326         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
327         struct ifnet *ifp = lmsg->u.ms_resultp;
328         struct ifaltq *ifq = &ifp->if_snd;
329         int running = 0;
330
331         crit_enter();
332         lwkt_replymsg(lmsg, 0); /* reply ASAP */
333         crit_exit();
334
335 #ifdef SMP
336         if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
337                 /*
338                  * If the ifnet is still up, we need to
339                  * chase its CPU change.
340                  */
341                 if (ifp->if_flags & IFF_UP) {
342                         logifstart(chase_sched, ifp);
343                         if_start_schedule(ifp);
344                         return;
345                 } else {
346                         goto check;
347                 }
348         }
349 #endif
350
351         if (ifp->if_flags & IFF_UP) {
352                 ifnet_serialize_tx(ifp); /* XXX try? */
353                 if ((ifp->if_flags & IFF_OACTIVE) == 0) {
354                         logifstart(run, ifp);
355                         ifp->if_start(ifp);
356                         if ((ifp->if_flags &
357                         (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
358                                 running = 1;
359                 }
360                 ifnet_deserialize_tx(ifp);
361         }
362 #ifdef SMP
363 check:
364 #endif
365         if (if_start_need_schedule(ifq, running)) {
366                 crit_enter();
367                 if (lmsg->ms_flags & MSGF_DONE) { /* XXX necessary? */
368                         logifstart(sched, ifp);
369                         lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
370                 }
371                 crit_exit();
372         }
373 }
374
375 /* Device driver ifnet.if_start helper function */
376 void
377 if_devstart(struct ifnet *ifp)
378 {
379         struct ifaltq *ifq = &ifp->if_snd;
380         int running = 0;
381
382         ASSERT_IFNET_SERIALIZED_TX(ifp);
383
384         ALTQ_LOCK(ifq);
385         if (ifq->altq_started || !ifq_data_ready(ifq)) {
386                 logifstart(avoid, ifp);
387                 ALTQ_UNLOCK(ifq);
388                 return;
389         }
390         ifq->altq_started = 1;
391         ALTQ_UNLOCK(ifq);
392
393         if (if_devstart_schedonly) {
394                 /*
395                  * Always schedule ifnet.if_start on the ifnet's CPU,
396                  * short-circuiting the rest of this function.
397                  */
398                 logifstart(sched, ifp);
399                 if_start_schedule(ifp);
400                 return;
401         }
402
403         logifstart(run, ifp);
404         ifp->if_start(ifp);
405
406         if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
407                 running = 1;
408
409         if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
410                 /*
411                  * More data needs to be transmitted; ifnet.if_start is
412                  * scheduled on the ifnet's CPU and we keep going.
413                  * NOTE: ifnet.if_start interlock is not released.
414                  */
415                 logifstart(sched, ifp);
416                 if_start_schedule(ifp);
417         }
418 }
419
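/*
 * Default ifnet serialization methods, all backed by the single
 * ifnet.if_serializer.
 */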
420 static void
421 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
422 {
423         lwkt_serialize_enter(ifp->if_serializer);
424 }
425
426 static void
427 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
428 {
429         lwkt_serialize_exit(ifp->if_serializer);
430 }
431
432 static int
433 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
434 {
435         return lwkt_serialize_try(ifp->if_serializer);
436 }
437
438 #ifdef INVARIANTS
439 static void
440 if_default_serialize_assert(struct ifnet *ifp,
441                             enum ifnet_serialize slz __unused,
442                             boolean_t serialized)
443 {
444         if (serialized)
445                 ASSERT_SERIALIZED(ifp->if_serializer);
446         else
447                 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
448 }
449 #endif
450
451 /*
452  * Attach an interface to the list of "active" interfaces.
453  *
454  * The serializer is optional.  If non-NULL, access to the interface
455  * may be MPSAFE.
456  */
457 void
458 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
459 {
460         unsigned socksize, ifasize;
461         int namelen, masklen;
462         struct sockaddr_dl *sdl;
463         struct ifaddr *ifa;
464         struct ifaltq *ifq;
465         int i;
466
467         static int if_indexlim = 8;
468
469         if (ifp->if_serialize != NULL) {
470                 KASSERT(ifp->if_deserialize != NULL &&
471                         ifp->if_tryserialize != NULL &&
472                         ifp->if_serialize_assert != NULL,
473                         ("serialize functions are partially setup\n"));
474
475                 /*
476                  * If the device supplies serialize functions,
477                  * then clear if_serializer to catch any invalid
478                  * usage of this field.
479                  */
480                 KASSERT(serializer == NULL,
481                         ("both serialize functions and default serializer "
482                          "are supplied\n"));
483                 ifp->if_serializer = NULL;
484         } else {
485                 KASSERT(ifp->if_deserialize == NULL &&
486                         ifp->if_tryserialize == NULL &&
487                         ifp->if_serialize_assert == NULL,
488                         ("serialize functions are partially setup\n"));
489                 ifp->if_serialize = if_default_serialize;
490                 ifp->if_deserialize = if_default_deserialize;
491                 ifp->if_tryserialize = if_default_tryserialize;
492 #ifdef INVARIANTS
493                 ifp->if_serialize_assert = if_default_serialize_assert;
494 #endif
495
496                 /*
497                  * The serializer can be passed in from the device,
498                  * allowing the same serializer to be used for both
499                  * the interrupt interlock and the device queue.
500                  * If not specified, the netif structure will use an
501                  * embedded serializer.
502                  */
503                 if (serializer == NULL) {
504                         serializer = &ifp->if_default_serializer;
505                         lwkt_serialize_init(serializer);
506                 }
507                 ifp->if_serializer = serializer;
508         }
509
510         ifp->if_start_cpuid = if_start_cpuid;
511         ifp->if_cpuid = 0;
512
513 #ifdef DEVICE_POLLING
514         /* Device is not in polling mode by default */
515         ifp->if_poll_cpuid = -1;
516         if (ifp->if_poll != NULL)
517                 ifp->if_start_cpuid = if_start_cpuid_poll;
518 #endif
519
520         ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
521                                      M_LWKTMSG, M_WAITOK);
522         for (i = 0; i < ncpus; ++i) {
523                 netmsg_init(&ifp->if_start_nmsg[i], NULL, &netisr_adone_rport,
524                             0, if_start_dispatch);
525                 ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
526         }
527
528         TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
529         ifp->if_index = ++if_index;
530
531         /*
532          * XXX -
533          * The old code would work if the interface passed a pre-existing
534          * chain of ifaddrs to this code.  We don't trust our callers to
535          * properly initialize the tailq, however, so we no longer allow
536          * this unlikely case.
537          */
538         ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
539                                     M_IFADDR, M_WAITOK | M_ZERO);
540         for (i = 0; i < ncpus; ++i)
541                 TAILQ_INIT(&ifp->if_addrheads[i]);
542
543         TAILQ_INIT(&ifp->if_prefixhead);
544         TAILQ_INIT(&ifp->if_multiaddrs);
545         getmicrotime(&ifp->if_lastchange);
546         if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
547                 unsigned int n;
548                 struct ifnet **q;
549
550                 if_indexlim <<= 1;
551
552                 /* grow ifindex2ifnet */
553                 n = if_indexlim * sizeof(*q);
554                 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
555                 if (ifindex2ifnet) {
556                         bcopy(ifindex2ifnet, q, n/2);
557                         kfree(ifindex2ifnet, M_IFADDR);
558                 }
559                 ifindex2ifnet = q;
560         }
561
562         ifindex2ifnet[if_index] = ifp;
563
564         /*
565          * create a Link Level name for this device
566          */
567         namelen = strlen(ifp->if_xname);
568 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
569         masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
570         socksize = masklen + ifp->if_addrlen;
571 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
572         if (socksize < sizeof(*sdl))
573                 socksize = sizeof(*sdl);
574         socksize = ROUNDUP(socksize);
575         ifasize = sizeof(struct ifaddr) + 2 * socksize;
576         ifa = ifa_create(ifasize, M_WAITOK);
577         sdl = (struct sockaddr_dl *)(ifa + 1);
578         sdl->sdl_len = socksize;
579         sdl->sdl_family = AF_LINK;
580         bcopy(ifp->if_xname, sdl->sdl_data, namelen);
581         sdl->sdl_nlen = namelen;
582         sdl->sdl_index = ifp->if_index;
583         sdl->sdl_type = ifp->if_type;
584         ifp->if_lladdr = ifa;
585         ifa->ifa_ifp = ifp;
586         ifa->ifa_rtrequest = link_rtrequest;
587         ifa->ifa_addr = (struct sockaddr *)sdl;
588         sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
589         ifa->ifa_netmask = (struct sockaddr *)sdl;
590         sdl->sdl_len = masklen;
591         while (namelen != 0)
592                 sdl->sdl_data[--namelen] = 0xff;
593         ifa_iflink(ifa, ifp, 0 /* Insert head */);
594
595         EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
596         devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
597
598         ifq = &ifp->if_snd;
599         ifq->altq_type = 0;
600         ifq->altq_disc = NULL;
601         ifq->altq_flags &= ALTQF_CANTCHANGE;
602         ifq->altq_tbr = NULL;
603         ifq->altq_ifp = ifp;
604         ifq->altq_started = 0;
605         ifq->altq_prepended = NULL;
606         ALTQ_LOCK_INIT(ifq);
607         ifq_set_classic(ifq);
608
609         if (!SLIST_EMPTY(&domains))
610                 if_attachdomain1(ifp);
611
612         /* Announce the interface. */
613         rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
614 }
615
616 static void
617 if_attachdomain(void *dummy)
618 {
619         struct ifnet *ifp;
620
621         crit_enter();
622         TAILQ_FOREACH(ifp, &ifnet, if_list)
623                 if_attachdomain1(ifp);
624         crit_exit();
625 }
626 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
627         if_attachdomain, NULL);
628
629 static void
630 if_attachdomain1(struct ifnet *ifp)
631 {
632         struct domain *dp;
633
634         crit_enter();
635
636         /* address family dependent data region */
637         bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
638         SLIST_FOREACH(dp, &domains, dom_next)
639                 if (dp->dom_ifattach)
640                         ifp->if_afdata[dp->dom_family] =
641                                 (*dp->dom_ifattach)(ifp);
642         crit_exit();
643 }
644
645 /*
646  * Purge all addresses whose type is _not_ AF_LINK
647  */
648 void
649 if_purgeaddrs_nolink(struct ifnet *ifp)
650 {
651         struct ifaddr_container *ifac, *next;
652
653         TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
654                               ifa_link, next) {
655                 struct ifaddr *ifa = ifac->ifa;
656
657                 /* Leave link ifaddr as it is */
658                 if (ifa->ifa_addr->sa_family == AF_LINK)
659                         continue;
660 #ifdef INET
661                 /* XXX: Ugly!! ad hoc just for INET */
662                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
663                         struct ifaliasreq ifr;
664 #ifdef IFADDR_DEBUG_VERBOSE
665                         int i;
666
667                         kprintf("purge in4 addr %p: ", ifa);
668                         for (i = 0; i < ncpus; ++i)
669                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
670                         kprintf("\n");
671 #endif
672
673                         bzero(&ifr, sizeof ifr);
674                         ifr.ifra_addr = *ifa->ifa_addr;
675                         if (ifa->ifa_dstaddr)
676                                 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
677                         if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
678                                        NULL) == 0)
679                                 continue;
680                 }
681 #endif /* INET */
682 #ifdef INET6
683                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
684 #ifdef IFADDR_DEBUG_VERBOSE
685                         int i;
686
687                         kprintf("purge in6 addr %p: ", ifa);
688                         for (i = 0; i < ncpus; ++i)
689                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
690                         kprintf("\n");
691 #endif
692
693                         in6_purgeaddr(ifa);
694                         /* ifp_addrhead is already updated */
695                         continue;
696                 }
697 #endif /* INET6 */
698                 ifa_ifunlink(ifa, ifp);
699                 ifa_destroy(ifa);
700         }
701 }
702
703 /*
704  * Detach an interface, removing it from the
705  * list of "active" interfaces.
706  */
707 void
708 if_detach(struct ifnet *ifp)
709 {
710         struct radix_node_head  *rnh;
711         int i;
712         int cpu, origcpu;
713         struct domain *dp;
714
715         EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
716
717         /*
718          * Remove routes and flush queues.
719          */
720         crit_enter();
721 #ifdef DEVICE_POLLING
722         if (ifp->if_flags & IFF_POLLING)
723                 ether_poll_deregister(ifp);
724 #endif
725 #ifdef IFPOLL_ENABLE
726         if (ifp->if_flags & IFF_NPOLLING)
727                 ifpoll_deregister(ifp);
728 #endif
729         if_down(ifp);
730
731 #ifdef ALTQ
732         if (ifq_is_enabled(&ifp->if_snd))
733                 altq_disable(&ifp->if_snd);
734         if (ifq_is_attached(&ifp->if_snd))
735                 altq_detach(&ifp->if_snd);
736 #endif
737
738         /*
739          * Clean up all addresses.
740          */
741         ifp->if_lladdr = NULL;
742
743         if_purgeaddrs_nolink(ifp);
744         if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
745                 struct ifaddr *ifa;
746
747                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
748                 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
749                         ("non-link ifaddr is left on if_addrheads"));
750
751                 ifa_ifunlink(ifa, ifp);
752                 ifa_destroy(ifa);
753                 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
754                         ("there are still ifaddrs left on if_addrheads"));
755         }
756
757 #ifdef INET
758         /*
759          * Remove all IPv4 kernel structures related to ifp.
760          */
761         in_ifdetach(ifp);
762 #endif
763
764 #ifdef INET6
765         /*
766          * Remove all IPv6 kernel structs related to ifp.  This should be done
767          * before removing routing entries below, since IPv6 interface direct
768          * routes are expected to be removed by the IPv6-specific kernel API.
769          * Otherwise, the kernel will detect the inconsistency and complain.
770          */
771         in6_ifdetach(ifp);
772 #endif
773
774         /*
775          * Delete all remaining routes using this interface.
776          * Unfortunately the only way to do this is to slog through
777          * the entire routing table looking for routes which point
778          * to this interface...oh well...
779          */
780         origcpu = mycpuid;
781         for (cpu = 0; cpu < ncpus2; cpu++) {
782                 lwkt_migratecpu(cpu);
783                 for (i = 1; i <= AF_MAX; i++) {
784                         if ((rnh = rt_tables[cpu][i]) == NULL)
785                                 continue;
786                         rnh->rnh_walktree(rnh, if_rtdel, ifp);
787                 }
788         }
789         lwkt_migratecpu(origcpu);
790
791         /* Announce that the interface is gone. */
792         rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
793         devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
794
795         SLIST_FOREACH(dp, &domains, dom_next)
796                 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
797                         (*dp->dom_ifdetach)(ifp,
798                                 ifp->if_afdata[dp->dom_family]);
799
800         /*
801          * Remove interface from ifindex2ifp[] and maybe decrement if_index.
802          */
803         ifindex2ifnet[ifp->if_index] = NULL;
804         while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
805                 if_index--;
806
807         TAILQ_REMOVE(&ifnet, ifp, if_link);
808         kfree(ifp->if_addrheads, M_IFADDR);
809         kfree(ifp->if_start_nmsg, M_LWKTMSG);
810         crit_exit();
811 }
812
813 /*
814  * Delete Routes for a Network Interface
815  *
816  * Called for each routing entry via the rnh->rnh_walktree() call above
817  * to delete all route entries referencing a detaching network interface.
818  *
819  * Arguments:
820  *      rn      pointer to node in the routing table
821  *      arg     argument passed to rnh->rnh_walktree() - detaching interface
822  *
823  * Returns:
824  *      0       successful
825  *      errno   failed - reason indicated
826  *
827  */
828 static int
829 if_rtdel(struct radix_node *rn, void *arg)
830 {
831         struct rtentry  *rt = (struct rtentry *)rn;
832         struct ifnet    *ifp = arg;
833         int             err;
834
835         if (rt->rt_ifp == ifp) {
836
837                 /*
838                  * Protect (sorta) against walktree recursion problems
839                  * with cloned routes
840                  */
841                 if (!(rt->rt_flags & RTF_UP))
842                         return (0);
843
844                 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
845                                 rt_mask(rt), rt->rt_flags,
846                                 NULL);
847                 if (err) {
848                         log(LOG_WARNING, "if_rtdel: error %d\n", err);
849                 }
850         }
851
852         return (0);
853 }
854
855 /*
856  * Locate an interface based on a complete address.
857  */
858 struct ifaddr *
859 ifa_ifwithaddr(struct sockaddr *addr)
860 {
861         struct ifnet *ifp;
862
863         TAILQ_FOREACH(ifp, &ifnet, if_link) {
864                 struct ifaddr_container *ifac;
865
866                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
867                         struct ifaddr *ifa = ifac->ifa;
868
869                         if (ifa->ifa_addr->sa_family != addr->sa_family)
870                                 continue;
871                         if (sa_equal(addr, ifa->ifa_addr))
872                                 return (ifa);
873                         if ((ifp->if_flags & IFF_BROADCAST) &&
874                             ifa->ifa_broadaddr &&
875                             /* IPv6 doesn't have broadcast */
876                             ifa->ifa_broadaddr->sa_len != 0 &&
877                             sa_equal(ifa->ifa_broadaddr, addr))
878                                 return (ifa);
879                 }
880         }
881         return (NULL);
882 }
883 /*
884  * Locate the point to point interface with a given destination address.
885  */
886 struct ifaddr *
887 ifa_ifwithdstaddr(struct sockaddr *addr)
888 {
889         struct ifnet *ifp;
890
891         TAILQ_FOREACH(ifp, &ifnet, if_link) {
892                 struct ifaddr_container *ifac;
893
894                 if (!(ifp->if_flags & IFF_POINTOPOINT))
895                         continue;
896
897                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
898                         struct ifaddr *ifa = ifac->ifa;
899
900                         if (ifa->ifa_addr->sa_family != addr->sa_family)
901                                 continue;
902                         if (ifa->ifa_dstaddr &&
903                             sa_equal(addr, ifa->ifa_dstaddr))
904                                 return (ifa);
905                 }
906         }
907         return (NULL);
908 }
909
910 /*
911  * Find an interface on a specific network.  If there are many, the
912  * most specific match is chosen.
913  */
914 struct ifaddr *
915 ifa_ifwithnet(struct sockaddr *addr)
916 {
917         struct ifnet *ifp;
918         struct ifaddr *ifa_maybe = NULL;
919         u_int af = addr->sa_family;
920         char *addr_data = addr->sa_data, *cplim;
921
922         /*
923          * AF_LINK addresses can be looked up directly by their index number,
924          * so do that if we can.
925          */
926         if (af == AF_LINK) {
927                 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
928
929                 if (sdl->sdl_index && sdl->sdl_index <= if_index)
930                         return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
931         }
932
933         /*
934          * Scan through each interface, looking for ones that have
935          * addresses in this address family.
936          */
937         TAILQ_FOREACH(ifp, &ifnet, if_link) {
938                 struct ifaddr_container *ifac;
939
940                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
941                         struct ifaddr *ifa = ifac->ifa;
942                         char *cp, *cp2, *cp3;
943
944                         if (ifa->ifa_addr->sa_family != af)
945 next:                           continue;
946                         if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
947                                 /*
948                                  * This is a bit broken as it doesn't
949                                  * take into account that the remote end may
950                                  * be a single node in the network we are
951                                  * looking for.
952                                  * The trouble is that we don't know the
953                                  * netmask for the remote end.
954                                  */
955                                 if (ifa->ifa_dstaddr != NULL &&
956                                     sa_equal(addr, ifa->ifa_dstaddr))
957                                         return (ifa);
958                         } else {
959                                 /*
960                                  * if we have a special address handler,
961                                  * then use it instead of the generic one.
962                                  */
963                                 if (ifa->ifa_claim_addr) {
964                                         if ((*ifa->ifa_claim_addr)(ifa, addr)) {
965                                                 return (ifa);
966                                         } else {
967                                                 continue;
968                                         }
969                                 }
970
971                                 /*
972                                  * Scan all the bits in the ifa's address.
973                                  * If a bit disagrees with what we are
974                                  * looking for, mask it with the netmask
975                                  * to see if it really matters.
976                                  * (A byte at a time)
977                                  */
978                                 if (ifa->ifa_netmask == 0)
979                                         continue;
980                                 cp = addr_data;
981                                 cp2 = ifa->ifa_addr->sa_data;
982                                 cp3 = ifa->ifa_netmask->sa_data;
983                                 cplim = ifa->ifa_netmask->sa_len +
984                                         (char *)ifa->ifa_netmask;
985                                 while (cp3 < cplim)
986                                         if ((*cp++ ^ *cp2++) & *cp3++)
987                                                 goto next; /* next address! */
988                                 /*
989                                  * If the netmask of what we just found
990                                  * is more specific than what we had before
991                                  * (if we had one) then remember the new one
992                                  * before continuing to search
993                                  * for an even better one.
994                                  */
995                                 if (ifa_maybe == 0 ||
996                                     rn_refines((char *)ifa->ifa_netmask,
997                                                (char *)ifa_maybe->ifa_netmask))
998                                         ifa_maybe = ifa;
999                         }
1000                 }
1001         }
1002         return (ifa_maybe);
1003 }
1004
1005 /*
1006  * Find an interface address specific to an interface best matching
1007  * a given address.
1008  */
1009 struct ifaddr *
1010 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1011 {
1012         struct ifaddr_container *ifac;
1013         char *cp, *cp2, *cp3;
1014         char *cplim;
1015         struct ifaddr *ifa_maybe = 0;
1016         u_int af = addr->sa_family;
1017
1018         if (af >= AF_MAX)
1019                 return (0);
1020         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1021                 struct ifaddr *ifa = ifac->ifa;
1022
1023                 if (ifa->ifa_addr->sa_family != af)
1024                         continue;
1025                 if (ifa_maybe == 0)
1026                         ifa_maybe = ifa;
1027                 if (ifa->ifa_netmask == NULL) {
1028                         if (sa_equal(addr, ifa->ifa_addr) ||
1029                             (ifa->ifa_dstaddr != NULL &&
1030                              sa_equal(addr, ifa->ifa_dstaddr)))
1031                                 return (ifa);
1032                         continue;
1033                 }
1034                 if (ifp->if_flags & IFF_POINTOPOINT) {
1035                         if (sa_equal(addr, ifa->ifa_dstaddr))
1036                                 return (ifa);
1037                 } else {
1038                         cp = addr->sa_data;
1039                         cp2 = ifa->ifa_addr->sa_data;
1040                         cp3 = ifa->ifa_netmask->sa_data;
1041                         cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1042                         for (; cp3 < cplim; cp3++)
1043                                 if ((*cp++ ^ *cp2++) & *cp3)
1044                                         break;
1045                         if (cp3 == cplim)
1046                                 return (ifa);
1047                 }
1048         }
1049         return (ifa_maybe);
1050 }
1051
1052 /*
1053  * Default action when installing a route with a Link Level gateway.
1054  * Look up an appropriate real ifa to point to.
1055  * This should be moved to /sys/net/link.c eventually.
1056  */
1057 static void
1058 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1059 {
1060         struct ifaddr *ifa;
1061         struct sockaddr *dst;
1062         struct ifnet *ifp;
1063
1064         if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1065             (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
1066                 return;
1067         ifa = ifaof_ifpforaddr(dst, ifp);
1068         if (ifa != NULL) {
1069                 IFAFREE(rt->rt_ifa);
1070                 IFAREF(ifa);
1071                 rt->rt_ifa = ifa;
1072                 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1073                         ifa->ifa_rtrequest(cmd, rt, info);
1074         }
1075 }
1076
1077 /*
1078  * Mark an interface down and notify protocols of
1079  * the transition.
1080  * NOTE: must be called at splnet or equivalent.
1081  */
1082 void
1083 if_unroute(struct ifnet *ifp, int flag, int fam)
1084 {
1085         struct ifaddr_container *ifac;
1086
1087         ifp->if_flags &= ~flag;
1088         getmicrotime(&ifp->if_lastchange);
1089         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1090                 struct ifaddr *ifa = ifac->ifa;
1091
1092                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1093                         kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1094         }
1095         ifq_purge(&ifp->if_snd);
1096         rt_ifmsg(ifp);
1097 }
1098
1099 /*
1100  * Mark an interface up and notify protocols of
1101  * the transition.
1102  * NOTE: must be called at splnet or equivalent.
1103  */
1104 void
1105 if_route(struct ifnet *ifp, int flag, int fam)
1106 {
1107         struct ifaddr_container *ifac;
1108
1109         ifq_purge(&ifp->if_snd);
1110         ifp->if_flags |= flag;
1111         getmicrotime(&ifp->if_lastchange);
1112         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1113                 struct ifaddr *ifa = ifac->ifa;
1114
1115                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1116                         kpfctlinput(PRC_IFUP, ifa->ifa_addr);
1117         }
1118         rt_ifmsg(ifp);
1119 #ifdef INET6
1120         in6_if_up(ifp);
1121 #endif
1122 }
1123
1124 /*
1125  * Mark an interface down and notify protocols of the transition.  An
1126  * interface going down is also considered to be a synchronizing event.
1127  * We must ensure that all packet processing related to the interface
1128  * has completed before we return so e.g. the caller can free the ifnet
1129  * structure that the mbufs may be referencing.
1130  *
1131  * NOTE: must be called at splnet or equivalent.
1132  */
1133 void
1134 if_down(struct ifnet *ifp)
1135 {
1136         if_unroute(ifp, IFF_UP, AF_UNSPEC);
1137         netmsg_service_sync();
1138 }
1139
1140 /*
1141  * Mark an interface up and notify protocols of
1142  * the transition.
1143  * NOTE: must be called at splnet or equivalent.
1144  */
1145 void
1146 if_up(struct ifnet *ifp)
1147 {
1148         if_route(ifp, IFF_UP, AF_UNSPEC);
1149 }
1150
1151 /*
1152  * Process a link state change.
1153  * NOTE: must be called at splsoftnet or equivalent.
1154  */
1155 void
1156 if_link_state_change(struct ifnet *ifp)
1157 {
1158         int link_state = ifp->if_link_state;
1159
1160         rt_ifmsg(ifp);
1161         devctl_notify("IFNET", ifp->if_xname,
1162             (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1163 }
1164
1165 /*
1166  * Handle interface watchdog timer routines.  Called
1167  * from softclock, we decrement timers (if set) and
1168  * call the appropriate interface routine on expiration.
1169  */
1170 static void
1171 if_slowtimo(void *arg)
1172 {
1173         struct ifnet *ifp;
1174
1175         crit_enter();
1176
1177         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1178                 if (ifp->if_timer == 0 || --ifp->if_timer)
1179                         continue;
1180                 if (ifp->if_watchdog) {
1181                         if (ifnet_tryserialize_all(ifp)) {
1182                                 (*ifp->if_watchdog)(ifp);
1183                                 ifnet_deserialize_all(ifp);
1184                         } else {
1185                                 /* try again next timeout */
1186                                 ++ifp->if_timer;
1187                         }
1188                 }
1189         }
1190
1191         crit_exit();
1192
1193         callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
1194 }
1195
1196 /*
1197  * Map interface name to
1198  * interface structure pointer.
1199  */
1200 struct ifnet *
1201 ifunit(const char *name)
1202 {
1203         struct ifnet *ifp;
1204
1205         /*
1206          * Search all the interfaces for this name/number
1207          */
1208
1209         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1210                 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
1211                         break;
1212         }
1213         return (ifp);
1214 }
1215
1216
1217 /*
1218  * Map interface name in a sockaddr_dl to
1219  * interface structure pointer.
1220  */
1221 struct ifnet *
1222 if_withname(struct sockaddr *sa)
1223 {
1224         char ifname[IFNAMSIZ+1];
1225         struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1226
1227         if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1228              (sdl->sdl_nlen > IFNAMSIZ) )
1229                 return NULL;
1230
1231         /*
1232          * ifunit wants a null-terminated name.  It may not be null-terminated
1233          * in the sockaddr.  We don't want to change the caller's sockaddr,
1234          * and there might not be room to put the trailing null anyway, so we
1235          * make a local copy that we know we can null terminate safely.
1236          */
1237
1238         bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1239         ifname[sdl->sdl_nlen] = '\0';
1240         return ifunit(ifname);
1241 }
1242
1243
1244 /*
1245  * Interface ioctls.
1246  */
1247 int
1248 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
1249 {
1250         struct ifnet *ifp;
1251         struct ifreq *ifr;
1252         struct ifstat *ifs;
1253         int error;
1254         short oif_flags;
1255         int new_flags;
1256         size_t namelen, onamelen;
1257         char new_name[IFNAMSIZ];
1258         struct ifaddr *ifa;
1259         struct sockaddr_dl *sdl;
1260
1261         switch (cmd) {
1262
1263         case SIOCGIFCONF:
1264         case OSIOCGIFCONF:
1265                 return (ifconf(cmd, data, cred));
1266         }
1267         ifr = (struct ifreq *)data;
1268
1269         switch (cmd) {
1270         case SIOCIFCREATE:
1271         case SIOCIFCREATE2:
1272                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1273                         return (error);
1274                 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1275                         cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
1276         case SIOCIFDESTROY:
1277                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1278                         return (error);
1279                 return (if_clone_destroy(ifr->ifr_name));
1280
1281         case SIOCIFGCLONERS:
1282                 return (if_clone_list((struct if_clonereq *)data));
1283         }
1284
1285         ifp = ifunit(ifr->ifr_name);
1286         if (ifp == 0)
1287                 return (ENXIO);
1288         switch (cmd) {
1289
1290         case SIOCGIFINDEX:
1291                 ifr->ifr_index = ifp->if_index;
1292                 break;
1293
1294         case SIOCGIFFLAGS:
1295                 ifr->ifr_flags = ifp->if_flags;
1296                 ifr->ifr_flagshigh = ifp->if_flags >> 16;
1297                 break;
1298
1299         case SIOCGIFCAP:
1300                 ifr->ifr_reqcap = ifp->if_capabilities;
1301                 ifr->ifr_curcap = ifp->if_capenable;
1302                 break;
1303
1304         case SIOCGIFMETRIC:
1305                 ifr->ifr_metric = ifp->if_metric;
1306                 break;
1307
1308         case SIOCGIFMTU:
1309                 ifr->ifr_mtu = ifp->if_mtu;
1310                 break;
1311
1312         case SIOCGIFPHYS:
1313                 ifr->ifr_phys = ifp->if_physical;
1314                 break;
1315
1316         case SIOCGIFPOLLCPU:
1317 #ifdef DEVICE_POLLING
1318                 ifr->ifr_pollcpu = ifp->if_poll_cpuid;
1319 #else
1320                 ifr->ifr_pollcpu = -1;
1321 #endif
1322                 break;
1323
1324         case SIOCSIFPOLLCPU:
1325 #ifdef DEVICE_POLLING
1326                 if ((ifp->if_flags & IFF_POLLING) == 0)
1327                         ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
1328 #endif
1329                 break;
1330
1331         case SIOCSIFFLAGS:
1332                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1333                 if (error)
1334                         return (error);
1335                 new_flags = (ifr->ifr_flags & 0xffff) |
1336                     (ifr->ifr_flagshigh << 16);
1337                 if (ifp->if_flags & IFF_SMART) {
1338                         /* Smart drivers twiddle their own routes */
1339                 } else if (ifp->if_flags & IFF_UP &&
1340                     (new_flags & IFF_UP) == 0) {
1341                         crit_enter();
1342                         if_down(ifp);
1343                         crit_exit();
1344                 } else if (new_flags & IFF_UP &&
1345                     (ifp->if_flags & IFF_UP) == 0) {
1346                         crit_enter();
1347                         if_up(ifp);
1348                         crit_exit();
1349                 }
1350
1351 #ifdef DEVICE_POLLING
1352                 if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
1353                         if (new_flags & IFF_POLLING) {
1354                                 ether_poll_register(ifp);
1355                         } else {
1356                                 ether_poll_deregister(ifp);
1357                         }
1358                 }
1359 #endif
1360 #ifdef IFPOLL_ENABLE
1361                 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1362                         if (new_flags & IFF_NPOLLING)
1363                                 ifpoll_register(ifp);
1364                         else
1365                                 ifpoll_deregister(ifp);
1366                 }
1367 #endif
1368
1369                 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1370                         (new_flags &~ IFF_CANTCHANGE);
1371                 if (new_flags & IFF_PPROMISC) {
1372                         /* Permanently promiscuous mode requested */
1373                         ifp->if_flags |= IFF_PROMISC;
1374                 } else if (ifp->if_pcount == 0) {
1375                         ifp->if_flags &= ~IFF_PROMISC;
1376                 }
1377                 if (ifp->if_ioctl) {
1378                         ifnet_serialize_all(ifp);
1379                         ifp->if_ioctl(ifp, cmd, data, cred);
1380                         ifnet_deserialize_all(ifp);
1381                 }
1382                 getmicrotime(&ifp->if_lastchange);
1383                 break;
1384
1385         case SIOCSIFCAP:
1386                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1387                 if (error)
1388                         return (error);
1389                 if (ifr->ifr_reqcap & ~ifp->if_capabilities)
1390                         return (EINVAL);
1391                 ifnet_serialize_all(ifp);
1392                 ifp->if_ioctl(ifp, cmd, data, cred);
1393                 ifnet_deserialize_all(ifp);
1394                 break;
1395
1396         case SIOCSIFNAME:
1397                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1398                 if (error != 0)
1399                         return (error);
1400                 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
1401                 if (error != 0)
1402                         return (error);
1403                 if (new_name[0] == '\0')
1404                         return (EINVAL);
1405                 if (ifunit(new_name) != NULL)
1406                         return (EEXIST);
1407
1408                 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
1409
1410                 /* Announce the departure of the interface. */
1411                 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1412
1413                 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
1414                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
1415                 /* XXX IFA_LOCK(ifa); */
1416                 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1417                 namelen = strlen(new_name);
1418                 onamelen = sdl->sdl_nlen;
1419                 /*
1420                  * Move the address if needed.  This is safe because we
1421                  * allocate space for a name of length IFNAMSIZ when we
1422                  * create this in if_attach().
1423                  */
1424                 if (namelen != onamelen) {
1425                         bcopy(sdl->sdl_data + onamelen,
1426                             sdl->sdl_data + namelen, sdl->sdl_alen);
1427                 }
1428                 bcopy(new_name, sdl->sdl_data, namelen);
1429                 sdl->sdl_nlen = namelen;
1430                 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1431                 bzero(sdl->sdl_data, onamelen);
1432                 while (namelen != 0)
1433                         sdl->sdl_data[--namelen] = 0xff;
1434                 /* XXX IFA_UNLOCK(ifa) */
1435
1436                 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
1437
1438                 /* Announce the return of the interface. */
1439                 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1440                 break;
1441
1442         case SIOCSIFMETRIC:
1443                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1444                 if (error)
1445                         return (error);
1446                 ifp->if_metric = ifr->ifr_metric;
1447                 getmicrotime(&ifp->if_lastchange);
1448                 break;
1449
1450         case SIOCSIFPHYS:
1451                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1452                 if (error)
1453                         return error;
1454                 if (!ifp->if_ioctl)
1455                         return EOPNOTSUPP;
1456                 ifnet_serialize_all(ifp);
1457                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1458                 ifnet_deserialize_all(ifp);
1459                 if (error == 0)
1460                         getmicrotime(&ifp->if_lastchange);
1461                 return (error);
1462
1463         case SIOCSIFMTU:
1464         {
1465                 u_long oldmtu = ifp->if_mtu;
1466
1467                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1468                 if (error)
1469                         return (error);
1470                 if (ifp->if_ioctl == NULL)
1471                         return (EOPNOTSUPP);
1472                 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
1473                         return (EINVAL);
1474                 ifnet_serialize_all(ifp);
1475                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1476                 ifnet_deserialize_all(ifp);
1477                 if (error == 0) {
1478                         getmicrotime(&ifp->if_lastchange);
1479                         rt_ifmsg(ifp);
1480                 }
1481                 /*
1482                  * If the link MTU changed, do the network-layer-specific processing.
1483                  */
1484                 if (ifp->if_mtu != oldmtu) {
1485 #ifdef INET6
1486                         nd6_setmtu(ifp);
1487 #endif
1488                 }
1489                 return (error);
1490         }
1491
1492         case SIOCADDMULTI:
1493         case SIOCDELMULTI:
1494                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1495                 if (error)
1496                         return (error);
1497
1498                 /* Don't allow group membership on non-multicast interfaces. */
1499                 if ((ifp->if_flags & IFF_MULTICAST) == 0)
1500                         return EOPNOTSUPP;
1501
1502                 /* Don't let users screw up protocols' entries. */
1503                 if (ifr->ifr_addr.sa_family != AF_LINK)
1504                         return EINVAL;
1505
1506                 if (cmd == SIOCADDMULTI) {
1507                         struct ifmultiaddr *ifma;
1508                         error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1509                 } else {
1510                         error = if_delmulti(ifp, &ifr->ifr_addr);
1511                 }
1512                 if (error == 0)
1513                         getmicrotime(&ifp->if_lastchange);
1514                 return error;
1515
1516         case SIOCSIFPHYADDR:
1517         case SIOCDIFPHYADDR:
1518 #ifdef INET6
1519         case SIOCSIFPHYADDR_IN6:
1520 #endif
1521         case SIOCSLIFPHYADDR:
1522         case SIOCSIFMEDIA:
1523         case SIOCSIFGENERIC:
1524                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1525                 if (error)
1526                         return (error);
1527                 if (ifp->if_ioctl == NULL)
1528                         return (EOPNOTSUPP);
1529                 ifnet_serialize_all(ifp);
1530                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1531                 ifnet_deserialize_all(ifp);
1532                 if (error == 0)
1533                         getmicrotime(&ifp->if_lastchange);
1534                 return error;
1535
1536         case SIOCGIFSTATUS:
1537                 ifs = (struct ifstat *)data;
1538                 ifs->ascii[0] = '\0';
1539
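                /* FALLTHROUGH - the driver ioctl path below fills in the status */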
1540         case SIOCGIFPSRCADDR:
1541         case SIOCGIFPDSTADDR:
1542         case SIOCGLIFPHYADDR:
1543         case SIOCGIFMEDIA:
1544         case SIOCGIFGENERIC:
1545                 if (ifp->if_ioctl == NULL)
1546                         return (EOPNOTSUPP);
1547                 ifnet_serialize_all(ifp);
1548                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1549                 ifnet_deserialize_all(ifp);
1550                 return (error);
1551
1552         case SIOCSIFLLADDR:
1553                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1554                 if (error)
1555                         return (error);
1556                 error = if_setlladdr(ifp,
1557                     ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
1558                 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
1559                 return (error);
1560
1561         default:
1562                 oif_flags = ifp->if_flags;
1563                 if (so->so_proto == NULL)
1564                         return (EOPNOTSUPP);
1565 #ifndef COMPAT_43
1566                 error = so_pru_control(so, cmd, data, ifp);
1567 #else
1568             {
1569                 u_long ocmd = cmd;
1570
1571                 switch (cmd) {
1572
1573                 case SIOCSIFDSTADDR:
1574                 case SIOCSIFADDR:
1575                 case SIOCSIFBRDADDR:
1576                 case SIOCSIFNETMASK:
1577 #if BYTE_ORDER != BIG_ENDIAN
1578                         if (ifr->ifr_addr.sa_family == 0 &&
1579                             ifr->ifr_addr.sa_len < 16) {
1580                                 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1581                                 ifr->ifr_addr.sa_len = 16;
1582                         }
1583 #else
1584                         if (ifr->ifr_addr.sa_len == 0)
1585                                 ifr->ifr_addr.sa_len = 16;
1586 #endif
1587                         break;
1588
1589                 case OSIOCGIFADDR:
1590                         cmd = SIOCGIFADDR;
1591                         break;
1592
1593                 case OSIOCGIFDSTADDR:
1594                         cmd = SIOCGIFDSTADDR;
1595                         break;
1596
1597                 case OSIOCGIFBRDADDR:
1598                         cmd = SIOCGIFBRDADDR;
1599                         break;
1600
1601                 case OSIOCGIFNETMASK:
1602                         cmd = SIOCGIFNETMASK;
1603                 }
1604                 error = so_pru_control(so, cmd, data, ifp);
1605                 switch (ocmd) {
1606
1607                 case OSIOCGIFADDR:
1608                 case OSIOCGIFDSTADDR:
1609                 case OSIOCGIFBRDADDR:
1610                 case OSIOCGIFNETMASK:
1611                         *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
1612
1613                 }
1614             }
1615 #endif /* COMPAT_43 */
1616
1617                 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1618 #ifdef INET6
1619                         DELAY(100);/* XXX: temporary workaround for fxp issue*/
1620                         if (ifp->if_flags & IFF_UP) {
1621                                 crit_enter();
1622                                 in6_if_up(ifp);
1623                                 crit_exit();
1624                         }
1625 #endif
1626                 }
1627                 return (error);
1628
1629         }
1630         return (0);
1631 }
1632
1633 /*
1634  * Set/clear promiscuous mode on interface ifp based on the truth value
1635  * of pswitch.  The calls are reference counted so that only the first
1636  * "on" request actually has an effect, as does the final "off" request.
1637  * Results are undefined if the "off" and "on" requests are not matched.
1638  */
1639 int
1640 ifpromisc(struct ifnet *ifp, int pswitch)
1641 {
1642         struct ifreq ifr;
1643         int error;
1644         int oldflags;
1645
1646         oldflags = ifp->if_flags;
1647         if (ifp->if_flags & IFF_PPROMISC) {
1648                 /* Do nothing if device is in permanently promiscuous mode */
1649                 ifp->if_pcount += pswitch ? 1 : -1;
1650                 return (0);
1651         }
1652         if (pswitch) {
1653                 /*
1654                  * If the device is not configured up, we cannot put it in
1655                  * promiscuous mode.
1656                  */
1657                 if ((ifp->if_flags & IFF_UP) == 0)
1658                         return (ENETDOWN);
1659                 if (ifp->if_pcount++ != 0)
1660                         return (0);
1661                 ifp->if_flags |= IFF_PROMISC;
1662                 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1663                     ifp->if_xname);
1664         } else {
1665                 if (--ifp->if_pcount > 0)
1666                         return (0);
1667                 ifp->if_flags &= ~IFF_PROMISC;
1668                 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1669                     ifp->if_xname);
1670         }
1671         ifr.ifr_flags = ifp->if_flags;
1672         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1673         ifnet_serialize_all(ifp);
1674         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1675         ifnet_deserialize_all(ifp);
1676         if (error == 0)
1677                 rt_ifmsg(ifp);
1678         else
1679                 ifp->if_flags = oldflags;
1680         return error;
1681 }
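
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * packet-tap attach path might bracket its use of the reference-counted
 * ifpromisc() interface described above.  The function name
 * example_tap_attach() is made up for illustration only.
 */
#if 0
static int
example_tap_attach(struct ifnet *ifp)
{
        int error;

        /* The first "on" request sets IFF_PROMISC and pushes it down. */
        error = ifpromisc(ifp, 1);
        if (error)
                return (error);

        /* ... receive traffic promiscuously ... */

        /* The matching "off" request; the last one clears IFF_PROMISC. */
        return (ifpromisc(ifp, 0));
}
#endif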
1682
1683 /*
1684  * Return the interface configuration
1685  * of the system.  The list may be used
1686  * in later ioctls (above) to get
1687  * other information.
1688  */
1689 static int
1690 ifconf(u_long cmd, caddr_t data, struct ucred *cred)
1691 {
1692         struct ifconf *ifc = (struct ifconf *)data;
1693         struct ifnet *ifp;
1694         struct sockaddr *sa;
1695         struct ifreq ifr, *ifrp;
1696         int space = ifc->ifc_len, error = 0;
1697
1698         ifrp = ifc->ifc_req;
1699         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1700                 struct ifaddr_container *ifac;
1701                 int addrs;
1702
1703                 if (space <= sizeof ifr)
1704                         break;
1705
1706                 /*
1707                  * Zero the stack declared structure first to prevent
1708                  * memory disclosure.
1709                  */
1710                 bzero(&ifr, sizeof(ifr));
1711                 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1712                     >= sizeof(ifr.ifr_name)) {
1713                         error = ENAMETOOLONG;
1714                         break;
1715                 }
1716
1717                 addrs = 0;
1718                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1719                         struct ifaddr *ifa = ifac->ifa;
1720
1721                         if (space <= sizeof ifr)
1722                                 break;
1723                         sa = ifa->ifa_addr;
1724                         if (cred->cr_prison &&
1725                             prison_if(cred, sa))
1726                                 continue;
1727                         addrs++;
1728 #ifdef COMPAT_43
1729                         if (cmd == OSIOCGIFCONF) {
1730                                 struct osockaddr *osa =
1731                                          (struct osockaddr *)&ifr.ifr_addr;
1732                                 ifr.ifr_addr = *sa;
1733                                 osa->sa_family = sa->sa_family;
1734                                 error = copyout(&ifr, ifrp, sizeof ifr);
1735                                 ifrp++;
1736                         } else
1737 #endif
1738                         if (sa->sa_len <= sizeof(*sa)) {
1739                                 ifr.ifr_addr = *sa;
1740                                 error = copyout(&ifr, ifrp, sizeof ifr);
1741                                 ifrp++;
1742                         } else {
1743                                 if (space < (sizeof ifr) + sa->sa_len -
1744                                             sizeof(*sa))
1745                                         break;
1746                                 space -= sa->sa_len - sizeof(*sa);
1747                                 error = copyout(&ifr, ifrp,
1748                                                 sizeof ifr.ifr_name);
1749                                 if (error == 0)
1750                                         error = copyout(sa, &ifrp->ifr_addr,
1751                                                         sa->sa_len);
1752                                 ifrp = (struct ifreq *)
1753                                         (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
1754                         }
1755                         if (error)
1756                                 break;
1757                         space -= sizeof ifr;
1758                 }
1759                 if (error)
1760                         break;
1761                 if (!addrs) {
1762                         bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
1763                         error = copyout(&ifr, ifrp, sizeof ifr);
1764                         if (error)
1765                                 break;
1766                         space -= sizeof ifr;
1767                         ifrp++;
1768                 }
1769         }
1770         ifc->ifc_len -= space;
1771         return (error);
1772 }
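
/*
 * Illustrative sketch (not part of the original file): the userland side of
 * the SIOCGIFCONF handler above.  The caller supplies a buffer through
 * struct ifconf and then walks the variable-length ifreq records the way
 * ifconf() lays them out; the buffer size and variable names are arbitrary.
 */
#if 0
        struct ifconf ifc;
        struct ifreq *ifr;
        char buf[8192], *p;
        int s;

        s = socket(AF_INET, SOCK_DGRAM, 0);
        ifc.ifc_len = sizeof(buf);
        ifc.ifc_buf = buf;
        if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
                /* On return ifc_len holds the number of bytes filled in. */
                for (p = buf; p < buf + ifc.ifc_len;) {
                        ifr = (struct ifreq *)p;
                        /* ifr_name/ifr_addr describe one interface address. */
                        p += sizeof(ifr->ifr_name) +
                            (ifr->ifr_addr.sa_len > sizeof(ifr->ifr_addr) ?
                             ifr->ifr_addr.sa_len : sizeof(ifr->ifr_addr));
                }
        }
#endif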
1773
1774 /*
1775  * Just like ifpromisc(), but for all-multicast-reception mode.
1776  */
1777 int
1778 if_allmulti(struct ifnet *ifp, int onswitch)
1779 {
1780         int error = 0;
1781         struct ifreq ifr;
1782
1783         crit_enter();
1784
1785         if (onswitch) {
1786                 if (ifp->if_amcount++ == 0) {
1787                         ifp->if_flags |= IFF_ALLMULTI;
1788                         ifr.ifr_flags = ifp->if_flags;
1789                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1790                         ifnet_serialize_all(ifp);
1791                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1792                                               NULL);
1793                         ifnet_deserialize_all(ifp);
1794                 }
1795         } else {
1796                 if (ifp->if_amcount > 1) {
1797                         ifp->if_amcount--;
1798                 } else {
1799                         ifp->if_amcount = 0;
1800                         ifp->if_flags &= ~IFF_ALLMULTI;
1801                         ifr.ifr_flags = ifp->if_flags;
1802                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1803                         ifnet_serialize_all(ifp);
1804                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1805                                               NULL);
1806                         ifnet_deserialize_all(ifp);
1807                 }
1808         }
1809
1810         crit_exit();
1811
1812         if (error == 0)
1813                 rt_ifmsg(ifp);
1814         return error;
1815 }
1816
1817 /*
1818  * Add a multicast listenership to the interface in question.  The link
1819  * layer provides a routine to resolve the matching AF_LINK address.
1820  */
1821 int
1822 if_addmulti(
1823         struct ifnet *ifp,      /* interface to manipulate */
1824         struct sockaddr *sa,    /* address to add */
1825         struct ifmultiaddr **retifma)
1826 {
1827         struct sockaddr *llsa, *dupsa;
1828         int error;
1829         struct ifmultiaddr *ifma;
1830
1831         /*
1832          * If the matching multicast address already exists
1833          * then don't add a new one, just add a reference
1834          */
1835         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1836                 if (sa_equal(sa, ifma->ifma_addr)) {
1837                         ifma->ifma_refcount++;
1838                         if (retifma)
1839                                 *retifma = ifma;
1840                         return 0;
1841                 }
1842         }
1843
1844         /*
1845          * Give the link layer a chance to accept/reject it, and also
1846          * find out which AF_LINK address this maps to, if it isn't one
1847          * already.
1848          */
1849         if (ifp->if_resolvemulti) {
1850                 ifnet_serialize_all(ifp);
1851                 error = ifp->if_resolvemulti(ifp, &llsa, sa);
1852                 ifnet_deserialize_all(ifp);
1853                 if (error)
1854                         return error;
1855         } else {
1856                 llsa = NULL;
1857         }
1858
1859         ifma = kmalloc(sizeof(*ifma), M_IFMADDR, M_WAITOK);
1860         dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_WAITOK);
1861         bcopy(sa, dupsa, sa->sa_len);
1862
1863         ifma->ifma_addr = dupsa;
1864         ifma->ifma_lladdr = llsa;
1865         ifma->ifma_ifp = ifp;
1866         ifma->ifma_refcount = 1;
1867         ifma->ifma_protospec = 0;
1868         rt_newmaddrmsg(RTM_NEWMADDR, ifma);
1869
1870         /*
1871          * Some network interfaces can scan the address list at
1872          * interrupt time; lock them out.
1873          */
1874         crit_enter();
1875         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1876         crit_exit();
1877         if (retifma)
1878                 *retifma = ifma;
1879
1880         if (llsa != NULL) {
1881                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1882                         if (sa_equal(ifma->ifma_addr, llsa))
1883                                 break;
1884                 }
1885                 if (ifma) {
1886                         ifma->ifma_refcount++;
1887                 } else {
1888                         ifma = kmalloc(sizeof(*ifma),
1889                                M_IFMADDR, M_WAITOK);
1890                         dupsa = kmalloc(llsa->sa_len,
1891                                M_IFMADDR, M_WAITOK);
1892                         bcopy(llsa, dupsa, llsa->sa_len);
1893                         ifma->ifma_addr = dupsa;
1894                         ifma->ifma_ifp = ifp;
1895                         ifma->ifma_refcount = 1;
1896                         crit_enter();
1897                         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1898                         crit_exit();
1899                 }
1900         }
1901         /*
1902          * We are certain we have added something, so call down to the
1903          * interface so that it can update its multicast filter.
1904          */
1905         crit_enter();
1906         ifnet_serialize_all(ifp);
1907         if (ifp->if_ioctl)
1908                 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
1909         ifnet_deserialize_all(ifp);
1910         crit_exit();
1911
1912         return 0;
1913 }
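
/*
 * Illustrative sketch (not part of the original file): how a network-layer
 * protocol typically joins a group through if_addmulti(), similar in spirit
 * to what the IPv4 code does.  The variables ifp and group are assumed to
 * be supplied by the caller.
 */
#if 0
        struct sockaddr_in sin;
        struct ifmultiaddr *ifma;
        int error;

        bzero(&sin, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_len = sizeof(sin);
        sin.sin_addr = group;           /* IPv4 group address to join */

        /* Resolves the AF_LINK address and takes a reference on both. */
        error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma);
#endif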
1914
1915 /*
1916  * Remove a reference to a multicast address on this interface.  Return
1917  * ENOENT if the request does not match an existing membership.
1918  */
1919 int
1920 if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
1921 {
1922         struct ifmultiaddr *ifma;
1923
1924         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1925                 if (sa_equal(sa, ifma->ifma_addr))
1926                         break;
1927         if (ifma == NULL)
1928                 return ENOENT;
1929
1930         if (ifma->ifma_refcount > 1) {
1931                 ifma->ifma_refcount--;
1932                 return 0;
1933         }
1934
1935         rt_newmaddrmsg(RTM_DELMADDR, ifma);
1936         sa = ifma->ifma_lladdr;
1937         crit_enter();
1938         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
1939         /*
1940          * Make sure the interface driver is notified
1941          * in the case of a link layer mcast group being left.
1942          */
1943         if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) {
1944                 ifnet_serialize_all(ifp);
1945                 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1946                 ifnet_deserialize_all(ifp);
1947         }
1948         crit_exit();
1949         kfree(ifma->ifma_addr, M_IFMADDR);
1950         kfree(ifma, M_IFMADDR);
1951         if (sa == NULL)
1952                 return 0;
1953
1954         /*
1955          * Now look for the link-layer address which corresponds to
1956          * this network address.  It had been squirreled away in
1957          * ifma->ifma_lladdr for this purpose (so we don't have
1958          * to call ifp->if_resolvemulti() again), and we saved that
1959          * value in sa above.  If some nasty deleted the
1960          * the address we stored is not the same as the one which was
1961          * the address we stored was is not the same as the one which was
1962          * in the record for the link-layer address.  (So we don't complain
1963          * in that case.)
1964          */
1965         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1966                 if (sa_equal(sa, ifma->ifma_addr))
1967                         break;
1968         if (ifma == NULL)
1969                 return 0;
1970
1971         if (ifma->ifma_refcount > 1) {
1972                 ifma->ifma_refcount--;
1973                 return 0;
1974         }
1975
1976         crit_enter();
1977         ifnet_serialize_all(ifp);
1978         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
1979         ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1980         ifnet_deserialize_all(ifp);
1981         crit_exit();
1982         kfree(ifma->ifma_addr, M_IFMADDR);
1983         kfree(sa, M_IFMADDR);
1984         kfree(ifma, M_IFMADDR);
1985
1986         return 0;
1987 }
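
/*
 * Illustrative sketch (not part of the original file): the matching leave
 * operation for the join shown after if_addmulti() above; sin is rebuilt
 * the same way and ifp/group are assumed to be supplied by the caller.
 */
#if 0
        bzero(&sin, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_len = sizeof(sin);
        sin.sin_addr = group;           /* previously joined group address */

        /* Drops one reference; the final reference tears the entry down. */
        error = if_delmulti(ifp, (struct sockaddr *)&sin);
#endif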
1988
1989 /*
1990  * Delete all multicast group memberships for an interface.
1991  * Should be used to quickly flush all multicast filters.
1992  */
1993 void
1994 if_delallmulti(struct ifnet *ifp)
1995 {
1996         struct ifmultiaddr *ifma;
1997         struct ifmultiaddr *next;
1998
1999         TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
2000                 if_delmulti(ifp, ifma->ifma_addr);
2001 }
2002
2003
2004 /*
2005  * Set the link layer address on an interface.
2006  *
2007  * At this time we only support certain types of interfaces,
2008  * and we don't allow the length of the address to change.
2009  */
2010 int
2011 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2012 {
2013         struct sockaddr_dl *sdl;
2014         struct ifreq ifr;
2015
2016         sdl = IF_LLSOCKADDR(ifp);
2017         if (sdl == NULL)
2018                 return (EINVAL);
2019         if (len != sdl->sdl_alen)       /* don't allow length to change */
2020                 return (EINVAL);
2021         switch (ifp->if_type) {
2022         case IFT_ETHER:                 /* these types use struct arpcom */
2023         case IFT_XETHER:
2024         case IFT_L2VLAN:
2025                 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
2026                 bcopy(lladdr, LLADDR(sdl), len);
2027                 break;
2028         default:
2029                 return (ENODEV);
2030         }
2031         /*
2032          * If the interface is already up, we need
2033          * to re-init it in order to reprogram its
2034          * address filter.
2035          */
2036         ifnet_serialize_all(ifp);
2037         if ((ifp->if_flags & IFF_UP) != 0) {
2038                 struct ifaddr_container *ifac;
2039
2040                 ifp->if_flags &= ~IFF_UP;
2041                 ifr.ifr_flags = ifp->if_flags;
2042                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2043                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2044                               NULL);
2045                 ifp->if_flags |= IFF_UP;
2046                 ifr.ifr_flags = ifp->if_flags;
2047                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2048                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2049                                  NULL);
2050 #ifdef INET
2051                 /*
2052                  * Also send gratuitous ARPs to notify other nodes about
2053                  * the address change.
2054                  */
2055                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2056                         struct ifaddr *ifa = ifac->ifa;
2057
2058                         if (ifa->ifa_addr != NULL &&
2059                             ifa->ifa_addr->sa_family == AF_INET)
2060                                 arp_ifinit(ifp, ifa);
2061                 }
2062 #endif
2063         }
2064         ifnet_deserialize_all(ifp);
2065         return (0);
2066 }
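
/*
 * Illustrative sketch (not part of the original file): changing an Ethernet
 * interface's MAC from inside the kernel, much as the SIOCSIFLLADDR case in
 * ifioctl() above does.  The locally administered address is arbitrary and
 * ifp is assumed to be supplied by the caller.
 */
#if 0
        static const u_char new_mac[ETHER_ADDR_LEN] = {
                0x02, 0x00, 0x00, 0x00, 0x00, 0x01
        };
        int error;

        /* len must equal sdl_alen (6 for Ethernet) or EINVAL is returned. */
        error = if_setlladdr(ifp, new_mac, ETHER_ADDR_LEN);
        if (error == 0)
                EVENTHANDLER_INVOKE(iflladdr_event, ifp);
#endif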
2067
2068 struct ifmultiaddr *
2069 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
2070 {
2071         struct ifmultiaddr *ifma;
2072
2073         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2074                 if (sa_equal(ifma->ifma_addr, sa))
2075                         break;
2076
2077         return ifma;
2078 }
2079
2080 /*
2081  * This function locates the first real ethernet MAC from a network
2082  * card and loads it into node, returning 0 on success or ENOENT if
2083  * no suitable interfaces were found.  It is used by the uuid code to
2084  * generate a unique 6-byte number.
2085  */
2086 int
2087 if_getanyethermac(uint16_t *node, int minlen)
2088 {
2089         struct ifnet *ifp;
2090         struct sockaddr_dl *sdl;
2091
2092         TAILQ_FOREACH(ifp, &ifnet, if_link) {
2093                 if (ifp->if_type != IFT_ETHER)
2094                         continue;
2095                 sdl = IF_LLSOCKADDR(ifp);
2096                 if (sdl->sdl_alen < minlen)
2097                         continue;
2098                 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2099                       minlen);
2100                 return(0);
2101         }
2102         return (ENOENT);
2103 }
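
/*
 * Illustrative sketch (not part of the original file): pulling a 6-byte
 * node identifier out of the first Ethernet interface, as the uuid code
 * mentioned above does; the node[] layout here is only an example.
 */
#if 0
        uint16_t node[3];       /* 6 bytes of MAC-derived node id */

        if (if_getanyethermac(node, sizeof(node)) != 0) {
                /* No suitable Ethernet interface; fall back to random bytes. */
        }
#endif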
2104
2105 /*
2106  * The name argument must be a pointer to storage which will last as
2107  * long as the interface does.  For physical devices, the result of
2108  * device_get_name(dev) is a good choice and for pseudo-devices a
2109  * static string works well.
2110  */
2111 void
2112 if_initname(struct ifnet *ifp, const char *name, int unit)
2113 {
2114         ifp->if_dname = name;
2115         ifp->if_dunit = unit;
2116         if (unit != IF_DUNIT_NONE)
2117                 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2118         else
2119                 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2120 }
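
/*
 * Illustrative sketch (not part of the original file): typical attach-time
 * use of if_initname() in a physical device driver, where the newbus name
 * and unit satisfy the storage-lifetime requirement described above.
 */
#if 0
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#endif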
2121
2122 int
2123 if_printf(struct ifnet *ifp, const char *fmt, ...)
2124 {
2125         __va_list ap;
2126         int retval;
2127
2128         retval = kprintf("%s: ", ifp->if_xname);
2129         __va_start(ap, fmt);
2130         retval += kvprintf(fmt, ap);
2131         __va_end(ap);
2132         return (retval);
2133 }
2134
2135 struct ifnet *
2136 if_alloc(uint8_t type)
2137 {
2138         struct ifnet *ifp;
2139
2140         ifp = kmalloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
2141
2142         ifp->if_type = type;
2143
2144         if (if_com_alloc[type] != NULL) {
2145                 ifp->if_l2com = if_com_alloc[type](type, ifp);
2146                 if (ifp->if_l2com == NULL) {
2147                         kfree(ifp, M_IFNET);
2148                         return (NULL);
2149                 }
2150         }
2151         return (ifp);
2152 }
2153
2154 void
2155 if_free(struct ifnet *ifp)
2156 {
2157         kfree(ifp, M_IFNET);
2158 }
2159
2160 void
2161 ifq_set_classic(struct ifaltq *ifq)
2162 {
2163         ifq->altq_enqueue = ifq_classic_enqueue;
2164         ifq->altq_dequeue = ifq_classic_dequeue;
2165         ifq->altq_request = ifq_classic_request;
2166 }
2167
2168 int
2169 ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
2170                     struct altq_pktattr *pa __unused)
2171 {
2172         logifq(enqueue, ifq);
2173         if (IF_QFULL(ifq)) {
2174                 m_freem(m);
2175                 return(ENOBUFS);
2176         } else {
2177                 IF_ENQUEUE(ifq, m);
2178                 return(0);
2179         }
2180 }
2181
2182 struct mbuf *
2183 ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
2184 {
2185         struct mbuf *m;
2186
2187         switch (op) {
2188         case ALTDQ_POLL:
2189                 IF_POLL(ifq, m);
2190                 break;
2191         case ALTDQ_REMOVE:
2192                 logifq(dequeue, ifq);
2193                 IF_DEQUEUE(ifq, m);
2194                 break;
2195         default:
2196                 panic("unsupported ALTQ dequeue op: %d", op);
2197         }
2198         KKASSERT(mpolled == NULL || mpolled == m);
2199         return(m);
2200 }
2201
2202 int
2203 ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
2204 {
2205         switch (req) {
2206         case ALTRQ_PURGE:
2207                 IF_DRAIN(ifq);
2208                 break;
2209         default:
2210                 panic("unsupported ALTQ request: %d", req);
2211         }
2212         return(0);
2213 }
2214
2215 int
2216 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2217 {
2218         struct ifaltq *ifq = &ifp->if_snd;
2219         int running = 0, error, start = 0;
2220
2221         ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);
2222
2223         ALTQ_LOCK(ifq);
2224         error = ifq_enqueue_locked(ifq, m, pa);
2225         if (error) {
2226                 ALTQ_UNLOCK(ifq);
2227                 return error;
2228         }
2229         if (!ifq->altq_started) {
2230                 /*
2231                  * Hold the ifnet.if_start interlock.
2232                  */
2233                 ifq->altq_started = 1;
2234                 start = 1;
2235         }
2236         ALTQ_UNLOCK(ifq);
2237
2238         ifp->if_obytes += m->m_pkthdr.len;
2239         if (m->m_flags & M_MCAST)
2240                 ifp->if_omcasts++;
2241
2242         if (!start) {
2243                 logifstart(avoid, ifp);
2244                 return 0;
2245         }
2246
2247         if (ifq_dispatch_schedonly) {
2248                 /*
2249                  * Always schedule ifnet.if_start on ifnet's CPU,
2250                  * short circuit the rest of this function.
2251                  * and short-circuit the rest of this function.
2252                 logifstart(sched, ifp);
2253                 if_start_schedule(ifp);
2254                 return 0;
2255         }
2256
2257         /*
2258          * Try to do direct ifnet.if_start first; if there is
2259          * contention on ifnet's serializer, ifnet.if_start will
2260          * be scheduled on ifnet's CPU.
2261          */
2262         if (!ifnet_tryserialize_tx(ifp)) {
2263                 /*
2264                  * ifnet serializer contention happened,
2265                  * ifnet.if_start is scheduled on ifnet's
2266                  * CPU, and we keep going.
2267                  */
2268                 logifstart(contend_sched, ifp);
2269                 if_start_schedule(ifp);
2270                 return 0;
2271         }
2272
2273         if ((ifp->if_flags & IFF_OACTIVE) == 0) {
2274                 logifstart(run, ifp);
2275                 ifp->if_start(ifp);
2276                 if ((ifp->if_flags &
2277                      (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
2278                         running = 1;
2279         }
2280
2281         ifnet_deserialize_tx(ifp);
2282
2283         if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
2284                 /*
2285                  * More data needs to be transmitted, ifnet.if_start is
2286                  * scheduled on ifnet's CPU, and we keep going.
2287                  * NOTE: ifnet.if_start interlock is not released.
2288                  */
2289                 logifstart(sched, ifp);
2290                 if_start_schedule(ifp);
2291         }
2292         return 0;
2293 }
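
/*
 * Illustrative sketch (not part of the original file): how an output path
 * hands a packet to the send queue via ifq_dispatch().  The mbuf m and the
 * packet attributes are assumed to have been prepared by the caller.
 */
#if 0
        struct altq_pktattr pktattr;
        int error;

        /* pktattr would normally be filled in by the ALTQ classifier. */
        error = ifq_dispatch(ifp, m, &pktattr);
#endif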
2294
2295 void *
2296 ifa_create(int size, int flags)
2297 {
2298         struct ifaddr *ifa;
2299         int i;
2300
2301         KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));
2302
2303         ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2304         if (ifa == NULL)
2305                 return NULL;
2306
2307         ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2308                                       M_IFADDR, M_WAITOK | M_ZERO);
2309         ifa->ifa_ncnt = ncpus;
2310         for (i = 0; i < ncpus; ++i) {
2311                 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2312
2313                 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2314                 ifac->ifa = ifa;
2315                 ifac->ifa_refcnt = 1;
2316         }
2317 #ifdef IFADDR_DEBUG
2318         kprintf("alloc ifa %p %d\n", ifa, size);
2319 #endif
2320         return ifa;
2321 }
2322
2323 void
2324 ifac_free(struct ifaddr_container *ifac, int cpu_id)
2325 {
2326         struct ifaddr *ifa = ifac->ifa;
2327
2328         KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2329         KKASSERT(ifac->ifa_refcnt == 0);
2330         KASSERT(ifac->ifa_listmask == 0,
2331                 ("ifa is still on %#x lists\n", ifac->ifa_listmask));
2332
2333         ifac->ifa_magic = IFA_CONTAINER_DEAD;
2334
2335 #ifdef IFADDR_DEBUG_VERBOSE
2336         kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
2337 #endif
2338
2339         KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
2340                 ("invalid # of ifac, %d\n", ifa->ifa_ncnt));
2341         if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2342 #ifdef IFADDR_DEBUG
2343                 kprintf("free ifa %p\n", ifa);
2344 #endif
2345                 kfree(ifa->ifa_containers, M_IFADDR);
2346                 kfree(ifa, M_IFADDR);
2347         }
2348 }
2349
2350 static void
2351 ifa_iflink_dispatch(struct netmsg *nmsg)
2352 {
2353         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2354         struct ifaddr *ifa = msg->ifa;
2355         struct ifnet *ifp = msg->ifp;
2356         int cpu = mycpuid;
2357         struct ifaddr_container *ifac;
2358
2359         crit_enter();
2360
2361         ifac = &ifa->ifa_containers[cpu];
2362         ASSERT_IFAC_VALID(ifac);
2363         KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
2364                 ("ifaddr is on if_addrheads\n"));
2365
2366         ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2367         if (msg->tail)
2368                 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2369         else
2370                 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
2371
2372         crit_exit();
2373
2374         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2375 }
2376
2377 void
2378 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2379 {
2380         struct netmsg_ifaddr msg;
2381
2382         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2383                     0, ifa_iflink_dispatch);
2384         msg.ifa = ifa;
2385         msg.ifp = ifp;
2386         msg.tail = tail;
2387
2388         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2389 }
2390
2391 static void
2392 ifa_ifunlink_dispatch(struct netmsg *nmsg)
2393 {
2394         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2395         struct ifaddr *ifa = msg->ifa;
2396         struct ifnet *ifp = msg->ifp;
2397         int cpu = mycpuid;
2398         struct ifaddr_container *ifac;
2399
2400         crit_enter();
2401
2402         ifac = &ifa->ifa_containers[cpu];
2403         ASSERT_IFAC_VALID(ifac);
2404         KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
2405                 ("ifaddr is not on if_addrhead\n"));
2406
2407         TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2408         ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
2409
2410         crit_exit();
2411
2412         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2413 }
2414
2415 void
2416 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2417 {
2418         struct netmsg_ifaddr msg;
2419
2420         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2421                     0, ifa_ifunlink_dispatch);
2422         msg.ifa = ifa;
2423         msg.ifp = ifp;
2424
2425         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2426 }
2427
2428 static void
2429 ifa_destroy_dispatch(struct netmsg *nmsg)
2430 {
2431         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2432
2433         IFAFREE(msg->ifa);
2434         ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2435 }
2436
2437 void
2438 ifa_destroy(struct ifaddr *ifa)
2439 {
2440         struct netmsg_ifaddr msg;
2441
2442         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2443                     0, ifa_destroy_dispatch);
2444         msg.ifa = ifa;
2445
2446         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2447 }
2448
2449 struct lwkt_port *
2450 ifnet_portfn(int cpu)
2451 {
2452         return &ifnet_threads[cpu].td_msgport;
2453 }
2454
2455 void
2456 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2457 {
2458         KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2459
2460         if (next_cpu < ncpus)
2461                 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2462         else
2463                 lwkt_replymsg(lmsg, 0);
2464 }
2465
2466 int
2467 ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2468 {
2469         KKASSERT(cpu < ncpus);
2470         return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
2471 }
2472
2473 void
2474 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2475 {
2476         KKASSERT(cpu < ncpus);
2477         lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2478 }
2479
2480 static void
2481 ifnetinit(void *dummy __unused)
2482 {
2483         int i;
2484
2485         for (i = 0; i < ncpus; ++i) {
2486                 struct thread *thr = &ifnet_threads[i];
2487
2488                 lwkt_create(netmsg_service_loop, &ifnet_mpsafe_thread, NULL,
2489                             thr, TDF_NETWORK | TDF_MPSAFE, i, "ifnet %d", i);
2490                 netmsg_service_port_init(&thr->td_msgport);
2491         }
2492 }
2493
2494 struct ifnet *
2495 ifnet_byindex(unsigned short idx)
2496 {
2497         if (idx > if_index)
2498                 return NULL;
2499         return ifindex2ifnet[idx];
2500 }
2501
2502 struct ifaddr *
2503 ifaddr_byindex(unsigned short idx)
2504 {
2505         struct ifnet *ifp;
2506
2507         ifp = ifnet_byindex(idx);
2508         if (!ifp)
2509                 return NULL;
2510         return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
2511 }
2512
2513 void
2514 if_register_com_alloc(u_char type,
2515     if_com_alloc_t *a, if_com_free_t *f)
2516 {
2517
2518         KASSERT(if_com_alloc[type] == NULL,
2519             ("if_register_com_alloc: %d already registered", type));
2520         KASSERT(if_com_free[type] == NULL,
2521             ("if_register_com_alloc: %d free already registered", type));
2522
2523         if_com_alloc[type] = a;
2524         if_com_free[type] = f;
2525 }
2526
2527 void
2528 if_deregister_com_alloc(u_char type)
2529 {
2530
2531         KASSERT(if_com_alloc[type] != NULL,
2532             ("if_deregister_com_alloc: %d not registered", type));
2533         KASSERT(if_com_free[type] != NULL,
2534             ("if_deregister_com_alloc: %d free not registered", type));
2535         if_com_alloc[type] = NULL;
2536         if_com_free[type] = NULL;
2537 }