1 /*
2  * Copyright (c) 1980, 1986, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by the University of
16  *      California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *      @(#)if.c        8.3 (Berkeley) 1/4/94
34  * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35  * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
36  */
37
38 #include "opt_compat.h"
39 #include "opt_inet6.h"
40 #include "opt_inet.h"
41 #include "opt_polling.h"
42 #include "opt_ifpoll.h"
43
44 #include <sys/param.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/systm.h>
48 #include <sys/proc.h>
49 #include <sys/priv.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/socketops.h>
54 #include <sys/protosw.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/sockio.h>
58 #include <sys/syslog.h>
59 #include <sys/sysctl.h>
60 #include <sys/domain.h>
61 #include <sys/thread.h>
62 #include <sys/thread2.h>
63 #include <sys/serialize.h>
64 #include <sys/msgport2.h>
65 #include <sys/bus.h>
66
67 #include <net/if.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_types.h>
71 #include <net/if_var.h>
72 #include <net/ifq_var.h>
73 #include <net/radix.h>
74 #include <net/route.h>
75 #include <net/if_clone.h>
76 #include <net/netisr.h>
77 #include <net/netmsg2.h>
78
79 #include <machine/atomic.h>
80 #include <machine/stdarg.h>
81 #include <machine/smp.h>
82
83 #if defined(INET) || defined(INET6)
84 /*XXX*/
85 #include <netinet/in.h>
86 #include <netinet/in_var.h>
87 #include <netinet/if_ether.h>
88 #ifdef INET6
89 #include <netinet6/in6_var.h>
90 #include <netinet6/in6_ifattach.h>
91 #endif
92 #endif
93
94 #if defined(COMPAT_43)
95 #include <emulation/43bsd/43bsd_socket.h>
96 #endif /* COMPAT_43 */
97
98 struct netmsg_ifaddr {
99         struct netmsg   netmsg;
100         struct ifaddr   *ifa;
101         struct ifnet    *ifp;
102         int             tail;
103 };
104
105 /*
106  * System initialization
107  */
108 static void     if_attachdomain(void *);
109 static void     if_attachdomain1(struct ifnet *);
110 static int      ifconf(u_long, caddr_t, struct ucred *);
111 static void     ifinit(void *);
112 static void     ifnetinit(void *);
113 static void     if_slowtimo(void *);
114 static void     link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
115 static int      if_rtdel(struct radix_node *, void *);
116
117 #ifdef INET6
118 /*
119  * XXX: declared here to avoid including many inet6 related files...
120  * should this be more generalized?
121  */
122 extern void     nd6_setmtu(struct ifnet *);
123 #endif
124
125 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
126 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
127
128 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
129 /* Must be after netisr_init */
130 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
131
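/*
 * Hooks indexed by interface type (ifp->if_type), used to allocate and
 * free the type-specific "common" portion of an ifnet.
 */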
132 static  if_com_alloc_t *if_com_alloc[256];
133 static  if_com_free_t *if_com_free[256];
134
135 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
136 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
137 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
138
139 int                     ifqmaxlen = IFQ_MAXLEN;
140 struct ifnethead        ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
141
142 /* In ifq_dispatch(), try to do direct ifnet.if_start first */
143 static int              ifq_dispatch_schedonly = 0;
144 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
145            &ifq_dispatch_schedonly, 0, "");
146
147 /* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
148 static int              ifq_dispatch_schednochk = 0;
149 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
150            &ifq_dispatch_schednochk, 0, "");
151
152 /* In if_devstart(), try to do direct ifnet.if_start first */
153 static int              if_devstart_schedonly = 0;
154 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
155            &if_devstart_schedonly, 0, "");
156
157 /* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
158 static int              if_devstart_schednochk = 0;
159 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
160            &if_devstart_schednochk, 0, "");
161
162 #ifdef SMP
163 /* Schedule ifnet.if_start on the current CPU */
164 static int              if_start_oncpu_sched = 0;
165 SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
166            &if_start_oncpu_sched, 0, "");
167 #endif
168
169 struct callout          if_slowtimo_timer;
170
171 int                     if_index = 0;
172 struct ifnet            **ifindex2ifnet = NULL;
173 static struct thread    ifnet_threads[MAXCPU];
174 static int              ifnet_mpsafe_thread = NETMSG_SERVICE_MPSAFE;
175
176 #define IFQ_KTR_STRING          "ifq=%p"
177 #define IFQ_KTR_ARG_SIZE        (sizeof(void *))
178 #ifndef KTR_IFQ
179 #define KTR_IFQ                 KTR_ALL
180 #endif
181 KTR_INFO_MASTER(ifq);
182 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
183 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
184 #define logifq(name, arg)       KTR_LOG(ifq_ ## name, arg)
185
186 #define IF_START_KTR_STRING     "ifp=%p"
187 #define IF_START_KTR_ARG_SIZE   (sizeof(void *))
188 #ifndef KTR_IF_START
189 #define KTR_IF_START            KTR_ALL
190 #endif
191 KTR_INFO_MASTER(if_start);
192 KTR_INFO(KTR_IF_START, if_start, run, 0,
193          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
194 KTR_INFO(KTR_IF_START, if_start, sched, 1,
195          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
196 KTR_INFO(KTR_IF_START, if_start, avoid, 2,
197          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
198 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
199          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
200 #ifdef SMP
201 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
202          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
203 #endif
204 #define logifstart(name, arg)   KTR_LOG(if_start_ ## name, arg)
205
206 /*
207  * Network interface utility routines.
208  *
209  * Routines with ifa_ifwith* names take sockaddr *'s as
210  * parameters.
211  */
212 /* ARGSUSED*/
213 void
214 ifinit(void *dummy)
215 {
216         struct ifnet *ifp;
217
218         callout_init(&if_slowtimo_timer);
219
220         crit_enter();
221         TAILQ_FOREACH(ifp, &ifnet, if_link) {
222                 if (ifp->if_snd.ifq_maxlen == 0) {
223                         if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
224                         ifp->if_snd.ifq_maxlen = ifqmaxlen;
225                 }
226         }
227         crit_exit();
228
229         if_slowtimo(0);
230 }
231
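/*
 * Default if_start_cpuid method: run ifnet.if_start on the interface's
 * configured CPU.  The polling-aware variant below prefers the polling
 * CPU while polling(4) is active on the interface.
 */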
232 static int
233 if_start_cpuid(struct ifnet *ifp)
234 {
235         return ifp->if_cpuid;
236 }
237
238 #ifdef DEVICE_POLLING
239 static int
240 if_start_cpuid_poll(struct ifnet *ifp)
241 {
242         int poll_cpuid = ifp->if_poll_cpuid;
243
244         if (poll_cpuid >= 0)
245                 return poll_cpuid;
246         else
247                 return ifp->if_cpuid;
248 }
249 #endif
250
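/*
 * IPI/local helper: send this CPU's if_start netmsg to the local ifnet
 * thread, unless that message is still in flight (MSGF_DONE not set).
 */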
251 static void
252 if_start_ipifunc(void *arg)
253 {
254         struct ifnet *ifp = arg;
255         struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;
256
257         crit_enter();
258         if (lmsg->ms_flags & MSGF_DONE)
259                 lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
260         crit_exit();
261 }
262
263 /*
264  * Schedule ifnet.if_start on ifnet's CPU
265  */
266 static void
267 if_start_schedule(struct ifnet *ifp)
268 {
269 #ifdef SMP
270         int cpu;
271
272         if (if_start_oncpu_sched)
273                 cpu = mycpuid;
274         else
275                 cpu = ifp->if_start_cpuid(ifp);
276
277         if (cpu != mycpuid)
278                 lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
279         else
280 #endif
281         if_start_ipifunc(ifp);
282 }
283
284 /*
285  * NOTE:
286  * This function releases the ifnet.if_start interlock
287  * if ifnet.if_start does not need to be scheduled.
288  */
289 static __inline int
290 if_start_need_schedule(struct ifaltq *ifq, int running)
291 {
292         if (!running || ifq_is_empty(ifq)
293 #ifdef ALTQ
294             || ifq->altq_tbr != NULL
295 #endif
296         ) {
297                 ALTQ_LOCK(ifq);
298                 /*
299                  * ifnet.if_start interlock is released, if:
300                  * 1) Hardware cannot take any packets, due to
301                  *    o  interface is marked down
302                  *    o  hardware queue is full (IFF_OACTIVE)
303                  *    Under the second situation, a hardware interrupt
304                  *    or polling(4) will call/schedule ifnet.if_start
305                  *    when the hardware queue is ready
306                  * 2) There are no packets in ifnet.if_snd.
307                  *    Further ifq_dispatch or ifq_handoff will call/
308                  *    schedule ifnet.if_start
309                  * 3) TBR is used and it does not allow further
310                  *    dequeueing.
311                  *    TBR callout will call ifnet.if_start
312                  */
313                 if (!running || !ifq_data_ready(ifq)) {
314                         ifq->altq_started = 0;
315                         ALTQ_UNLOCK(ifq);
316                         return 0;
317                 }
318                 ALTQ_UNLOCK(ifq);
319         }
320         return 1;
321 }
322
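/*
 * Netmsg handler that runs ifnet.if_start in the per-CPU ifnet thread:
 * reply to the message immediately, chase a CPU change if the interface
 * moved, call if_start with the TX serializer held, and resend the
 * message if further scheduling is still needed.
 */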
323 static void
324 if_start_dispatch(struct netmsg *nmsg)
325 {
326         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
327         struct ifnet *ifp = lmsg->u.ms_resultp;
328         struct ifaltq *ifq = &ifp->if_snd;
329         int running = 0;
330
331         crit_enter();
332         lwkt_replymsg(lmsg, 0); /* reply ASAP */
333         crit_exit();
334
335 #ifdef SMP
336         if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
337                 /*
338                  * If the ifnet is still up, we need to
339                  * chase its CPU change.
340                  */
341                 if (ifp->if_flags & IFF_UP) {
342                         logifstart(chase_sched, ifp);
343                         if_start_schedule(ifp);
344                         return;
345                 } else {
346                         goto check;
347                 }
348         }
349 #endif
350
351         if (ifp->if_flags & IFF_UP) {
352                 ifnet_serialize_tx(ifp); /* XXX try? */
353                 if ((ifp->if_flags & IFF_OACTIVE) == 0) {
354                         logifstart(run, ifp);
355                         ifp->if_start(ifp);
356                         if ((ifp->if_flags &
357                         (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
358                                 running = 1;
359                 }
360                 ifnet_deserialize_tx(ifp);
361         }
362 #ifdef SMP
363 check:
364 #endif
365         if (if_start_need_schedule(ifq, running)) {
366                 crit_enter();
367                 if (lmsg->ms_flags & MSGF_DONE) { /* XXX necessary? */
368                         logifstart(sched, ifp);
369                         lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
370                 }
371                 crit_exit();
372         }
373 }
374
375 /* Device driver ifnet.if_start helper function */
376 void
377 if_devstart(struct ifnet *ifp)
378 {
379         struct ifaltq *ifq = &ifp->if_snd;
380         int running = 0;
381
382         ASSERT_IFNET_SERIALIZED_TX(ifp);
383
384         ALTQ_LOCK(ifq);
385         if (ifq->altq_started || !ifq_data_ready(ifq)) {
386                 logifstart(avoid, ifp);
387                 ALTQ_UNLOCK(ifq);
388                 return;
389         }
390         ifq->altq_started = 1;
391         ALTQ_UNLOCK(ifq);
392
393         if (if_devstart_schedonly) {
394                 /*
395                  * Always schedule ifnet.if_start on ifnet's CPU,
396                  * short-circuiting the rest of this function.
397                  */
398                 logifstart(sched, ifp);
399                 if_start_schedule(ifp);
400                 return;
401         }
402
403         logifstart(run, ifp);
404         ifp->if_start(ifp);
405
406         if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
407                 running = 1;
408
409         if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
410                 /*
411                  * More data needs to be transmitted; ifnet.if_start is
412                  * scheduled on ifnet's CPU, and we keep going.
413                  * NOTE: ifnet.if_start interlock is not released.
414                  */
415                 logifstart(sched, ifp);
416                 if_start_schedule(ifp);
417         }
418 }
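
/*
 * Example (illustrative sketch, not taken from any specific driver): a
 * driver restarts transmission from a context that already holds its TX
 * serializer, e.g. after reclaiming descriptors in its TX-completion
 * handler, with something like:
 *
 *	if (!ifq_is_empty(&ifp->if_snd))
 *		if_devstart(ifp);
 */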
419
420 static void
421 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
422 {
423         lwkt_serialize_enter(ifp->if_serializer);
424 }
425
426 static void
427 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
428 {
429         lwkt_serialize_exit(ifp->if_serializer);
430 }
431
432 static int
433 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
434 {
435         return lwkt_serialize_try(ifp->if_serializer);
436 }
437
438 #ifdef INVARIANTS
439 static void
440 if_default_serialize_assert(struct ifnet *ifp,
441                             enum ifnet_serialize slz __unused,
442                             boolean_t serialized)
443 {
444         if (serialized)
445                 ASSERT_SERIALIZED(ifp->if_serializer);
446         else
447                 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
448 }
449 #endif
450
451 /*
452  * Attach an interface to the list of "active" interfaces.
453  *
454  * The serializer is optional.  If non-NULL, access to the interface
455  * may be MPSAFE.
456  */
457 void
458 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
459 {
460         unsigned socksize, ifasize;
461         int namelen, masklen;
462         struct sockaddr_dl *sdl;
463         struct ifaddr *ifa;
464         struct ifaltq *ifq;
465         int i;
466
467         static int if_indexlim = 8;
468
469         if (ifp->if_serialize != NULL) {
470                 KASSERT(ifp->if_deserialize != NULL &&
471                         ifp->if_tryserialize != NULL &&
472                         ifp->if_serialize_assert != NULL,
473                         ("serialize functions are partially setup\n"));
474
475                 /*
476                  * If the device supplies serialize functions,
477                  * then clear if_serializer to catch any invalid
478                  * usage of this field.
479                  */
480                 KASSERT(serializer == NULL,
481                         ("both serialize functions and default serializer "
482                          "are supplied\n"));
483                 ifp->if_serializer = NULL;
484         } else {
485                 KASSERT(ifp->if_deserialize == NULL &&
486                         ifp->if_tryserialize == NULL &&
487                         ifp->if_serialize_assert == NULL,
488                         ("serialize functions are partially setup\n"));
489                 ifp->if_serialize = if_default_serialize;
490                 ifp->if_deserialize = if_default_deserialize;
491                 ifp->if_tryserialize = if_default_tryserialize;
492 #ifdef INVARIANTS
493                 ifp->if_serialize_assert = if_default_serialize_assert;
494 #endif
495
496                 /*
497                  * The serializer can be passed in from the device,
498                  * allowing the same serializer to be used for both
499                  * the interrupt interlock and the device queue.
500                  * If not specified, the netif structure will use an
501                  * embedded serializer.
502                  */
503                 if (serializer == NULL) {
504                         serializer = &ifp->if_default_serializer;
505                         lwkt_serialize_init(serializer);
506                 }
507                 ifp->if_serializer = serializer;
508         }
509
510         ifp->if_start_cpuid = if_start_cpuid;
511         ifp->if_cpuid = 0;
512
513 #ifdef DEVICE_POLLING
514         /* Device is not in polling mode by default */
515         ifp->if_poll_cpuid = -1;
516         if (ifp->if_poll != NULL)
517                 ifp->if_start_cpuid = if_start_cpuid_poll;
518 #endif
519
520         ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
521                                      M_LWKTMSG, M_WAITOK);
522         for (i = 0; i < ncpus; ++i) {
523                 netmsg_init(&ifp->if_start_nmsg[i], NULL, &netisr_adone_rport,
524                             0, if_start_dispatch);
525                 ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
526         }
527
528         TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
529         ifp->if_index = ++if_index;
530
531         /*
532          * XXX -
533          * The old code would work if the interface passed a pre-existing
534          * chain of ifaddrs to this code.  We don't trust our callers to
535          * properly initialize the tailq, however, so we no longer allow
536          * this unlikely case.
537          */
538         ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
539                                     M_IFADDR, M_WAITOK | M_ZERO);
540         for (i = 0; i < ncpus; ++i)
541                 TAILQ_INIT(&ifp->if_addrheads[i]);
542
543         TAILQ_INIT(&ifp->if_prefixhead);
544         LIST_INIT(&ifp->if_multiaddrs);
545         getmicrotime(&ifp->if_lastchange);
546         if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
547                 unsigned int n;
548                 struct ifnet **q;
549
550                 if_indexlim <<= 1;
551
552                 /* grow ifindex2ifnet */
553                 n = if_indexlim * sizeof(*q);
554                 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
555                 if (ifindex2ifnet) {
556                         bcopy(ifindex2ifnet, q, n/2);
557                         kfree(ifindex2ifnet, M_IFADDR);
558                 }
559                 ifindex2ifnet = q;
560         }
561
562         ifindex2ifnet[if_index] = ifp;
563
564         /*
565          * create a Link Level name for this device
566          */
567         namelen = strlen(ifp->if_xname);
568 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
569         masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
570         socksize = masklen + ifp->if_addrlen;
571 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
572         if (socksize < sizeof(*sdl))
573                 socksize = sizeof(*sdl);
574         socksize = ROUNDUP(socksize);
575         ifasize = sizeof(struct ifaddr) + 2 * socksize;
576         ifa = ifa_create(ifasize, M_WAITOK);
577         sdl = (struct sockaddr_dl *)(ifa + 1);
578         sdl->sdl_len = socksize;
579         sdl->sdl_family = AF_LINK;
580         bcopy(ifp->if_xname, sdl->sdl_data, namelen);
581         sdl->sdl_nlen = namelen;
582         sdl->sdl_index = ifp->if_index;
583         sdl->sdl_type = ifp->if_type;
584         ifp->if_lladdr = ifa;
585         ifa->ifa_ifp = ifp;
586         ifa->ifa_rtrequest = link_rtrequest;
587         ifa->ifa_addr = (struct sockaddr *)sdl;
588         sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
589         ifa->ifa_netmask = (struct sockaddr *)sdl;
590         sdl->sdl_len = masklen;
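        /*
         * Fill the mask over the name bytes with 0xff so that, for AF_LINK
         * lookups, the interface name is the significant portion of the
         * link-level address.
         */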
591         while (namelen != 0)
592                 sdl->sdl_data[--namelen] = 0xff;
593         ifa_iflink(ifa, ifp, 0 /* Insert head */);
594
595         EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
596         devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
597
598         ifq = &ifp->if_snd;
599         ifq->altq_type = 0;
600         ifq->altq_disc = NULL;
601         ifq->altq_flags &= ALTQF_CANTCHANGE;
602         ifq->altq_tbr = NULL;
603         ifq->altq_ifp = ifp;
604         ifq->altq_started = 0;
605         ifq->altq_prepended = NULL;
606         ALTQ_LOCK_INIT(ifq);
607         ifq_set_classic(ifq);
608
609         if (!SLIST_EMPTY(&domains))
610                 if_attachdomain1(ifp);
611
612         /* Announce the interface. */
613         rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
614 }
615
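/*
 * Run if_attachdomain1() on every interface once protocol domains have
 * been attached (see the SYSINIT below); if_attachdomain1() sets up the
 * per-domain if_afdata area for a single interface.
 */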
616 static void
617 if_attachdomain(void *dummy)
618 {
619         struct ifnet *ifp;
620
621         crit_enter();
622         TAILQ_FOREACH(ifp, &ifnet, if_list)
623                 if_attachdomain1(ifp);
624         crit_exit();
625 }
626 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
627         if_attachdomain, NULL);
628
629 static void
630 if_attachdomain1(struct ifnet *ifp)
631 {
632         struct domain *dp;
633
634         crit_enter();
635
636         /* address family dependent data region */
637         bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
638         SLIST_FOREACH(dp, &domains, dom_next)
639                 if (dp->dom_ifattach)
640                         ifp->if_afdata[dp->dom_family] =
641                                 (*dp->dom_ifattach)(ifp);
642         crit_exit();
643 }
644
645 /*
646  * Purge all addresses whose type is _not_ AF_LINK
647  */
648 void
649 if_purgeaddrs_nolink(struct ifnet *ifp)
650 {
651         struct ifaddr_container *ifac, *next;
652
653         TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
654                               ifa_link, next) {
655                 struct ifaddr *ifa = ifac->ifa;
656
657                 /* Leave link ifaddr as it is */
658                 if (ifa->ifa_addr->sa_family == AF_LINK)
659                         continue;
660 #ifdef INET
661                 /* XXX: Ugly!! ad hoc just for INET */
662                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
663                         struct ifaliasreq ifr;
664 #ifdef IFADDR_DEBUG_VERBOSE
665                         int i;
666
667                         kprintf("purge in4 addr %p: ", ifa);
668                         for (i = 0; i < ncpus; ++i)
669                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
670                         kprintf("\n");
671 #endif
672
673                         bzero(&ifr, sizeof ifr);
674                         ifr.ifra_addr = *ifa->ifa_addr;
675                         if (ifa->ifa_dstaddr)
676                                 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
677                         if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
678                                        NULL) == 0)
679                                 continue;
680                 }
681 #endif /* INET */
682 #ifdef INET6
683                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
684 #ifdef IFADDR_DEBUG_VERBOSE
685                         int i;
686
687                         kprintf("purge in6 addr %p: ", ifa);
688                         for (i = 0; i < ncpus; ++i)
689                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
690                         kprintf("\n");
691 #endif
692
693                         in6_purgeaddr(ifa);
694                         /* ifp_addrhead is already updated */
695                         continue;
696                 }
697 #endif /* INET6 */
698                 ifa_ifunlink(ifa, ifp);
699                 ifa_destroy(ifa);
700         }
701 }
702
703 /*
704  * Detach an interface, removing it from the
705  * list of "active" interfaces.
706  */
707 void
708 if_detach(struct ifnet *ifp)
709 {
710         struct radix_node_head  *rnh;
711         int i;
712         int cpu, origcpu;
713         struct domain *dp;
714
715         EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
716
717         /*
718          * Remove routes and flush queues.
719          */
720         crit_enter();
721 #ifdef DEVICE_POLLING
722         if (ifp->if_flags & IFF_POLLING)
723                 ether_poll_deregister(ifp);
724 #endif
725 #ifdef IFPOLL_ENABLE
726         if (ifp->if_flags & IFF_NPOLLING)
727                 ifpoll_deregister(ifp);
728 #endif
729         if_down(ifp);
730
731         if (ifq_is_enabled(&ifp->if_snd))
732                 altq_disable(&ifp->if_snd);
733         if (ifq_is_attached(&ifp->if_snd))
734                 altq_detach(&ifp->if_snd);
735
736         /*
737          * Clean up all addresses.
738          */
739         ifp->if_lladdr = NULL;
740
741         if_purgeaddrs_nolink(ifp);
742         if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
743                 struct ifaddr *ifa;
744
745                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
746                 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
747                         ("non-link ifaddr is left on if_addrheads"));
748
749                 ifa_ifunlink(ifa, ifp);
750                 ifa_destroy(ifa);
751                 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
752                         ("there are still ifaddrs left on if_addrheads"));
753         }
754
755 #ifdef INET
756         /*
757          * Remove all IPv4 kernel structures related to ifp.
758          */
759         in_ifdetach(ifp);
760 #endif
761
762 #ifdef INET6
763         /*
764          * Remove all IPv6 kernel structs related to ifp.  This should be done
765          * before removing routing entries below, since IPv6 interface direct
766          * routes are expected to be removed by the IPv6-specific kernel API.
767  * Otherwise, the kernel will detect some inconsistency and complain about it.
768          */
769         in6_ifdetach(ifp);
770 #endif
771
772         /*
773  * Delete all remaining routes using this interface.
774  * Unfortunately the only way to do this is to slog through
775          * the entire routing table looking for routes which point
776          * to this interface...oh well...
777          */
778         origcpu = mycpuid;
779         for (cpu = 0; cpu < ncpus2; cpu++) {
780                 lwkt_migratecpu(cpu);
781                 for (i = 1; i <= AF_MAX; i++) {
782                         if ((rnh = rt_tables[cpu][i]) == NULL)
783                                 continue;
784                         rnh->rnh_walktree(rnh, if_rtdel, ifp);
785                 }
786         }
787         lwkt_migratecpu(origcpu);
788
789         /* Announce that the interface is gone. */
790         rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
791         devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
792
793         SLIST_FOREACH(dp, &domains, dom_next)
794                 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
795                         (*dp->dom_ifdetach)(ifp,
796                                 ifp->if_afdata[dp->dom_family]);
797
798         /*
799          * Remove interface from ifindex2ifp[] and maybe decrement if_index.
800          */
801         ifindex2ifnet[ifp->if_index] = NULL;
802         while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
803                 if_index--;
804
805         TAILQ_REMOVE(&ifnet, ifp, if_link);
806         kfree(ifp->if_addrheads, M_IFADDR);
807         kfree(ifp->if_start_nmsg, M_LWKTMSG);
808         crit_exit();
809 }
810
811 /*
812  * Delete Routes for a Network Interface
813  *
814  * Called for each routing entry via the rnh->rnh_walktree() call above
815  * to delete all route entries referencing a detaching network interface.
816  *
817  * Arguments:
818  *      rn      pointer to node in the routing table
819  *      arg     argument passed to rnh->rnh_walktree() - detaching interface
820  *
821  * Returns:
822  *      0       successful
823  *      errno   failed - reason indicated
824  *
825  */
826 static int
827 if_rtdel(struct radix_node *rn, void *arg)
828 {
829         struct rtentry  *rt = (struct rtentry *)rn;
830         struct ifnet    *ifp = arg;
831         int             err;
832
833         if (rt->rt_ifp == ifp) {
834
835                 /*
836                  * Protect (sorta) against walktree recursion problems
837                  * with cloned routes
838                  */
839                 if (!(rt->rt_flags & RTF_UP))
840                         return (0);
841
842                 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
843                                 rt_mask(rt), rt->rt_flags,
844                                 NULL);
845                 if (err) {
846                         log(LOG_WARNING, "if_rtdel: error %d\n", err);
847                 }
848         }
849
850         return (0);
851 }
852
853 /*
854  * Locate an interface based on a complete address.
855  */
856 struct ifaddr *
857 ifa_ifwithaddr(struct sockaddr *addr)
858 {
859         struct ifnet *ifp;
860
861         TAILQ_FOREACH(ifp, &ifnet, if_link) {
862                 struct ifaddr_container *ifac;
863
864                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
865                         struct ifaddr *ifa = ifac->ifa;
866
867                         if (ifa->ifa_addr->sa_family != addr->sa_family)
868                                 continue;
869                         if (sa_equal(addr, ifa->ifa_addr))
870                                 return (ifa);
871                         if ((ifp->if_flags & IFF_BROADCAST) &&
872                             ifa->ifa_broadaddr &&
873                             /* IPv6 doesn't have broadcast */
874                             ifa->ifa_broadaddr->sa_len != 0 &&
875                             sa_equal(ifa->ifa_broadaddr, addr))
876                                 return (ifa);
877                 }
878         }
879         return (NULL);
880 }
881 /*
882  * Locate the point to point interface with a given destination address.
883  */
884 struct ifaddr *
885 ifa_ifwithdstaddr(struct sockaddr *addr)
886 {
887         struct ifnet *ifp;
888
889         TAILQ_FOREACH(ifp, &ifnet, if_link) {
890                 struct ifaddr_container *ifac;
891
892                 if (!(ifp->if_flags & IFF_POINTOPOINT))
893                         continue;
894
895                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
896                         struct ifaddr *ifa = ifac->ifa;
897
898                         if (ifa->ifa_addr->sa_family != addr->sa_family)
899                                 continue;
900                         if (ifa->ifa_dstaddr &&
901                             sa_equal(addr, ifa->ifa_dstaddr))
902                                 return (ifa);
903                 }
904         }
905         return (NULL);
906 }
907
908 /*
909  * Find an interface on a specific network.  If many match, the
910  * most specific one found is chosen.
911  */
912 struct ifaddr *
913 ifa_ifwithnet(struct sockaddr *addr)
914 {
915         struct ifnet *ifp;
916         struct ifaddr *ifa_maybe = NULL;
917         u_int af = addr->sa_family;
918         char *addr_data = addr->sa_data, *cplim;
919
920         /*
921          * AF_LINK addresses can be looked up directly by their index number,
922          * so do that if we can.
923          */
924         if (af == AF_LINK) {
925                 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
926
927                 if (sdl->sdl_index && sdl->sdl_index <= if_index)
928                         return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
929         }
930
931         /*
932  * Scan through each interface, looking for ones that have
933          * addresses in this address family.
934          */
935         TAILQ_FOREACH(ifp, &ifnet, if_link) {
936                 struct ifaddr_container *ifac;
937
938                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
939                         struct ifaddr *ifa = ifac->ifa;
940                         char *cp, *cp2, *cp3;
941
942                         if (ifa->ifa_addr->sa_family != af)
943 next:                           continue;
944                         if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
945                                 /*
946                                  * This is a bit broken as it doesn't
947                                  * take into account that the remote end may
948                                  * be a single node in the network we are
949                                  * looking for.
950                                  * The trouble is that we don't know the
951                                  * netmask for the remote end.
952                                  */
953                                 if (ifa->ifa_dstaddr != NULL &&
954                                     sa_equal(addr, ifa->ifa_dstaddr))
955                                         return (ifa);
956                         } else {
957                                 /*
958                                  * if we have a special address handler,
959                                  * then use it instead of the generic one.
960                                  */
961                                 if (ifa->ifa_claim_addr) {
962                                         if ((*ifa->ifa_claim_addr)(ifa, addr)) {
963                                                 return (ifa);
964                                         } else {
965                                                 continue;
966                                         }
967                                 }
968
969                                 /*
970                                  * Scan all the bits in the ifa's address.
971                                  * If a bit disagrees with what we are
972                                  * looking for, mask it with the netmask
973                                  * to see if it really matters.
974                                  * (A byte at a time)
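                                 * For example (illustrative addresses):
                                 * looking up 192.168.1.20 against an ifa
                                 * of 192.168.1.1 with netmask 255.255.255.0,
                                 * the three leading octets of the address
                                 * match after masking and the final octet
                                 * is masked out by the zero mask byte, so
                                 * this ifa matches.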
975                                  */
976                                 if (ifa->ifa_netmask == 0)
977                                         continue;
978                                 cp = addr_data;
979                                 cp2 = ifa->ifa_addr->sa_data;
980                                 cp3 = ifa->ifa_netmask->sa_data;
981                                 cplim = ifa->ifa_netmask->sa_len +
982                                         (char *)ifa->ifa_netmask;
983                                 while (cp3 < cplim)
984                                         if ((*cp++ ^ *cp2++) & *cp3++)
985                                                 goto next; /* next address! */
986                                 /*
987                                  * If the netmask of what we just found
988                                  * is more specific than what we had before
989                                  * (if we had one) then remember the new one
990                                  * before continuing to search
991                                  * for an even better one.
992                                  */
993                                 if (ifa_maybe == 0 ||
994                                     rn_refines((char *)ifa->ifa_netmask,
995                                                (char *)ifa_maybe->ifa_netmask))
996                                         ifa_maybe = ifa;
997                         }
998                 }
999         }
1000         return (ifa_maybe);
1001 }
1002
1003 /*
1004  * Find an interface address specific to an interface best matching
1005  * a given address.
1006  */
1007 struct ifaddr *
1008 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1009 {
1010         struct ifaddr_container *ifac;
1011         char *cp, *cp2, *cp3;
1012         char *cplim;
1013         struct ifaddr *ifa_maybe = 0;
1014         u_int af = addr->sa_family;
1015
1016         if (af >= AF_MAX)
1017                 return (0);
1018         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1019                 struct ifaddr *ifa = ifac->ifa;
1020
1021                 if (ifa->ifa_addr->sa_family != af)
1022                         continue;
1023                 if (ifa_maybe == 0)
1024                         ifa_maybe = ifa;
1025                 if (ifa->ifa_netmask == NULL) {
1026                         if (sa_equal(addr, ifa->ifa_addr) ||
1027                             (ifa->ifa_dstaddr != NULL &&
1028                              sa_equal(addr, ifa->ifa_dstaddr)))
1029                                 return (ifa);
1030                         continue;
1031                 }
1032                 if (ifp->if_flags & IFF_POINTOPOINT) {
1033                         if (sa_equal(addr, ifa->ifa_dstaddr))
1034                                 return (ifa);
1035                 } else {
1036                         cp = addr->sa_data;
1037                         cp2 = ifa->ifa_addr->sa_data;
1038                         cp3 = ifa->ifa_netmask->sa_data;
1039                         cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1040                         for (; cp3 < cplim; cp3++)
1041                                 if ((*cp++ ^ *cp2++) & *cp3)
1042                                         break;
1043                         if (cp3 == cplim)
1044                                 return (ifa);
1045                 }
1046         }
1047         return (ifa_maybe);
1048 }
1049
1050 /*
1051  * Default action when installing a route with a Link Level gateway.
1052  * Look up an appropriate real ifa to point to.
1053  * This should be moved to /sys/net/link.c eventually.
1054  */
1055 static void
1056 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1057 {
1058         struct ifaddr *ifa;
1059         struct sockaddr *dst;
1060         struct ifnet *ifp;
1061
1062         if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1063             (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
1064                 return;
1065         ifa = ifaof_ifpforaddr(dst, ifp);
1066         if (ifa != NULL) {
1067                 IFAFREE(rt->rt_ifa);
1068                 IFAREF(ifa);
1069                 rt->rt_ifa = ifa;
1070                 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1071                         ifa->ifa_rtrequest(cmd, rt, info);
1072         }
1073 }
1074
1075 /*
1076  * Mark an interface down and notify protocols of
1077  * the transition.
1078  * NOTE: must be called at splnet or equivalent.
1079  */
1080 void
1081 if_unroute(struct ifnet *ifp, int flag, int fam)
1082 {
1083         struct ifaddr_container *ifac;
1084
1085         ifp->if_flags &= ~flag;
1086         getmicrotime(&ifp->if_lastchange);
1087         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1088                 struct ifaddr *ifa = ifac->ifa;
1089
1090                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1091                         kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1092         }
1093         ifq_purge(&ifp->if_snd);
1094         rt_ifmsg(ifp);
1095 }
1096
1097 /*
1098  * Mark an interface up and notify protocols of
1099  * the transition.
1100  * NOTE: must be called at splnet or equivalent.
1101  */
1102 void
1103 if_route(struct ifnet *ifp, int flag, int fam)
1104 {
1105         struct ifaddr_container *ifac;
1106
1107         ifq_purge(&ifp->if_snd);
1108         ifp->if_flags |= flag;
1109         getmicrotime(&ifp->if_lastchange);
1110         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1111                 struct ifaddr *ifa = ifac->ifa;
1112
1113                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1114                         kpfctlinput(PRC_IFUP, ifa->ifa_addr);
1115         }
1116         rt_ifmsg(ifp);
1117 #ifdef INET6
1118         in6_if_up(ifp);
1119 #endif
1120 }
1121
1122 /*
1123  * Mark an interface down and notify protocols of the transition.  An
1124  * interface going down is also considered to be a synchronizing event.
1125  * We must ensure that all packet processing related to the interface
1126  * has completed before we return so e.g. the caller can free the ifnet
1127  * structure that the mbufs may be referencing.
1128  *
1129  * NOTE: must be called at splnet or equivalent.
1130  */
1131 void
1132 if_down(struct ifnet *ifp)
1133 {
1134         if_unroute(ifp, IFF_UP, AF_UNSPEC);
1135         netmsg_service_sync();
1136 }
1137
1138 /*
1139  * Mark an interface up and notify protocols of
1140  * the transition.
1141  * NOTE: must be called at splnet or equivalent.
1142  */
1143 void
1144 if_up(struct ifnet *ifp)
1145 {
1146         if_route(ifp, IFF_UP, AF_UNSPEC);
1147 }
1148
1149 /*
1150  * Process a link state change.
1151  * NOTE: must be called at splsoftnet or equivalent.
1152  */
1153 void
1154 if_link_state_change(struct ifnet *ifp)
1155 {
1156         int link_state = ifp->if_link_state;
1157
1158         rt_ifmsg(ifp);
1159         devctl_notify("IFNET", ifp->if_xname,
1160             (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1161 }
1162
1163 /*
1164  * Handle interface watchdog timer routines.  Called from
1165  * softclock; we decrement timers (if set) and call the
1166  * appropriate interface watchdog routine on expiration.
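 *
 * For example (illustrative): a driver typically arms the watchdog by
 * setting ifp->if_timer to a timeout in seconds when it queues a
 * transmit and clears it on completion; if the timer reaches zero here,
 * if_watchdog is invoked with the interface serialized.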
1167  */
1168 static void
1169 if_slowtimo(void *arg)
1170 {
1171         struct ifnet *ifp;
1172
1173         crit_enter();
1174
1175         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1176                 if (ifp->if_timer == 0 || --ifp->if_timer)
1177                         continue;
1178                 if (ifp->if_watchdog) {
1179                         if (ifnet_tryserialize_all(ifp)) {
1180                                 (*ifp->if_watchdog)(ifp);
1181                                 ifnet_deserialize_all(ifp);
1182                         } else {
1183                                 /* try again next timeout */
1184                                 ++ifp->if_timer;
1185                         }
1186                 }
1187         }
1188
1189         crit_exit();
1190
1191         callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
1192 }
1193
1194 /*
1195  * Map interface name to
1196  * interface structure pointer.
1197  */
1198 struct ifnet *
1199 ifunit(const char *name)
1200 {
1201         struct ifnet *ifp;
1202
1203         /*
1204          * Search all the interfaces for this name/number
1205          */
1206
1207         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1208                 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
1209                         break;
1210         }
1211         return (ifp);
1212 }
1213
1214
1215 /*
1216  * Map interface name in a sockaddr_dl to
1217  * interface structure pointer.
1218  */
1219 struct ifnet *
1220 if_withname(struct sockaddr *sa)
1221 {
1222         char ifname[IFNAMSIZ+1];
1223         struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1224
1225         if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1226              (sdl->sdl_nlen > IFNAMSIZ) )
1227                 return NULL;
1228
1229         /*
1230          * ifunit wants a null-terminated name.  It may not be null-terminated
1231          * in the sockaddr.  We don't want to change the caller's sockaddr,
1232          * and there might not be room to put the trailing null anyway, so we
1233          * make a local copy that we know we can null terminate safely.
1234          */
1235
1236         bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1237         ifname[sdl->sdl_nlen] = '\0';
1238         return ifunit(ifname);
1239 }
1240
1241
1242 /*
1243  * Interface ioctls.
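 *
 * These are reached from userland via ioctl(2) on a socket.  A minimal,
 * illustrative userland fragment (interface name hypothetical) to read
 * interface flags:
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCGIFFLAGS, &ifr);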
1244  */
1245 int
1246 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
1247 {
1248         struct ifnet *ifp;
1249         struct ifreq *ifr;
1250         struct ifstat *ifs;
1251         int error;
1252         short oif_flags;
1253         int new_flags;
1254         size_t namelen, onamelen;
1255         char new_name[IFNAMSIZ];
1256         struct ifaddr *ifa;
1257         struct sockaddr_dl *sdl;
1258
1259         switch (cmd) {
1260
1261         case SIOCGIFCONF:
1262         case OSIOCGIFCONF:
1263                 return (ifconf(cmd, data, cred));
1264         }
1265         ifr = (struct ifreq *)data;
1266
1267         switch (cmd) {
1268         case SIOCIFCREATE:
1269         case SIOCIFCREATE2:
1270                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1271                         return (error);
1272                 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1273                         cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
1274         case SIOCIFDESTROY:
1275                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1276                         return (error);
1277                 return (if_clone_destroy(ifr->ifr_name));
1278
1279         case SIOCIFGCLONERS:
1280                 return (if_clone_list((struct if_clonereq *)data));
1281         }
1282
1283         ifp = ifunit(ifr->ifr_name);
1284         if (ifp == 0)
1285                 return (ENXIO);
1286         switch (cmd) {
1287
1288         case SIOCGIFINDEX:
1289                 ifr->ifr_index = ifp->if_index;
1290                 break;
1291
1292         case SIOCGIFFLAGS:
1293                 ifr->ifr_flags = ifp->if_flags;
1294                 ifr->ifr_flagshigh = ifp->if_flags >> 16;
1295                 break;
1296
1297         case SIOCGIFCAP:
1298                 ifr->ifr_reqcap = ifp->if_capabilities;
1299                 ifr->ifr_curcap = ifp->if_capenable;
1300                 break;
1301
1302         case SIOCGIFMETRIC:
1303                 ifr->ifr_metric = ifp->if_metric;
1304                 break;
1305
1306         case SIOCGIFMTU:
1307                 ifr->ifr_mtu = ifp->if_mtu;
1308                 break;
1309
1310         case SIOCGIFPHYS:
1311                 ifr->ifr_phys = ifp->if_physical;
1312                 break;
1313
1314         case SIOCGIFPOLLCPU:
1315 #ifdef DEVICE_POLLING
1316                 ifr->ifr_pollcpu = ifp->if_poll_cpuid;
1317 #else
1318                 ifr->ifr_pollcpu = -1;
1319 #endif
1320                 break;
1321
1322         case SIOCSIFPOLLCPU:
1323 #ifdef DEVICE_POLLING
1324                 if ((ifp->if_flags & IFF_POLLING) == 0)
1325                         ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
1326 #endif
1327                 break;
1328
1329         case SIOCSIFFLAGS:
1330                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1331                 if (error)
1332                         return (error);
1333                 new_flags = (ifr->ifr_flags & 0xffff) |
1334                     (ifr->ifr_flagshigh << 16);
1335                 if (ifp->if_flags & IFF_SMART) {
1336                         /* Smart drivers twiddle their own routes */
1337                 } else if (ifp->if_flags & IFF_UP &&
1338                     (new_flags & IFF_UP) == 0) {
1339                         crit_enter();
1340                         if_down(ifp);
1341                         crit_exit();
1342                 } else if (new_flags & IFF_UP &&
1343                     (ifp->if_flags & IFF_UP) == 0) {
1344                         crit_enter();
1345                         if_up(ifp);
1346                         crit_exit();
1347                 }
1348
1349 #ifdef DEVICE_POLLING
1350                 if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
1351                         if (new_flags & IFF_POLLING) {
1352                                 ether_poll_register(ifp);
1353                         } else {
1354                                 ether_poll_deregister(ifp);
1355                         }
1356                 }
1357 #endif
1358 #ifdef IFPOLL_ENABLE
1359                 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1360                         if (new_flags & IFF_NPOLLING)
1361                                 ifpoll_register(ifp);
1362                         else
1363                                 ifpoll_deregister(ifp);
1364                 }
1365 #endif
1366
1367                 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1368                         (new_flags &~ IFF_CANTCHANGE);
1369                 if (new_flags & IFF_PPROMISC) {
1370                         /* Permanently promiscuous mode requested */
1371                         ifp->if_flags |= IFF_PROMISC;
1372                 } else if (ifp->if_pcount == 0) {
1373                         ifp->if_flags &= ~IFF_PROMISC;
1374                 }
1375                 if (ifp->if_ioctl) {
1376                         ifnet_serialize_all(ifp);
1377                         ifp->if_ioctl(ifp, cmd, data, cred);
1378                         ifnet_deserialize_all(ifp);
1379                 }
1380                 getmicrotime(&ifp->if_lastchange);
1381                 break;
1382
1383         case SIOCSIFCAP:
1384                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1385                 if (error)
1386                         return (error);
1387                 if (ifr->ifr_reqcap & ~ifp->if_capabilities)
1388                         return (EINVAL);
1389                 ifnet_serialize_all(ifp);
1390                 ifp->if_ioctl(ifp, cmd, data, cred);
1391                 ifnet_deserialize_all(ifp);
1392                 break;
1393
1394         case SIOCSIFNAME:
1395                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1396                 if (error != 0)
1397                         return (error);
1398                 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
1399                 if (error != 0)
1400                         return (error);
1401                 if (new_name[0] == '\0')
1402                         return (EINVAL);
1403                 if (ifunit(new_name) != NULL)
1404                         return (EEXIST);
1405
1406                 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
1407
1408                 /* Announce the departure of the interface. */
1409                 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1410
1411                 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
1412                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
1413                 /* XXX IFA_LOCK(ifa); */
1414                 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1415                 namelen = strlen(new_name);
1416                 onamelen = sdl->sdl_nlen;
1417                 /*
1418                  * Move the address if needed.  This is safe because we
1419                  * allocate space for a name of length IFNAMSIZ when we
1420                  * create this in if_attach().
1421                  */
1422                 if (namelen != onamelen) {
1423                         bcopy(sdl->sdl_data + onamelen,
1424                             sdl->sdl_data + namelen, sdl->sdl_alen);
1425                 }
1426                 bcopy(new_name, sdl->sdl_data, namelen);
1427                 sdl->sdl_nlen = namelen;
1428                 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1429                 bzero(sdl->sdl_data, onamelen);
1430                 while (namelen != 0)
1431                         sdl->sdl_data[--namelen] = 0xff;
1432                 /* XXX IFA_UNLOCK(ifa) */
1433
1434                 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
1435
1436                 /* Announce the return of the interface. */
1437                 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1438                 break;
1439
1440         case SIOCSIFMETRIC:
1441                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1442                 if (error)
1443                         return (error);
1444                 ifp->if_metric = ifr->ifr_metric;
1445                 getmicrotime(&ifp->if_lastchange);
1446                 break;
1447
1448         case SIOCSIFPHYS:
1449                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1450                 if (error)
1451                         return error;
1452                 if (!ifp->if_ioctl)
1453                         return EOPNOTSUPP;
1454                 ifnet_serialize_all(ifp);
1455                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1456                 ifnet_deserialize_all(ifp);
1457                 if (error == 0)
1458                         getmicrotime(&ifp->if_lastchange);
1459                 return (error);
1460
1461         case SIOCSIFMTU:
1462         {
1463                 u_long oldmtu = ifp->if_mtu;
1464
1465                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1466                 if (error)
1467                         return (error);
1468                 if (ifp->if_ioctl == NULL)
1469                         return (EOPNOTSUPP);
1470                 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
1471                         return (EINVAL);
1472                 ifnet_serialize_all(ifp);
1473                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1474                 ifnet_deserialize_all(ifp);
1475                 if (error == 0) {
1476                         getmicrotime(&ifp->if_lastchange);
1477                         rt_ifmsg(ifp);
1478                 }
1479                 /*
1480                  * If the link MTU changed, perform network-layer-specific processing.
1481                  */
1482                 if (ifp->if_mtu != oldmtu) {
1483 #ifdef INET6
1484                         nd6_setmtu(ifp);
1485 #endif
1486                 }
1487                 return (error);
1488         }
1489
1490         case SIOCADDMULTI:
1491         case SIOCDELMULTI:
1492                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1493                 if (error)
1494                         return (error);
1495
1496                 /* Don't allow group membership on non-multicast interfaces. */
1497                 if ((ifp->if_flags & IFF_MULTICAST) == 0)
1498                         return EOPNOTSUPP;
1499
1500                 /* Don't let users screw up protocols' entries. */
1501                 if (ifr->ifr_addr.sa_family != AF_LINK)
1502                         return EINVAL;
1503
1504                 if (cmd == SIOCADDMULTI) {
1505                         struct ifmultiaddr *ifma;
1506                         error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1507                 } else {
1508                         error = if_delmulti(ifp, &ifr->ifr_addr);
1509                 }
1510                 if (error == 0)
1511                         getmicrotime(&ifp->if_lastchange);
1512                 return error;
1513
1514         case SIOCSIFPHYADDR:
1515         case SIOCDIFPHYADDR:
1516 #ifdef INET6
1517         case SIOCSIFPHYADDR_IN6:
1518 #endif
1519         case SIOCSLIFPHYADDR:
1520         case SIOCSIFMEDIA:
1521         case SIOCSIFGENERIC:
1522                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1523                 if (error)
1524                         return (error);
1525                 if (ifp->if_ioctl == 0)
1526                         return (EOPNOTSUPP);
1527                 ifnet_serialize_all(ifp);
1528                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1529                 ifnet_deserialize_all(ifp);
1530                 if (error == 0)
1531                         getmicrotime(&ifp->if_lastchange);
1532                 return error;
1533
1534         case SIOCGIFSTATUS:
1535                 ifs = (struct ifstat *)data;
1536                 ifs->ascii[0] = '\0';
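                /* FALLTHROUGH - the driver ioctl below fills in the status */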
1537
1538         case SIOCGIFPSRCADDR:
1539         case SIOCGIFPDSTADDR:
1540         case SIOCGLIFPHYADDR:
1541         case SIOCGIFMEDIA:
1542         case SIOCGIFGENERIC:
1543                 if (ifp->if_ioctl == NULL)
1544                         return (EOPNOTSUPP);
1545                 ifnet_serialize_all(ifp);
1546                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1547                 ifnet_deserialize_all(ifp);
1548                 return (error);
1549
1550         case SIOCSIFLLADDR:
1551                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1552                 if (error)
1553                         return (error);
1554                 error = if_setlladdr(ifp,
1555                     ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
1556                 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
1557                 return (error);
1558
1559         default:
1560                 oif_flags = ifp->if_flags;
1561                 if (so->so_proto == 0)
1562                         return (EOPNOTSUPP);
1563 #ifndef COMPAT_43
1564                 error = so_pru_control(so, cmd, data, ifp);
1565 #else
1566             {
1567                 int ocmd = cmd;
1568
1569                 switch (cmd) {
1570
1571                 case SIOCSIFDSTADDR:
1572                 case SIOCSIFADDR:
1573                 case SIOCSIFBRDADDR:
1574                 case SIOCSIFNETMASK:
1575 #if BYTE_ORDER != BIG_ENDIAN
1576                         if (ifr->ifr_addr.sa_family == 0 &&
1577                             ifr->ifr_addr.sa_len < 16) {
1578                                 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1579                                 ifr->ifr_addr.sa_len = 16;
1580                         }
1581 #else
1582                         if (ifr->ifr_addr.sa_len == 0)
1583                                 ifr->ifr_addr.sa_len = 16;
1584 #endif
1585                         break;
1586
1587                 case OSIOCGIFADDR:
1588                         cmd = SIOCGIFADDR;
1589                         break;
1590
1591                 case OSIOCGIFDSTADDR:
1592                         cmd = SIOCGIFDSTADDR;
1593                         break;
1594
1595                 case OSIOCGIFBRDADDR:
1596                         cmd = SIOCGIFBRDADDR;
1597                         break;
1598
1599                 case OSIOCGIFNETMASK:
1600                         cmd = SIOCGIFNETMASK;
1601                 }
1602                 error =  so_pru_control(so, cmd, data, ifp);
1603                 switch (ocmd) {
1604
1605                 case OSIOCGIFADDR:
1606                 case OSIOCGIFDSTADDR:
1607                 case OSIOCGIFBRDADDR:
1608                 case OSIOCGIFNETMASK:
1609                         *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
1610
1611                 }
1612             }
1613 #endif /* COMPAT_43 */
1614
1615                 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1616 #ifdef INET6
1617                         DELAY(100);/* XXX: temporary workaround for fxp issue*/
1618                         if (ifp->if_flags & IFF_UP) {
1619                                 crit_enter();
1620                                 in6_if_up(ifp);
1621                                 crit_exit();
1622                         }
1623 #endif
1624                 }
1625                 return (error);
1626
1627         }
1628         return (0);
1629 }
1630
1631 /*
1632  * Set/clear promiscuous mode on interface ifp based on the truth value
1633  * of pswitch.  The calls are reference counted so that only the first
1634  * "on" request actually has an effect, as does the final "off" request.
1635  * Results are undefined if the "off" and "on" requests are not matched.
1636  */
1637 int
1638 ifpromisc(struct ifnet *ifp, int pswitch)
1639 {
1640         struct ifreq ifr;
1641         int error;
1642         int oldflags;
1643
1644         oldflags = ifp->if_flags;
1645         if (ifp->if_flags & IFF_PPROMISC) {
1646                 /* Do nothing if device is in permanently promiscuous mode */
1647                 ifp->if_pcount += pswitch ? 1 : -1;
1648                 return (0);
1649         }
1650         if (pswitch) {
1651                 /*
1652                  * If the device is not configured up, we cannot put it in
1653                  * promiscuous mode.
1654                  */
1655                 if ((ifp->if_flags & IFF_UP) == 0)
1656                         return (ENETDOWN);
1657                 if (ifp->if_pcount++ != 0)
1658                         return (0);
1659                 ifp->if_flags |= IFF_PROMISC;
1660                 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1661                     ifp->if_xname);
1662         } else {
1663                 if (--ifp->if_pcount > 0)
1664                         return (0);
1665                 ifp->if_flags &= ~IFF_PROMISC;
1666                 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1667                     ifp->if_xname);
1668         }
1669         ifr.ifr_flags = ifp->if_flags;
1670         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1671         ifnet_serialize_all(ifp);
1672         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1673         ifnet_deserialize_all(ifp);
1674         if (error == 0)
1675                 rt_ifmsg(ifp);
1676         else
1677                 ifp->if_flags = oldflags;
1678         return error;
1679 }
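/*
 * Illustrative sketch (not part of this file; the consumer is made up):
 * because ifpromisc() is reference counted, a packet-capture style
 * consumer simply brackets its lifetime with matched calls and lets the
 * final "off" clear IFF_PROMISC:
 *
 *	error = ifpromisc(ifp, 1);	first "on" sets IFF_PROMISC
 *	if (error)
 *		return (error);
 *	...
 *	ifpromisc(ifp, 0);		last "off" clears it again
 */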
1680
1681 /*
1682  * Return the interface configuration
1683  * of the system.  The list may be used
1684  * in later ioctls (above) to get
1685  * other per-interface information.
1686  */
1687 static int
1688 ifconf(u_long cmd, caddr_t data, struct ucred *cred)
1689 {
1690         struct ifconf *ifc = (struct ifconf *)data;
1691         struct ifnet *ifp;
1692         struct sockaddr *sa;
1693         struct ifreq ifr, *ifrp;
1694         int space = ifc->ifc_len, error = 0;
1695
1696         ifrp = ifc->ifc_req;
1697         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1698                 struct ifaddr_container *ifac;
1699                 int addrs;
1700
1701                 if (space <= sizeof ifr)
1702                         break;
1703
1704                 /*
1705                  * Zero the stack-declared structure first to prevent
1706                  * memory disclosure.
1707                  */
1708                 bzero(&ifr, sizeof(ifr));
1709                 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1710                     >= sizeof(ifr.ifr_name)) {
1711                         error = ENAMETOOLONG;
1712                         break;
1713                 }
1714
1715                 addrs = 0;
1716                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1717                         struct ifaddr *ifa = ifac->ifa;
1718
1719                         if (space <= sizeof ifr)
1720                                 break;
1721                         sa = ifa->ifa_addr;
1722                         if (cred->cr_prison &&
1723                             prison_if(cred, sa))
1724                                 continue;
1725                         addrs++;
1726 #ifdef COMPAT_43
1727                         if (cmd == OSIOCGIFCONF) {
1728                                 struct osockaddr *osa =
1729                                          (struct osockaddr *)&ifr.ifr_addr;
1730                                 ifr.ifr_addr = *sa;
1731                                 osa->sa_family = sa->sa_family;
1732                                 error = copyout(&ifr, ifrp, sizeof ifr);
1733                                 ifrp++;
1734                         } else
1735 #endif
1736                         if (sa->sa_len <= sizeof(*sa)) {
1737                                 ifr.ifr_addr = *sa;
1738                                 error = copyout(&ifr, ifrp, sizeof ifr);
1739                                 ifrp++;
1740                         } else {
1741                                 if (space < (sizeof ifr) + sa->sa_len -
1742                                             sizeof(*sa))
1743                                         break;
1744                                 space -= sa->sa_len - sizeof(*sa);
1745                                 error = copyout(&ifr, ifrp,
1746                                                 sizeof ifr.ifr_name);
1747                                 if (error == 0)
1748                                         error = copyout(sa, &ifrp->ifr_addr,
1749                                                         sa->sa_len);
1750                                 ifrp = (struct ifreq *)
1751                                         (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
1752                         }
1753                         if (error)
1754                                 break;
1755                         space -= sizeof ifr;
1756                 }
1757                 if (error)
1758                         break;
1759                 if (!addrs) {
1760                         bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
1761                         error = copyout(&ifr, ifrp, sizeof ifr);
1762                         if (error)
1763                                 break;
1764                         space -= sizeof ifr;
1765                         ifrp++;
1766                 }
1767         }
1768         ifc->ifc_len -= space;
1769         return (error);
1770 }
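/*
 * Illustrative userland sketch (assumptions: an AF_INET datagram socket
 * and a fixed-size buffer; not part of the kernel):
 *
 *	char buf[8192];
 *	struct ifconf ifc;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
 *		walk ifc.ifc_req for ifc.ifc_len bytes; each record is
 *		sizeof(ifr->ifr_name) plus the larger of
 *		sizeof(struct sockaddr) and ifr->ifr_addr.sa_len, since
 *		long addresses produce variable-length entries (see the
 *		copyout logic above).
 *	}
 */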
1771
1772 /*
1773  * Just like ifpromisc(), but for all-multicast-reception mode.
1774  */
1775 int
1776 if_allmulti(struct ifnet *ifp, int onswitch)
1777 {
1778         int error = 0;
1779         struct ifreq ifr;
1780
1781         crit_enter();
1782
1783         if (onswitch) {
1784                 if (ifp->if_amcount++ == 0) {
1785                         ifp->if_flags |= IFF_ALLMULTI;
1786                         ifr.ifr_flags = ifp->if_flags;
1787                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1788                         ifnet_serialize_all(ifp);
1789                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1790                                               NULL);
1791                         ifnet_deserialize_all(ifp);
1792                 }
1793         } else {
1794                 if (ifp->if_amcount > 1) {
1795                         ifp->if_amcount--;
1796                 } else {
1797                         ifp->if_amcount = 0;
1798                         ifp->if_flags &= ~IFF_ALLMULTI;
1799                         ifr.ifr_flags = ifp->if_flags;
1800                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1801                         ifnet_serialize_all(ifp);
1802                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1803                                               NULL);
1804                         ifnet_deserialize_all(ifp);
1805                 }
1806         }
1807
1808         crit_exit();
1809
1810         if (error == 0)
1811                 rt_ifmsg(ifp);
1812         return error;
1813 }
1814
1815 /*
1816  * Add a multicast listenership to the interface in question.  The
1817  * link layer provides a routine to map it to a link-layer address.
1818  */
1819 int
1820 if_addmulti(
1821         struct ifnet *ifp,      /* interface to manipulate */
1822         struct sockaddr *sa,    /* address to add */
1823         struct ifmultiaddr **retifma)
1824 {
1825         struct sockaddr *llsa, *dupsa;
1826         int error;
1827         struct ifmultiaddr *ifma;
1828
1829         /*
1830          * If the matching multicast address already exists
1831          * then don't add a new one, just add a reference
1832          */
1833         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1834                 if (sa_equal(sa, ifma->ifma_addr)) {
1835                         ifma->ifma_refcount++;
1836                         if (retifma)
1837                                 *retifma = ifma;
1838                         return 0;
1839                 }
1840         }
1841
1842         /*
1843          * Give the link layer a chance to accept/reject it, and also
1844          * find out which AF_LINK address this maps to, if it isn't one
1845          * already.
1846          */
1847         if (ifp->if_resolvemulti) {
1848                 ifnet_serialize_all(ifp);
1849                 error = ifp->if_resolvemulti(ifp, &llsa, sa);
1850                 ifnet_deserialize_all(ifp);
1851                 if (error) 
1852                         return error;
1853         } else {
1854                 llsa = 0;
1855         }
1856
1857         MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
1858         MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
1859         bcopy(sa, dupsa, sa->sa_len);
1860
1861         ifma->ifma_addr = dupsa;
1862         ifma->ifma_lladdr = llsa;
1863         ifma->ifma_ifp = ifp;
1864         ifma->ifma_refcount = 1;
1865         ifma->ifma_protospec = 0;
1866         rt_newmaddrmsg(RTM_NEWMADDR, ifma);
1867
1868         /*
1869          * Some network interfaces can scan the address list at
1870          * interrupt time; lock them out.
1871          */
1872         crit_enter();
1873         LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1874         crit_exit();
1875         if (retifma)
1876                 *retifma = ifma;
1877
1878         if (llsa != 0) {
1879                 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1880                         if (sa_equal(ifma->ifma_addr, llsa))
1881                                 break;
1882                 }
1883                 if (ifma) {
1884                         ifma->ifma_refcount++;
1885                 } else {
1886                         MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
1887                                M_IFMADDR, M_WAITOK);
1888                         MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
1889                                M_IFMADDR, M_WAITOK);
1890                         bcopy(llsa, dupsa, llsa->sa_len);
1891                         ifma->ifma_addr = dupsa;
1892                         ifma->ifma_ifp = ifp;
1893                         ifma->ifma_refcount = 1;
1894                         crit_enter();
1895                         LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1896                         crit_exit();
1897                 }
1898         }
1899         /*
1900          * We are certain we have added something, so call down to the
1901          * interface to let it know about it.
1902          */
1903         crit_enter();
1904         ifnet_serialize_all(ifp);
1905         if (ifp->if_ioctl)
1906                 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
1907         ifnet_deserialize_all(ifp);
1908         crit_exit();
1909
1910         return 0;
1911 }
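/*
 * Illustrative sketch (assumed variables, not part of this file): a
 * protocol normally hands if_addmulti()/if_delmulti() its own sockaddr
 * form of the group address and lets them handle the link-layer mapping
 * and reference counting, roughly the way in_addmulti() does for INET:
 *
 *	struct sockaddr_in sin;
 *	struct ifmultiaddr *ifma;
 *	int error;
 *
 *	bzero(&sin, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr = group;			assumed struct in_addr
 *	error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma);
 *	...
 *	error = if_delmulti(ifp, (struct sockaddr *)&sin);
 */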
1912
1913 /*
1914  * Remove a reference to a multicast address on this interface.  Yell
1915  * if the request does not match an existing membership.
1916  */
1917 int
1918 if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
1919 {
1920         struct ifmultiaddr *ifma;
1921
1922         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1923                 if (sa_equal(sa, ifma->ifma_addr))
1924                         break;
1925         if (ifma == 0)
1926                 return ENOENT;
1927
1928         if (ifma->ifma_refcount > 1) {
1929                 ifma->ifma_refcount--;
1930                 return 0;
1931         }
1932
1933         rt_newmaddrmsg(RTM_DELMADDR, ifma);
1934         sa = ifma->ifma_lladdr;
1935         crit_enter();
1936         LIST_REMOVE(ifma, ifma_link);
1937         /*
1938          * Make sure the interface driver is notified
1939          * in the case of a link layer mcast group being left.
1940          */
1941         if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) {
1942                 ifnet_serialize_all(ifp);
1943                 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1944                 ifnet_deserialize_all(ifp);
1945         }
1946         crit_exit();
1947         kfree(ifma->ifma_addr, M_IFMADDR);
1948         kfree(ifma, M_IFMADDR);
1949         if (sa == 0)
1950                 return 0;
1951
1952         /*
1953          * Now look for the link-layer address which corresponds to
1954          * this network address.  It had been squirreled away in
1955          * ifma->ifma_lladdr for this purpose (so we don't have
1956          * to call ifp->if_resolvemulti() again), and we saved that
1957          * value in sa above.  If something nasty deleted the
1958          * link-layer address out from underneath us, we can deal because
1959          * the address we stored is not the same as the one which was
1960          * in the record for the link-layer address.  (So we don't complain
1961          * in that case.)
1962          */
1963         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1964                 if (sa_equal(sa, ifma->ifma_addr))
1965                         break;
1966         if (ifma == 0)
1967                 return 0;
1968
1969         if (ifma->ifma_refcount > 1) {
1970                 ifma->ifma_refcount--;
1971                 return 0;
1972         }
1973
1974         crit_enter();
1975         ifnet_serialize_all(ifp);
1976         LIST_REMOVE(ifma, ifma_link);
1977         ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1978         ifnet_deserialize_all(ifp);
1979         crit_exit();
1980         kfree(ifma->ifma_addr, M_IFMADDR);
1981         kfree(sa, M_IFMADDR);
1982         kfree(ifma, M_IFMADDR);
1983
1984         return 0;
1985 }
1986
1987 /*
1988  * Delete all multicast group memberships for an interface.
1989  * Should be used to quickly flush all multicast filters.
1990  */
1991 void
1992 if_delallmulti(struct ifnet *ifp)
1993 {
1994         struct ifmultiaddr *ifma;
1995         struct ifmultiaddr *next;
1996
1997         LIST_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
1998                 if_delmulti(ifp, ifma->ifma_addr);
1999 }
2000
2001
2002 /*
2003  * Set the link layer address on an interface.
2004  *
2005  * At this time we only support certain types of interfaces,
2006  * and we don't allow the length of the address to change.
2007  */
2008 int
2009 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2010 {
2011         struct sockaddr_dl *sdl;
2012         struct ifreq ifr;
2013
2014         sdl = IF_LLSOCKADDR(ifp);
2015         if (sdl == NULL)
2016                 return (EINVAL);
2017         if (len != sdl->sdl_alen)       /* don't allow length to change */
2018                 return (EINVAL);
2019         switch (ifp->if_type) {
2020         case IFT_ETHER:                 /* these types use struct arpcom */
2021         case IFT_XETHER:
2022         case IFT_L2VLAN:
2023                 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
2024                 bcopy(lladdr, LLADDR(sdl), len);
2025                 break;
2026         default:
2027                 return (ENODEV);
2028         }
2029         /*
2030          * If the interface is already up, we need
2031          * to re-init it in order to reprogram its
2032          * address filter.
2033          */
2034         ifnet_serialize_all(ifp);
2035         if ((ifp->if_flags & IFF_UP) != 0) {
2036                 struct ifaddr_container *ifac;
2037
2038                 ifp->if_flags &= ~IFF_UP;
2039                 ifr.ifr_flags = ifp->if_flags;
2040                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2041                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2042                               NULL);
2043                 ifp->if_flags |= IFF_UP;
2044                 ifr.ifr_flags = ifp->if_flags;
2045                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2046                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2047                                  NULL);
2048 #ifdef INET
2049                 /*
2050                  * Also send gratuitous ARPs to notify other nodes about
2051                  * the address change.
2052                  */
2053                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2054                         struct ifaddr *ifa = ifac->ifa;
2055
2056                         if (ifa->ifa_addr != NULL &&
2057                             ifa->ifa_addr->sa_family == AF_INET)
2058                                 arp_ifinit(ifp, ifa);
2059                 }
2060 #endif
2061         }
2062         ifnet_deserialize_all(ifp);
2063         return (0);
2064 }
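/*
 * Illustrative userland sketch (the interface name and MAC are made up):
 * the SIOCSIFLLADDR handler above passes ifr_addr.sa_data/sa_len straight
 * to if_setlladdr(), so changing an Ethernet MAC from userland looks
 * roughly like:
 *
 *	static const u_char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bzero(&ifr, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ifr.ifr_addr.sa_family = AF_LINK;
 *	ifr.ifr_addr.sa_len = sizeof(mac);	must match sdl_alen
 *	bcopy(mac, ifr.ifr_addr.sa_data, sizeof(mac));
 *	ioctl(s, SIOCSIFLLADDR, &ifr);
 */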
2065
2066 struct ifmultiaddr *
2067 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
2068 {
2069         struct ifmultiaddr *ifma;
2070
2071         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2072                 if (sa_equal(ifma->ifma_addr, sa))
2073                         break;
2074
2075         return ifma;
2076 }
2077
2078 /*
2079  * This function locates the first real ethernet MAC from a network
2080  * card and loads it into node, returning 0 on success or ENOENT if
2081  * no suitable interfaces were found.  It is used by the uuid code to
2082  * generate a unique 6-byte number.
2083  */
2084 int
2085 if_getanyethermac(uint16_t *node, int minlen)
2086 {
2087         struct ifnet *ifp;
2088         struct sockaddr_dl *sdl;
2089
2090         TAILQ_FOREACH(ifp, &ifnet, if_link) {
2091                 if (ifp->if_type != IFT_ETHER)
2092                         continue;
2093                 sdl = IF_LLSOCKADDR(ifp);
2094                 if (sdl->sdl_alen < minlen)
2095                         continue;
2096                 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2097                       minlen);
2098                 return(0);
2099         }
2100         return (ENOENT);
2101 }
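/*
 * Illustrative sketch (hypothetical caller): a consumer such as the uuid
 * code might use it along these lines:
 *
 *	uint16_t node[3];		three 16-bit words = 6 bytes
 *
 *	if (if_getanyethermac(node, sizeof(node)) != 0)
 *		fall back to a random node address
 */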
2102
2103 /*
2104  * The name argument must be a pointer to storage which will last as
2105  * long as the interface does.  For physical devices, the result of
2106  * device_get_name(dev) is a good choice and for pseudo-devices a
2107  * static string works well.
2108  */
2109 void
2110 if_initname(struct ifnet *ifp, const char *name, int unit)
2111 {
2112         ifp->if_dname = name;
2113         ifp->if_dunit = unit;
2114         if (unit != IF_DUNIT_NONE)
2115                 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2116         else
2117                 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2118 }
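/*
 * Illustrative sketch (assumed driver context): a physical driver's
 * attach routine typically does
 *
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *
 * while a pseudo device passes a static string and, if it has no unit,
 * IF_DUNIT_NONE.
 */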
2119
2120 int
2121 if_printf(struct ifnet *ifp, const char *fmt, ...)
2122 {
2123         __va_list ap;
2124         int retval;
2125
2126         retval = kprintf("%s: ", ifp->if_xname);
2127         __va_start(ap, fmt);
2128         retval += kvprintf(fmt, ap);
2129         __va_end(ap);
2130         return (retval);
2131 }
2132
2133 struct ifnet *
2134 if_alloc(uint8_t type)
2135 {
2136         struct ifnet *ifp;
2137
2138         ifp = kmalloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
2139
2140         ifp->if_type = type;
2141
2142         if (if_com_alloc[type] != NULL) {
2143                 ifp->if_l2com = if_com_alloc[type](type, ifp);
2144                 if (ifp->if_l2com == NULL) {
2145                         kfree(ifp, M_IFNET);
2146                         return (NULL);
2147                 }
2148         }
2149         return (ifp);
2150 }
2151
2152 void
2153 if_free(struct ifnet *ifp)
2154 {
2155         kfree(ifp, M_IFNET);
2156 }
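/*
 * Illustrative sketch (assumed softc pointer "sc"): a consumer that
 * allocates its ifnet dynamically pairs the two calls like this:
 *
 *	struct ifnet *ifp;
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	if (ifp == NULL)
 *		return (ENOMEM);
 *	ifp->if_softc = sc;
 *	...
 *	if_free(ifp);
 */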
2157
2158 void
2159 ifq_set_classic(struct ifaltq *ifq)
2160 {
2161         ifq->altq_enqueue = ifq_classic_enqueue;
2162         ifq->altq_dequeue = ifq_classic_dequeue;
2163         ifq->altq_request = ifq_classic_request;
2164 }
2165
2166 int
2167 ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
2168                     struct altq_pktattr *pa __unused)
2169 {
2170         logifq(enqueue, ifq);
2171         if (IF_QFULL(ifq)) {
2172                 m_freem(m);
2173                 return(ENOBUFS);
2174         } else {
2175                 IF_ENQUEUE(ifq, m);
2176                 return(0);
2177         }       
2178 }
2179
2180 struct mbuf *
2181 ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
2182 {
2183         struct mbuf *m;
2184
2185         switch (op) {
2186         case ALTDQ_POLL:
2187                 IF_POLL(ifq, m);
2188                 break;
2189         case ALTDQ_REMOVE:
2190                 logifq(dequeue, ifq);
2191                 IF_DEQUEUE(ifq, m);
2192                 break;
2193         default:
2194                 panic("unsupported ALTQ dequeue op: %d", op);
2195         }
2196         KKASSERT(mpolled == NULL || mpolled == m);
2197         return(m);
2198 }
2199
2200 int
2201 ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
2202 {
2203         switch (req) {
2204         case ALTRQ_PURGE:
2205                 IF_DRAIN(ifq);
2206                 break;
2207         default:
2208                 panic("unsupported ALTQ request: %d", req);
2209         }
2210         return(0);
2211 }
2212
2213 int
2214 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2215 {
2216         struct ifaltq *ifq = &ifp->if_snd;
2217         int running = 0, error, start = 0;
2218
2219         ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);
2220
2221         ALTQ_LOCK(ifq);
2222         error = ifq_enqueue_locked(ifq, m, pa);
2223         if (error) {
2224                 ALTQ_UNLOCK(ifq);
2225                 return error;
2226         }
2227         if (!ifq->altq_started) {
2228                 /*
2229                  * Hold the interlock of ifnet.if_start
2230                  */
2231                 ifq->altq_started = 1;
2232                 start = 1;
2233         }
2234         ALTQ_UNLOCK(ifq);
2235
2236         ifp->if_obytes += m->m_pkthdr.len;
2237         if (m->m_flags & M_MCAST)
2238                 ifp->if_omcasts++;
2239
2240         if (!start) {
2241                 logifstart(avoid, ifp);
2242                 return 0;
2243         }
2244
2245         if (ifq_dispatch_schedonly) {
2246                 /*
2247                  * Always schedule ifnet.if_start on ifnet's CPU and
2248                  * short-circuit the rest of this function.
2249                  */
2250                 logifstart(sched, ifp);
2251                 if_start_schedule(ifp);
2252                 return 0;
2253         }
2254
2255         /*
2256          * Try to do a direct ifnet.if_start first; if there is
2257          * contention on ifnet's serializer, ifnet.if_start will
2258          * be scheduled on ifnet's CPU instead.
2259          */
2260         if (!ifnet_tryserialize_tx(ifp)) {
2261                 /*
2262                  * ifnet serializer contention happened,
2263                  * ifnet.if_start is scheduled on ifnet's
2264                  * CPU, and we keep going.
2265                  */
2266                 logifstart(contend_sched, ifp);
2267                 if_start_schedule(ifp);
2268                 return 0;
2269         }
2270
2271         if ((ifp->if_flags & IFF_OACTIVE) == 0) {
2272                 logifstart(run, ifp);
2273                 ifp->if_start(ifp);
2274                 if ((ifp->if_flags &
2275                      (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
2276                         running = 1;
2277         }
2278
2279         ifnet_deserialize_tx(ifp);
2280
2281         if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
2282                 /*
2283                  * More data needs to be transmitted; ifnet.if_start is
2284                  * scheduled on ifnet's CPU, and we keep going.
2285                  * NOTE: ifnet.if_start interlock is not released.
2286                  */
2287                 logifstart(sched, ifp);
2288                 if_start_schedule(ifp);
2289         }
2290         return 0;
2291 }
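/*
 * Illustrative sketch (hedged): a caller on the transmit path must not
 * hold the TX serializer (asserted above) and simply hands the packet
 * off; the queueing code owns the mbuf from then on, including freeing
 * it on ENOBUFS (see ifq_classic_enqueue() above):
 *
 *	struct altq_pktattr pktattr;
 *
 *	... classify m into &pktattr if ALTQ needs it ...
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */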
2292
2293 void *
2294 ifa_create(int size, int flags)
2295 {
2296         struct ifaddr *ifa;
2297         int i;
2298
2299         KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));
2300
2301         ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2302         if (ifa == NULL)
2303                 return NULL;
2304
2305         ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2306                                       M_IFADDR, M_WAITOK | M_ZERO);
2307         ifa->ifa_ncnt = ncpus;
2308         for (i = 0; i < ncpus; ++i) {
2309                 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2310
2311                 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2312                 ifac->ifa = ifa;
2313                 ifac->ifa_refcnt = 1;
2314         }
2315 #ifdef IFADDR_DEBUG
2316         kprintf("alloc ifa %p %d\n", ifa, size);
2317 #endif
2318         return ifa;
2319 }
2320
2321 void
2322 ifac_free(struct ifaddr_container *ifac, int cpu_id)
2323 {
2324         struct ifaddr *ifa = ifac->ifa;
2325
2326         KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2327         KKASSERT(ifac->ifa_refcnt == 0);
2328         KASSERT(ifac->ifa_listmask == 0,
2329                 ("ifa is still on %#x lists\n", ifac->ifa_listmask));
2330
2331         ifac->ifa_magic = IFA_CONTAINER_DEAD;
2332
2333 #ifdef IFADDR_DEBUG_VERBOSE
2334         kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
2335 #endif
2336
2337         KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
2338                 ("invalid # of ifac, %d\n", ifa->ifa_ncnt));
2339         if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2340 #ifdef IFADDR_DEBUG
2341                 kprintf("free ifa %p\n", ifa);
2342 #endif
2343                 kfree(ifa->ifa_containers, M_IFADDR);
2344                 kfree(ifa, M_IFADDR);
2345         }
2346 }
2347
2348 static void
2349 ifa_iflink_dispatch(struct netmsg *nmsg)
2350 {
2351         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2352         struct ifaddr *ifa = msg->ifa;
2353         struct ifnet *ifp = msg->ifp;
2354         int cpu = mycpuid;
2355         struct ifaddr_container *ifac;
2356
2357         crit_enter();
2358
2359         ifac = &ifa->ifa_containers[cpu];
2360         ASSERT_IFAC_VALID(ifac);
2361         KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
2362                 ("ifaddr is on if_addrheads\n"));
2363
2364         ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2365         if (msg->tail)
2366                 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2367         else
2368                 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
2369
2370         crit_exit();
2371
2372         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2373 }
2374
2375 void
2376 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2377 {
2378         struct netmsg_ifaddr msg;
2379
2380         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2381                     0, ifa_iflink_dispatch);
2382         msg.ifa = ifa;
2383         msg.ifp = ifp;
2384         msg.tail = tail;
2385
2386         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2387 }
2388
2389 static void
2390 ifa_ifunlink_dispatch(struct netmsg *nmsg)
2391 {
2392         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2393         struct ifaddr *ifa = msg->ifa;
2394         struct ifnet *ifp = msg->ifp;
2395         int cpu = mycpuid;
2396         struct ifaddr_container *ifac;
2397
2398         crit_enter();
2399
2400         ifac = &ifa->ifa_containers[cpu];
2401         ASSERT_IFAC_VALID(ifac);
2402         KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
2403                 ("ifaddr is not on if_addrhead\n"));
2404
2405         TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2406         ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
2407
2408         crit_exit();
2409
2410         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2411 }
2412
2413 void
2414 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2415 {
2416         struct netmsg_ifaddr msg;
2417
2418         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2419                     0, ifa_ifunlink_dispatch);
2420         msg.ifa = ifa;
2421         msg.ifp = ifp;
2422
2423         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2424 }
2425
2426 static void
2427 ifa_destroy_dispatch(struct netmsg *nmsg)
2428 {
2429         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2430
2431         IFAFREE(msg->ifa);
2432         ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2433 }
2434
2435 void
2436 ifa_destroy(struct ifaddr *ifa)
2437 {
2438         struct netmsg_ifaddr msg;
2439
2440         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2441                     0, ifa_destroy_dispatch);
2442         msg.ifa = ifa;
2443
2444         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2445 }
2446
2447 struct lwkt_port *
2448 ifnet_portfn(int cpu)
2449 {
2450         return &ifnet_threads[cpu].td_msgport;
2451 }
2452
2453 void
2454 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2455 {
2456         KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2457
2458         if (next_cpu < ncpus)
2459                 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2460         else
2461                 lwkt_replymsg(lmsg, 0);
2462 }
2463
2464 int
2465 ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2466 {
2467         KKASSERT(cpu < ncpus);
2468         return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
2469 }
2470
2471 void
2472 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2473 {
2474         KKASSERT(cpu < ncpus);
2475         lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2476 }
2477
2478 static void
2479 ifnetinit(void *dummy __unused)
2480 {
2481         int i;
2482
2483         for (i = 0; i < ncpus; ++i) {
2484                 struct thread *thr = &ifnet_threads[i];
2485
2486                 lwkt_create(netmsg_service_loop, &ifnet_mpsafe_thread, NULL,
2487                             thr, TDF_NETWORK | TDF_MPSAFE, i, "ifnet %d", i);
2488                 netmsg_service_port_init(&thr->td_msgport);
2489         }
2490 }
2491
2492 struct ifnet *
2493 ifnet_byindex(unsigned short idx)
2494 {
2495         if (idx > if_index)
2496                 return NULL;
2497         return ifindex2ifnet[idx];
2498 }
2499
2500 struct ifaddr *
2501 ifaddr_byindex(unsigned short idx)
2502 {
2503         struct ifnet *ifp;
2504
2505         ifp = ifnet_byindex(idx);
2506         if (!ifp)
2507                 return NULL;
2508         return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
2509 }
2510
2511 void
2512 if_register_com_alloc(u_char type,
2513     if_com_alloc_t *a, if_com_free_t *f)
2514 {
2515
2516         KASSERT(if_com_alloc[type] == NULL,
2517             ("if_register_com_alloc: %d already registered", type));
2518         KASSERT(if_com_free[type] == NULL,
2519             ("if_register_com_alloc: %d free already registered", type));
2520
2521         if_com_alloc[type] = a;
2522         if_com_free[type] = f;
2523 }
2524
2525 void
2526 if_deregister_com_alloc(u_char type)
2527 {
2528
2529         KASSERT(if_com_alloc[type] != NULL,
2530             ("if_deregister_com_alloc: %d not registered", type));
2531         KASSERT(if_com_free[type] != NULL,
2532             ("if_deregister_com_alloc: %d free not registered", type));
2533         if_com_alloc[type] = NULL;
2534         if_com_free[type] = NULL;
2535 }