network - Completely revamp the netisr / dispatch code
[dragonfly.git] / sys / net / if.c
1 /*
2  * Copyright (c) 1980, 1986, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by the University of
16  *      California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *      @(#)if.c        8.3 (Berkeley) 1/4/94
34  * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35  * $DragonFly: src/sys/net/if.c,v 1.84 2008/11/15 11:58:16 sephe Exp $
36  */
37
38 #include "opt_compat.h"
39 #include "opt_inet6.h"
40 #include "opt_inet.h"
41 #include "opt_polling.h"
42 #include "opt_ifpoll.h"
43
44 #include <sys/param.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/systm.h>
48 #include <sys/proc.h>
49 #include <sys/priv.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/socketops.h>
54 #include <sys/protosw.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/sockio.h>
58 #include <sys/syslog.h>
59 #include <sys/sysctl.h>
60 #include <sys/domain.h>
61 #include <sys/thread.h>
62 #include <sys/thread2.h>
63 #include <sys/serialize.h>
64 #include <sys/msgport2.h>
65 #include <sys/bus.h>
66
67 #include <net/if.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_types.h>
71 #include <net/if_var.h>
72 #include <net/ifq_var.h>
73 #include <net/radix.h>
74 #include <net/route.h>
75 #include <net/if_clone.h>
76 #include <net/netisr.h>
77 #include <net/netmsg2.h>
78
79 #include <machine/atomic.h>
80 #include <machine/stdarg.h>
81 #include <machine/smp.h>
82
83 #if defined(INET) || defined(INET6)
84 /*XXX*/
85 #include <netinet/in.h>
86 #include <netinet/in_var.h>
87 #include <netinet/if_ether.h>
88 #ifdef INET6
89 #include <netinet6/in6_var.h>
90 #include <netinet6/in6_ifattach.h>
91 #endif
92 #endif
93
94 #if defined(COMPAT_43)
95 #include <emulation/43bsd/43bsd_socket.h>
96 #endif /* COMPAT_43 */
97
98 struct netmsg_ifaddr {
99         struct netmsg   netmsg;
100         struct ifaddr   *ifa;
101         struct ifnet    *ifp;
102         int             tail;
103 };
104
105 /*
106  * System initialization
107  */
108 static void     if_attachdomain(void *);
109 static void     if_attachdomain1(struct ifnet *);
110 static int      ifconf(u_long, caddr_t, struct ucred *);
111 static void     ifinit(void *);
112 static void     ifnetinit(void *);
113 static void     if_slowtimo(void *);
114 static void     link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
115 static int      if_rtdel(struct radix_node *, void *);
116
117 #ifdef INET6
118 /*
119  * XXX: declared here to avoid including many inet6 related files..
120  * should this be more generalized?
121  */
122 extern void     nd6_setmtu(struct ifnet *);
123 #endif
124
125 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
126 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
127
128 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
129 /* Must be after netisr_init */
130 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
131
132 static  if_com_alloc_t *if_com_alloc[256];
133 static  if_com_free_t *if_com_free[256];
134
135 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
136 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
137 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
138
139 int                     ifqmaxlen = IFQ_MAXLEN;
140 struct ifnethead        ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
141
142 /* In ifq_dispatch(), try to do direct ifnet.if_start first */
143 static int              ifq_dispatch_schedonly = 0;
144 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schedonly, CTLFLAG_RW,
145            &ifq_dispatch_schedonly, 0, "");
146
147 /* In ifq_dispatch(), schedule ifnet.if_start without checking ifnet.if_snd */
148 static int              ifq_dispatch_schednochk = 0;
149 SYSCTL_INT(_net_link_generic, OID_AUTO, ifq_dispatch_schednochk, CTLFLAG_RW,
150            &ifq_dispatch_schednochk, 0, "");
151
152 /* In if_devstart(), try to do direct ifnet.if_start first */
153 static int              if_devstart_schedonly = 0;
154 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schedonly, CTLFLAG_RW,
155            &if_devstart_schedonly, 0, "");
156
157 /* In if_devstart(), schedule ifnet.if_start without checking ifnet.if_snd */
158 static int              if_devstart_schednochk = 0;
159 SYSCTL_INT(_net_link_generic, OID_AUTO, if_devstart_schednochk, CTLFLAG_RW,
160            &if_devstart_schednochk, 0, "");
161
162 #ifdef SMP
163 /* Schedule ifnet.if_start on the current CPU */
164 static int              if_start_oncpu_sched = 0;
165 SYSCTL_INT(_net_link_generic, OID_AUTO, if_start_oncpu_sched, CTLFLAG_RW,
166            &if_start_oncpu_sched, 0, "");
167 #endif
168
169 struct callout          if_slowtimo_timer;
170
171 int                     if_index = 0;
172 struct ifnet            **ifindex2ifnet = NULL;
173 static struct thread    ifnet_threads[MAXCPU];
174
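/* KTR trace points for ifq enqueue/dequeue and if_start scheduling events. */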
175 #define IFQ_KTR_STRING          "ifq=%p"
176 #define IFQ_KTR_ARG_SIZE        (sizeof(void *))
177 #ifndef KTR_IFQ
178 #define KTR_IFQ                 KTR_ALL
179 #endif
180 KTR_INFO_MASTER(ifq);
181 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
182 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARG_SIZE);
183 #define logifq(name, arg)       KTR_LOG(ifq_ ## name, arg)
184
185 #define IF_START_KTR_STRING     "ifp=%p"
186 #define IF_START_KTR_ARG_SIZE   (sizeof(void *))
187 #ifndef KTR_IF_START
188 #define KTR_IF_START            KTR_ALL
189 #endif
190 KTR_INFO_MASTER(if_start);
191 KTR_INFO(KTR_IF_START, if_start, run, 0,
192          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
193 KTR_INFO(KTR_IF_START, if_start, sched, 1,
194          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
195 KTR_INFO(KTR_IF_START, if_start, avoid, 2,
196          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
197 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
198          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
199 #ifdef SMP
200 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
201          IF_START_KTR_STRING, IF_START_KTR_ARG_SIZE);
202 #endif
203 #define logifstart(name, arg)   KTR_LOG(if_start_ ## name, arg)
204
205 /*
206  * Network interface utility routines.
207  *
208  * Routines with ifa_ifwith* names take sockaddr *'s as
209  * parameters.
210  */
211 /* ARGSUSED*/
212 void
213 ifinit(void *dummy)
214 {
215         struct ifnet *ifp;
216
217         callout_init(&if_slowtimo_timer);
218
219         crit_enter();
220         TAILQ_FOREACH(ifp, &ifnet, if_link) {
221                 if (ifp->if_snd.ifq_maxlen == 0) {
222                         if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
223                         ifp->if_snd.ifq_maxlen = ifqmaxlen;
224                 }
225         }
226         crit_exit();
227
228         if_slowtimo(0);
229 }
230
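/*
 * Default if_start CPU selection method: use the CPU recorded in if_cpuid.
 */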
231 static int
232 if_start_cpuid(struct ifnet *ifp)
233 {
234         return ifp->if_cpuid;
235 }
236
237 #ifdef DEVICE_POLLING
238 static int
239 if_start_cpuid_poll(struct ifnet *ifp)
240 {
241         int poll_cpuid = ifp->if_poll_cpuid;
242
243         if (poll_cpuid >= 0)
244                 return poll_cpuid;
245         else
246                 return ifp->if_cpuid;
247 }
248 #endif
249
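/*
 * IPI/local callback: send this ifnet's per-CPU if_start message to the
 * current CPU's ifnet thread, unless that message is still in flight.
 */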
250 static void
251 if_start_ipifunc(void *arg)
252 {
253         struct ifnet *ifp = arg;
254         struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].nm_lmsg;
255
256         crit_enter();
257         if (lmsg->ms_flags & MSGF_DONE)
258                 lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
259         crit_exit();
260 }
261
262 /*
263  * Schedule ifnet.if_start on ifnet's CPU
264  */
265 static void
266 if_start_schedule(struct ifnet *ifp)
267 {
268 #ifdef SMP
269         int cpu;
270
271         if (if_start_oncpu_sched)
272                 cpu = mycpuid;
273         else
274                 cpu = ifp->if_start_cpuid(ifp);
275
276         if (cpu != mycpuid)
277                 lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp);
278         else
279 #endif
280         if_start_ipifunc(ifp);
281 }
282
283 /*
284  * NOTE:
285  * This function releases the ifnet.if_start interlock
286  * if ifnet.if_start does not need to be scheduled.
287  */
288 static __inline int
289 if_start_need_schedule(struct ifaltq *ifq, int running)
290 {
291         if (!running || ifq_is_empty(ifq)
292 #ifdef ALTQ
293             || ifq->altq_tbr != NULL
294 #endif
295         ) {
296                 ALTQ_LOCK(ifq);
297                 /*
298                  * The ifnet.if_start interlock is released if:
299                  * 1) The hardware cannot accept any packets, because
300                  *    o  the interface is marked down, or
301                  *    o  the hardware queue is full (IFF_OACTIVE).
302                  *    In the second case, the hardware interrupt or
303                  *    polling(4) will call/schedule ifnet.if_start
304                  *    when the hardware queue is ready.
305                  * 2) There are no packets in ifnet.if_snd.
306                  *    A later ifq_dispatch or ifq_handoff will call/
307                  *    schedule ifnet.if_start.
308                  * 3) TBR is used and it does not allow further
309                  *    dequeueing.
310                  *    The TBR callout will call ifnet.if_start.
311                  */
312                 if (!running || !ifq_data_ready(ifq)) {
313                         ifq->altq_started = 0;
314                         ALTQ_UNLOCK(ifq);
315                         return 0;
316                 }
317                 ALTQ_UNLOCK(ifq);
318         }
319         return 1;
320 }
321
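/*
 * Netmsg handler that runs ifnet.if_start in the per-CPU ifnet thread.
 * The message is replied to immediately so it can be rescheduled, a CPU
 * change is chased if necessary, if_start is run under the TX serializer,
 * and the message is resent if the interlock must be kept.
 */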
322 static void
323 if_start_dispatch(struct netmsg *nmsg)
324 {
325         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
326         struct ifnet *ifp = lmsg->u.ms_resultp;
327         struct ifaltq *ifq = &ifp->if_snd;
328         int running = 0;
329
330         crit_enter();
331         lwkt_replymsg(lmsg, 0); /* reply ASAP */
332         crit_exit();
333
334 #ifdef SMP
335         if (!if_start_oncpu_sched && mycpuid != ifp->if_start_cpuid(ifp)) {
336                 /*
337                  * If the ifnet is still up, we need to
338                  * chase its CPU change.
339                  */
340                 if (ifp->if_flags & IFF_UP) {
341                         logifstart(chase_sched, ifp);
342                         if_start_schedule(ifp);
343                         return;
344                 } else {
345                         goto check;
346                 }
347         }
348 #endif
349
350         if (ifp->if_flags & IFF_UP) {
351                 ifnet_serialize_tx(ifp); /* XXX try? */
352                 if ((ifp->if_flags & IFF_OACTIVE) == 0) {
353                         logifstart(run, ifp);
354                         ifp->if_start(ifp);
355                         if ((ifp->if_flags &
356                         (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
357                                 running = 1;
358                 }
359                 ifnet_deserialize_tx(ifp);
360         }
361 #ifdef SMP
362 check:
363 #endif
364         if (if_start_need_schedule(ifq, running)) {
365                 crit_enter();
366                 if (lmsg->ms_flags & MSGF_DONE) { /* XXX necessary? */
367                         logifstart(sched, ifp);
368                         lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
369                 }
370                 crit_exit();
371         }
372 }
373
374 /* Device driver ifnet.if_start helper function */
375 void
376 if_devstart(struct ifnet *ifp)
377 {
378         struct ifaltq *ifq = &ifp->if_snd;
379         int running = 0;
380
381         ASSERT_IFNET_SERIALIZED_TX(ifp);
382
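        /*
         * Grab the if_start interlock (altq_started); bail out if it is
         * already held or there is nothing to transmit.
         */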
383         ALTQ_LOCK(ifq);
384         if (ifq->altq_started || !ifq_data_ready(ifq)) {
385                 logifstart(avoid, ifp);
386                 ALTQ_UNLOCK(ifq);
387                 return;
388         }
389         ifq->altq_started = 1;
390         ALTQ_UNLOCK(ifq);
391
392         if (if_devstart_schedonly) {
393                 /*
394                  * Always schedule ifnet.if_start on the ifnet's CPU,
395                  * short-circuiting the rest of this function.
396                  */
397                 logifstart(sched, ifp);
398                 if_start_schedule(ifp);
399                 return;
400         }
401
402         logifstart(run, ifp);
403         ifp->if_start(ifp);
404
405         if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
406                 running = 1;
407
408         if (if_devstart_schednochk || if_start_need_schedule(ifq, running)) {
409                 /*
410                  * More data needs to be transmitted, so ifnet.if_start is
411                  * scheduled on the ifnet's CPU and we keep going.
412                  * NOTE: the ifnet.if_start interlock is not released.
413                  */
414                 logifstart(sched, ifp);
415                 if_start_schedule(ifp);
416         }
417 }
418
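/*
 * Default serialize methods: map every serializer class onto the single
 * per-ifnet serializer, ifp->if_serializer.
 */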
419 static void
420 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
421 {
422         lwkt_serialize_enter(ifp->if_serializer);
423 }
424
425 static void
426 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
427 {
428         lwkt_serialize_exit(ifp->if_serializer);
429 }
430
431 static int
432 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
433 {
434         return lwkt_serialize_try(ifp->if_serializer);
435 }
436
437 #ifdef INVARIANTS
438 static void
439 if_default_serialize_assert(struct ifnet *ifp,
440                             enum ifnet_serialize slz __unused,
441                             boolean_t serialized)
442 {
443         if (serialized)
444                 ASSERT_SERIALIZED(ifp->if_serializer);
445         else
446                 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
447 }
448 #endif
449
450 /*
451  * Attach an interface to the list of "active" interfaces.
452  *
453  * The serializer is optional.  If non-NULL, access to the interface
454  * may be MPSAFE.
455  */
456 void
457 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
458 {
459         unsigned socksize, ifasize;
460         int namelen, masklen;
461         struct sockaddr_dl *sdl;
462         struct ifaddr *ifa;
463         struct ifaltq *ifq;
464         int i;
465
466         static int if_indexlim = 8;
467
468         if (ifp->if_serialize != NULL) {
469                 KASSERT(ifp->if_deserialize != NULL &&
470                         ifp->if_tryserialize != NULL &&
471                         ifp->if_serialize_assert != NULL,
472                         ("serialize functions are partially setup\n"));
473
474                 /*
475                  * If the device supplies serialize functions,
476                  * then clear if_serializer to catch any invalid
477                  * usage of this field.
478                  */
479                 KASSERT(serializer == NULL,
480                         ("both serialize functions and default serializer "
481                          "are supplied\n"));
482                 ifp->if_serializer = NULL;
483         } else {
484                 KASSERT(ifp->if_deserialize == NULL &&
485                         ifp->if_tryserialize == NULL &&
486                         ifp->if_serialize_assert == NULL,
487                         ("serialize functions are partially setup\n"));
488                 ifp->if_serialize = if_default_serialize;
489                 ifp->if_deserialize = if_default_deserialize;
490                 ifp->if_tryserialize = if_default_tryserialize;
491 #ifdef INVARIANTS
492                 ifp->if_serialize_assert = if_default_serialize_assert;
493 #endif
494
495                 /*
496                  * The serializer can be passed in from the device,
497                  * allowing the same serializer to be used for both
498                  * the interrupt interlock and the device queue.
499                  * If not specified, the netif structure will use an
500                  * embedded serializer.
501                  */
502                 if (serializer == NULL) {
503                         serializer = &ifp->if_default_serializer;
504                         lwkt_serialize_init(serializer);
505                 }
506                 ifp->if_serializer = serializer;
507         }
508
509         ifp->if_start_cpuid = if_start_cpuid;
510         ifp->if_cpuid = 0;
511
512 #ifdef DEVICE_POLLING
513         /* Device is not in polling mode by default */
514         ifp->if_poll_cpuid = -1;
515         if (ifp->if_poll != NULL)
516                 ifp->if_start_cpuid = if_start_cpuid_poll;
517 #endif
518
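        /*
         * Allocate a per-CPU if_start scheduling message; each one is
         * dispatched by if_start_dispatch() with this ifnet as its argument.
         */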
519         ifp->if_start_nmsg = kmalloc(ncpus * sizeof(struct netmsg),
520                                      M_LWKTMSG, M_WAITOK);
521         for (i = 0; i < ncpus; ++i) {
522                 netmsg_init(&ifp->if_start_nmsg[i], NULL, &netisr_adone_rport,
523                             0, if_start_dispatch);
524                 ifp->if_start_nmsg[i].nm_lmsg.u.ms_resultp = ifp;
525         }
526
527         TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
528         ifp->if_index = ++if_index;
529
530         /*
531          * XXX -
532          * The old code would work if the interface passed a pre-existing
533          * chain of ifaddrs to this code.  We don't trust our callers to
534          * properly initialize the tailq, however, so we no longer allow
535          * this unlikely case.
536          */
537         ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
538                                     M_IFADDR, M_WAITOK | M_ZERO);
539         for (i = 0; i < ncpus; ++i)
540                 TAILQ_INIT(&ifp->if_addrheads[i]);
541
542         TAILQ_INIT(&ifp->if_prefixhead);
543         TAILQ_INIT(&ifp->if_multiaddrs);
544         getmicrotime(&ifp->if_lastchange);
545         if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
546                 unsigned int n;
547                 struct ifnet **q;
548
549                 if_indexlim <<= 1;
550
551                 /* grow ifindex2ifnet */
552                 n = if_indexlim * sizeof(*q);
553                 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
554                 if (ifindex2ifnet) {
555                         bcopy(ifindex2ifnet, q, n/2);
556                         kfree(ifindex2ifnet, M_IFADDR);
557                 }
558                 ifindex2ifnet = q;
559         }
560
561         ifindex2ifnet[if_index] = ifp;
562
563         /*
564          * create a Link Level name for this device
565          */
566         namelen = strlen(ifp->if_xname);
567 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
568         masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
569         socksize = masklen + ifp->if_addrlen;
570 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
571         if (socksize < sizeof(*sdl))
572                 socksize = sizeof(*sdl);
573         socksize = ROUNDUP(socksize);
574         ifasize = sizeof(struct ifaddr) + 2 * socksize;
575         ifa = ifa_create(ifasize, M_WAITOK);
576         sdl = (struct sockaddr_dl *)(ifa + 1);
577         sdl->sdl_len = socksize;
578         sdl->sdl_family = AF_LINK;
579         bcopy(ifp->if_xname, sdl->sdl_data, namelen);
580         sdl->sdl_nlen = namelen;
581         sdl->sdl_index = ifp->if_index;
582         sdl->sdl_type = ifp->if_type;
583         ifp->if_lladdr = ifa;
584         ifa->ifa_ifp = ifp;
585         ifa->ifa_rtrequest = link_rtrequest;
586         ifa->ifa_addr = (struct sockaddr *)sdl;
587         sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
588         ifa->ifa_netmask = (struct sockaddr *)sdl;
589         sdl->sdl_len = masklen;
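        /* Build a netmask that matches on the interface-name portion. */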
590         while (namelen != 0)
591                 sdl->sdl_data[--namelen] = 0xff;
592         ifa_iflink(ifa, ifp, 0 /* Insert head */);
593
594         EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
595         devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
596
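        /* Initialize the send queue; classic FIFO handlers by default. */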
597         ifq = &ifp->if_snd;
598         ifq->altq_type = 0;
599         ifq->altq_disc = NULL;
600         ifq->altq_flags &= ALTQF_CANTCHANGE;
601         ifq->altq_tbr = NULL;
602         ifq->altq_ifp = ifp;
603         ifq->altq_started = 0;
604         ifq->altq_prepended = NULL;
605         ALTQ_LOCK_INIT(ifq);
606         ifq_set_classic(ifq);
607
608         if (!SLIST_EMPTY(&domains))
609                 if_attachdomain1(ifp);
610
611         /* Announce the interface. */
612         rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
613 }
614
615 static void
616 if_attachdomain(void *dummy)
617 {
618         struct ifnet *ifp;
619
620         crit_enter();
621         TAILQ_FOREACH(ifp, &ifnet, if_list)
622                 if_attachdomain1(ifp);
623         crit_exit();
624 }
625 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
626         if_attachdomain, NULL);
627
628 static void
629 if_attachdomain1(struct ifnet *ifp)
630 {
631         struct domain *dp;
632
633         crit_enter();
634
635         /* address family dependent data region */
636         bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
637         SLIST_FOREACH(dp, &domains, dom_next)
638                 if (dp->dom_ifattach)
639                         ifp->if_afdata[dp->dom_family] =
640                                 (*dp->dom_ifattach)(ifp);
641         crit_exit();
642 }
643
644 /*
645  * Purge all addresses whose type is _not_ AF_LINK
646  */
647 void
648 if_purgeaddrs_nolink(struct ifnet *ifp)
649 {
650         struct ifaddr_container *ifac, *next;
651
652         TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
653                               ifa_link, next) {
654                 struct ifaddr *ifa = ifac->ifa;
655
656                 /* Leave link ifaddr as it is */
657                 if (ifa->ifa_addr->sa_family == AF_LINK)
658                         continue;
659 #ifdef INET
660                 /* XXX: Ugly!! ad hoc just for INET */
661                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
662                         struct ifaliasreq ifr;
663 #ifdef IFADDR_DEBUG_VERBOSE
664                         int i;
665
666                         kprintf("purge in4 addr %p: ", ifa);
667                         for (i = 0; i < ncpus; ++i)
668                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
669                         kprintf("\n");
670 #endif
671
672                         bzero(&ifr, sizeof ifr);
673                         ifr.ifra_addr = *ifa->ifa_addr;
674                         if (ifa->ifa_dstaddr)
675                                 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
676                         if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
677                                        NULL) == 0)
678                                 continue;
679                 }
680 #endif /* INET */
681 #ifdef INET6
682                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
683 #ifdef IFADDR_DEBUG_VERBOSE
684                         int i;
685
686                         kprintf("purge in6 addr %p: ", ifa);
687                         for (i = 0; i < ncpus; ++i)
688                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
689                         kprintf("\n");
690 #endif
691
692                         in6_purgeaddr(ifa);
693                         /* ifp_addrhead is already updated */
694                         continue;
695                 }
696 #endif /* INET6 */
697                 ifa_ifunlink(ifa, ifp);
698                 ifa_destroy(ifa);
699         }
700 }
701
702 /*
703  * Detach an interface, removing it from the
704  * list of "active" interfaces.
705  */
706 void
707 if_detach(struct ifnet *ifp)
708 {
709         struct radix_node_head  *rnh;
710         int i;
711         int cpu, origcpu;
712         struct domain *dp;
713
714         EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
715
716         /*
717          * Remove routes and flush queues.
718          */
719         crit_enter();
720 #ifdef DEVICE_POLLING
721         if (ifp->if_flags & IFF_POLLING)
722                 ether_poll_deregister(ifp);
723 #endif
724 #ifdef IFPOLL_ENABLE
725         if (ifp->if_flags & IFF_NPOLLING)
726                 ifpoll_deregister(ifp);
727 #endif
728         if_down(ifp);
729
730 #ifdef ALTQ
731         if (ifq_is_enabled(&ifp->if_snd))
732                 altq_disable(&ifp->if_snd);
733         if (ifq_is_attached(&ifp->if_snd))
734                 altq_detach(&ifp->if_snd);
735 #endif
736
737         /*
738          * Clean up all addresses.
739          */
740         ifp->if_lladdr = NULL;
741
742         if_purgeaddrs_nolink(ifp);
743         if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
744                 struct ifaddr *ifa;
745
746                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
747                 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
748                         ("non-link ifaddr is left on if_addrheads"));
749
750                 ifa_ifunlink(ifa, ifp);
751                 ifa_destroy(ifa);
752                 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
753                         ("there are still ifaddrs left on if_addrheads"));
754         }
755
756 #ifdef INET
757         /*
758          * Remove all IPv4 kernel structures related to ifp.
759          */
760         in_ifdetach(ifp);
761 #endif
762
763 #ifdef INET6
764         /*
765          * Remove all IPv6 kernel structs related to ifp.  This should be done
766          * before removing routing entries below, since IPv6 interface direct
767          * routes are expected to be removed by the IPv6-specific kernel API.
768  * Otherwise, the kernel will detect the inconsistency and complain about it.
769          */
770         in6_ifdetach(ifp);
771 #endif
772
773         /*
774  * Delete all remaining routes using this interface.
775  * Unfortunately the only way to do this is to slog through
776          * the entire routing table looking for routes which point
777          * to this interface...oh well...
778          */
779         origcpu = mycpuid;
780         for (cpu = 0; cpu < ncpus2; cpu++) {
781                 lwkt_migratecpu(cpu);
782                 for (i = 1; i <= AF_MAX; i++) {
783                         if ((rnh = rt_tables[cpu][i]) == NULL)
784                                 continue;
785                         rnh->rnh_walktree(rnh, if_rtdel, ifp);
786                 }
787         }
788         lwkt_migratecpu(origcpu);
789
790         /* Announce that the interface is gone. */
791         rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
792         devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
793
794         SLIST_FOREACH(dp, &domains, dom_next)
795                 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
796                         (*dp->dom_ifdetach)(ifp,
797                                 ifp->if_afdata[dp->dom_family]);
798
799         /*
800          * Remove interface from ifindex2ifp[] and maybe decrement if_index.
801          */
802         ifindex2ifnet[ifp->if_index] = NULL;
803         while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
804                 if_index--;
805
806         TAILQ_REMOVE(&ifnet, ifp, if_link);
807         kfree(ifp->if_addrheads, M_IFADDR);
808         kfree(ifp->if_start_nmsg, M_LWKTMSG);
809         crit_exit();
810 }
811
812 /*
813  * Delete Routes for a Network Interface
814  *
815  * Called for each routing entry via the rnh->rnh_walktree() call above
816  * to delete all route entries referencing a detaching network interface.
817  *
818  * Arguments:
819  *      rn      pointer to node in the routing table
820  *      arg     argument passed to rnh->rnh_walktree() - detaching interface
821  *
822  * Returns:
823  *      0       successful
824  *      errno   failed - reason indicated
825  *
826  */
827 static int
828 if_rtdel(struct radix_node *rn, void *arg)
829 {
830         struct rtentry  *rt = (struct rtentry *)rn;
831         struct ifnet    *ifp = arg;
832         int             err;
833
834         if (rt->rt_ifp == ifp) {
835
836                 /*
837                  * Protect (sorta) against walktree recursion problems
838                  * with cloned routes
839                  */
840                 if (!(rt->rt_flags & RTF_UP))
841                         return (0);
842
843                 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
844                                 rt_mask(rt), rt->rt_flags,
845                                 NULL);
846                 if (err) {
847                         log(LOG_WARNING, "if_rtdel: error %d\n", err);
848                 }
849         }
850
851         return (0);
852 }
853
854 /*
855  * Locate an interface based on a complete address.
856  */
857 struct ifaddr *
858 ifa_ifwithaddr(struct sockaddr *addr)
859 {
860         struct ifnet *ifp;
861
862         TAILQ_FOREACH(ifp, &ifnet, if_link) {
863                 struct ifaddr_container *ifac;
864
865                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
866                         struct ifaddr *ifa = ifac->ifa;
867
868                         if (ifa->ifa_addr->sa_family != addr->sa_family)
869                                 continue;
870                         if (sa_equal(addr, ifa->ifa_addr))
871                                 return (ifa);
872                         if ((ifp->if_flags & IFF_BROADCAST) &&
873                             ifa->ifa_broadaddr &&
874                             /* IPv6 doesn't have broadcast */
875                             ifa->ifa_broadaddr->sa_len != 0 &&
876                             sa_equal(ifa->ifa_broadaddr, addr))
877                                 return (ifa);
878                 }
879         }
880         return (NULL);
881 }
882 /*
883  * Locate the point to point interface with a given destination address.
884  */
885 struct ifaddr *
886 ifa_ifwithdstaddr(struct sockaddr *addr)
887 {
888         struct ifnet *ifp;
889
890         TAILQ_FOREACH(ifp, &ifnet, if_link) {
891                 struct ifaddr_container *ifac;
892
893                 if (!(ifp->if_flags & IFF_POINTOPOINT))
894                         continue;
895
896                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
897                         struct ifaddr *ifa = ifac->ifa;
898
899                         if (ifa->ifa_addr->sa_family != addr->sa_family)
900                                 continue;
901                         if (ifa->ifa_dstaddr &&
902                             sa_equal(addr, ifa->ifa_dstaddr))
903                                 return (ifa);
904                 }
905         }
906         return (NULL);
907 }
908
909 /*
910  * Find an interface on a specific network.  If there are many,
911  * the most specific match found is chosen.
912  */
913 struct ifaddr *
914 ifa_ifwithnet(struct sockaddr *addr)
915 {
916         struct ifnet *ifp;
917         struct ifaddr *ifa_maybe = NULL;
918         u_int af = addr->sa_family;
919         char *addr_data = addr->sa_data, *cplim;
920
921         /*
922          * AF_LINK addresses can be looked up directly by their index number,
923          * so do that if we can.
924          */
925         if (af == AF_LINK) {
926                 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
927
928                 if (sdl->sdl_index && sdl->sdl_index <= if_index)
929                         return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
930         }
931
932         /*
933  * Scan through each interface, looking for ones that have
934          * addresses in this address family.
935          */
936         TAILQ_FOREACH(ifp, &ifnet, if_link) {
937                 struct ifaddr_container *ifac;
938
939                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
940                         struct ifaddr *ifa = ifac->ifa;
941                         char *cp, *cp2, *cp3;
942
943                         if (ifa->ifa_addr->sa_family != af)
944 next:                           continue;
945                         if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
946                                 /*
947                                  * This is a bit broken as it doesn't
948                                  * take into account that the remote end may
949                                  * be a single node in the network we are
950                                  * looking for.
951                                  * The trouble is that we don't know the
952                                  * netmask for the remote end.
953                                  */
954                                 if (ifa->ifa_dstaddr != NULL &&
955                                     sa_equal(addr, ifa->ifa_dstaddr))
956                                         return (ifa);
957                         } else {
958                                 /*
959                                  * if we have a special address handler,
960                                  * then use it instead of the generic one.
961                                  */
962                                 if (ifa->ifa_claim_addr) {
963                                         if ((*ifa->ifa_claim_addr)(ifa, addr)) {
964                                                 return (ifa);
965                                         } else {
966                                                 continue;
967                                         }
968                                 }
969
970                                 /*
971                                  * Scan all the bits in the ifa's address.
972                  * If a bit disagrees with what we are
973                                  * looking for, mask it with the netmask
974                                  * to see if it really matters.
975                                  * (A byte at a time)
976                                  */
977                                 if (ifa->ifa_netmask == 0)
978                                         continue;
979                                 cp = addr_data;
980                                 cp2 = ifa->ifa_addr->sa_data;
981                                 cp3 = ifa->ifa_netmask->sa_data;
982                                 cplim = ifa->ifa_netmask->sa_len +
983                                         (char *)ifa->ifa_netmask;
984                                 while (cp3 < cplim)
985                                         if ((*cp++ ^ *cp2++) & *cp3++)
986                                                 goto next; /* next address! */
987                                 /*
988                                  * If the netmask of what we just found
989                                  * is more specific than what we had before
990                                  * (if we had one) then remember the new one
991                                  * before continuing to search
992                                  * for an even better one.
993                                  */
994                                 if (ifa_maybe == 0 ||
995                                     rn_refines((char *)ifa->ifa_netmask,
996                                                (char *)ifa_maybe->ifa_netmask))
997                                         ifa_maybe = ifa;
998                         }
999                 }
1000         }
1001         return (ifa_maybe);
1002 }
1003
1004 /*
1005  * Find an interface address specific to an interface best matching
1006  * a given address.
1007  */
1008 struct ifaddr *
1009 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1010 {
1011         struct ifaddr_container *ifac;
1012         char *cp, *cp2, *cp3;
1013         char *cplim;
1014         struct ifaddr *ifa_maybe = 0;
1015         u_int af = addr->sa_family;
1016
1017         if (af >= AF_MAX)
1018                 return (0);
1019         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1020                 struct ifaddr *ifa = ifac->ifa;
1021
1022                 if (ifa->ifa_addr->sa_family != af)
1023                         continue;
1024                 if (ifa_maybe == 0)
1025                         ifa_maybe = ifa;
1026                 if (ifa->ifa_netmask == NULL) {
1027                         if (sa_equal(addr, ifa->ifa_addr) ||
1028                             (ifa->ifa_dstaddr != NULL &&
1029                              sa_equal(addr, ifa->ifa_dstaddr)))
1030                                 return (ifa);
1031                         continue;
1032                 }
1033                 if (ifp->if_flags & IFF_POINTOPOINT) {
1034                         if (sa_equal(addr, ifa->ifa_dstaddr))
1035                                 return (ifa);
1036                 } else {
1037                         cp = addr->sa_data;
1038                         cp2 = ifa->ifa_addr->sa_data;
1039                         cp3 = ifa->ifa_netmask->sa_data;
1040                         cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1041                         for (; cp3 < cplim; cp3++)
1042                                 if ((*cp++ ^ *cp2++) & *cp3)
1043                                         break;
1044                         if (cp3 == cplim)
1045                                 return (ifa);
1046                 }
1047         }
1048         return (ifa_maybe);
1049 }
1050
1051 /*
1052  * Default action when installing a route with a Link Level gateway.
1053  * Look up an appropriate real ifa to point to.
1054  * This should be moved to /sys/net/link.c eventually.
1055  */
1056 static void
1057 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1058 {
1059         struct ifaddr *ifa;
1060         struct sockaddr *dst;
1061         struct ifnet *ifp;
1062
1063         if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1064             (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
1065                 return;
1066         ifa = ifaof_ifpforaddr(dst, ifp);
1067         if (ifa != NULL) {
1068                 IFAFREE(rt->rt_ifa);
1069                 IFAREF(ifa);
1070                 rt->rt_ifa = ifa;
1071                 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1072                         ifa->ifa_rtrequest(cmd, rt, info);
1073         }
1074 }
1075
1076 /*
1077  * Mark an interface down and notify protocols of
1078  * the transition.
1079  * NOTE: must be called at splnet or equivalent.
1080  */
1081 void
1082 if_unroute(struct ifnet *ifp, int flag, int fam)
1083 {
1084         struct ifaddr_container *ifac;
1085
1086         ifp->if_flags &= ~flag;
1087         getmicrotime(&ifp->if_lastchange);
1088         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1089                 struct ifaddr *ifa = ifac->ifa;
1090
1091                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1092                         kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1093         }
1094         ifq_purge(&ifp->if_snd);
1095         rt_ifmsg(ifp);
1096 }
1097
1098 /*
1099  * Mark an interface up and notify protocols of
1100  * the transition.
1101  * NOTE: must be called at splnet or equivalent.
1102  */
1103 void
1104 if_route(struct ifnet *ifp, int flag, int fam)
1105 {
1106         struct ifaddr_container *ifac;
1107
1108         ifq_purge(&ifp->if_snd);
1109         ifp->if_flags |= flag;
1110         getmicrotime(&ifp->if_lastchange);
1111         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1112                 struct ifaddr *ifa = ifac->ifa;
1113
1114                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1115                         kpfctlinput(PRC_IFUP, ifa->ifa_addr);
1116         }
1117         rt_ifmsg(ifp);
1118 #ifdef INET6
1119         in6_if_up(ifp);
1120 #endif
1121 }
1122
1123 /*
1124  * Mark an interface down and notify protocols of the transition.  An
1125  * interface going down is also considered to be a synchronizing event.
1126  * We must ensure that all packet processing related to the interface
1127  * has completed before we return, so that e.g. the caller can free the ifnet
1128  * structure that the mbufs may be referencing.
1129  *
1130  * NOTE: must be called at splnet or equivalent.
1131  */
1132 void
1133 if_down(struct ifnet *ifp)
1134 {
1135         if_unroute(ifp, IFF_UP, AF_UNSPEC);
1136         netmsg_service_sync();
1137 }
1138
1139 /*
1140  * Mark an interface up and notify protocols of
1141  * the transition.
1142  * NOTE: must be called at splnet or equivalent.
1143  */
1144 void
1145 if_up(struct ifnet *ifp)
1146 {
1147         if_route(ifp, IFF_UP, AF_UNSPEC);
1148 }
1149
1150 /*
1151  * Process a link state change.
1152  * NOTE: must be called at splsoftnet or equivalent.
1153  */
1154 void
1155 if_link_state_change(struct ifnet *ifp)
1156 {
1157         int link_state = ifp->if_link_state;
1158
1159         rt_ifmsg(ifp);
1160         devctl_notify("IFNET", ifp->if_xname,
1161             (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1162 }
1163
1164 /*
1165  * Handle interface watchdog timer routines.  Called
1166  * from softclock, we decrement timers (if set) and
1167  * call the appropriate interface routine on expiration.
1168  */
1169 static void
1170 if_slowtimo(void *arg)
1171 {
1172         struct ifnet *ifp;
1173
1174         crit_enter();
1175
1176         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1177                 if (ifp->if_timer == 0 || --ifp->if_timer)
1178                         continue;
1179                 if (ifp->if_watchdog) {
1180                         if (ifnet_tryserialize_all(ifp)) {
1181                                 (*ifp->if_watchdog)(ifp);
1182                                 ifnet_deserialize_all(ifp);
1183                         } else {
1184                                 /* try again next timeout */
1185                                 ++ifp->if_timer;
1186                         }
1187                 }
1188         }
1189
1190         crit_exit();
1191
1192         callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
1193 }
1194
1195 /*
1196  * Map interface name to
1197  * interface structure pointer.
1198  */
1199 struct ifnet *
1200 ifunit(const char *name)
1201 {
1202         struct ifnet *ifp;
1203
1204         /*
1205          * Search all the interfaces for this name/number
1206          */
1207
1208         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1209                 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
1210                         break;
1211         }
1212         return (ifp);
1213 }
1214
1215
1216 /*
1217  * Map interface name in a sockaddr_dl to
1218  * interface structure pointer.
1219  */
1220 struct ifnet *
1221 if_withname(struct sockaddr *sa)
1222 {
1223         char ifname[IFNAMSIZ+1];
1224         struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1225
1226         if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1227              (sdl->sdl_nlen > IFNAMSIZ) )
1228                 return NULL;
1229
1230         /*
1231          * ifunit wants a null-terminated name.  It may not be null-terminated
1232          * in the sockaddr.  We don't want to change the caller's sockaddr,
1233          * and there might not be room to put the trailing null anyway, so we
1234          * make a local copy that we know we can null terminate safely.
1235          */
1236
1237         bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1238         ifname[sdl->sdl_nlen] = '\0';
1239         return ifunit(ifname);
1240 }
1241
1242
1243 /*
1244  * Interface ioctls.
1245  */
1246 int
1247 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
1248 {
1249         struct ifnet *ifp;
1250         struct ifreq *ifr;
1251         struct ifstat *ifs;
1252         int error;
1253         short oif_flags;
1254         int new_flags;
1255         size_t namelen, onamelen;
1256         char new_name[IFNAMSIZ];
1257         struct ifaddr *ifa;
1258         struct sockaddr_dl *sdl;
1259
1260         switch (cmd) {
1261
1262         case SIOCGIFCONF:
1263         case OSIOCGIFCONF:
1264                 return (ifconf(cmd, data, cred));
1265         }
1266         ifr = (struct ifreq *)data;
1267
1268         switch (cmd) {
1269         case SIOCIFCREATE:
1270         case SIOCIFCREATE2:
1271                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1272                         return (error);
1273                 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1274                         cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
1275         case SIOCIFDESTROY:
1276                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1277                         return (error);
1278                 return (if_clone_destroy(ifr->ifr_name));
1279
1280         case SIOCIFGCLONERS:
1281                 return (if_clone_list((struct if_clonereq *)data));
1282         }
1283
1284         ifp = ifunit(ifr->ifr_name);
1285         if (ifp == 0)
1286                 return (ENXIO);
1287         switch (cmd) {
1288
1289         case SIOCGIFINDEX:
1290                 ifr->ifr_index = ifp->if_index;
1291                 break;
1292
1293         case SIOCGIFFLAGS:
1294                 ifr->ifr_flags = ifp->if_flags;
1295                 ifr->ifr_flagshigh = ifp->if_flags >> 16;
1296                 break;
1297
1298         case SIOCGIFCAP:
1299                 ifr->ifr_reqcap = ifp->if_capabilities;
1300                 ifr->ifr_curcap = ifp->if_capenable;
1301                 break;
1302
1303         case SIOCGIFMETRIC:
1304                 ifr->ifr_metric = ifp->if_metric;
1305                 break;
1306
1307         case SIOCGIFMTU:
1308                 ifr->ifr_mtu = ifp->if_mtu;
1309                 break;
1310
1311         case SIOCGIFPHYS:
1312                 ifr->ifr_phys = ifp->if_physical;
1313                 break;
1314
1315         case SIOCGIFPOLLCPU:
1316 #ifdef DEVICE_POLLING
1317                 ifr->ifr_pollcpu = ifp->if_poll_cpuid;
1318 #else
1319                 ifr->ifr_pollcpu = -1;
1320 #endif
1321                 break;
1322
1323         case SIOCSIFPOLLCPU:
1324 #ifdef DEVICE_POLLING
1325                 if ((ifp->if_flags & IFF_POLLING) == 0)
1326                         ether_pollcpu_register(ifp, ifr->ifr_pollcpu);
1327 #endif
1328                 break;
1329
1330         case SIOCSIFFLAGS:
1331                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1332                 if (error)
1333                         return (error);
1334                 new_flags = (ifr->ifr_flags & 0xffff) |
1335                     (ifr->ifr_flagshigh << 16);
1336                 if (ifp->if_flags & IFF_SMART) {
1337                         /* Smart drivers twiddle their own routes */
1338                 } else if (ifp->if_flags & IFF_UP &&
1339                     (new_flags & IFF_UP) == 0) {
1340                         crit_enter();
1341                         if_down(ifp);
1342                         crit_exit();
1343                 } else if (new_flags & IFF_UP &&
1344                     (ifp->if_flags & IFF_UP) == 0) {
1345                         crit_enter();
1346                         if_up(ifp);
1347                         crit_exit();
1348                 }
1349
1350 #ifdef DEVICE_POLLING
1351                 if ((new_flags ^ ifp->if_flags) & IFF_POLLING) {
1352                         if (new_flags & IFF_POLLING) {
1353                                 ether_poll_register(ifp);
1354                         } else {
1355                                 ether_poll_deregister(ifp);
1356                         }
1357                 }
1358 #endif
1359 #ifdef IFPOLL_ENABLE
1360                 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1361                         if (new_flags & IFF_NPOLLING)
1362                                 ifpoll_register(ifp);
1363                         else
1364                                 ifpoll_deregister(ifp);
1365                 }
1366 #endif
1367
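                /* Merge in the new flags, preserving the IFF_CANTCHANGE bits. */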
1368                 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1369                         (new_flags &~ IFF_CANTCHANGE);
1370                 if (new_flags & IFF_PPROMISC) {
1371                         /* Permanently promiscuous mode requested */
1372                         ifp->if_flags |= IFF_PROMISC;
1373                 } else if (ifp->if_pcount == 0) {
1374                         ifp->if_flags &= ~IFF_PROMISC;
1375                 }
1376                 if (ifp->if_ioctl) {
1377                         ifnet_serialize_all(ifp);
1378                         ifp->if_ioctl(ifp, cmd, data, cred);
1379                         ifnet_deserialize_all(ifp);
1380                 }
1381                 getmicrotime(&ifp->if_lastchange);
1382                 break;
1383
1384         case SIOCSIFCAP:
1385                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1386                 if (error)
1387                         return (error);
1388                 if (ifr->ifr_reqcap & ~ifp->if_capabilities)
1389                         return (EINVAL);
1390                 ifnet_serialize_all(ifp);
1391                 ifp->if_ioctl(ifp, cmd, data, cred);
1392                 ifnet_deserialize_all(ifp);
1393                 break;
1394
1395         case SIOCSIFNAME:
1396                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1397                 if (error != 0)
1398                         return (error);
1399                 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
1400                 if (error != 0)
1401                         return (error);
1402                 if (new_name[0] == '\0')
1403                         return (EINVAL);
1404                 if (ifunit(new_name) != NULL)
1405                         return (EEXIST);
1406
1407                 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
1408
1409                 /* Announce the departure of the interface. */
1410                 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1411
1412                 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
1413                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
1414                 /* XXX IFA_LOCK(ifa); */
1415                 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1416                 namelen = strlen(new_name);
1417                 onamelen = sdl->sdl_nlen;
1418                 /*
1419                  * Move the address if needed.  This is safe because we
1420                  * allocate space for a name of length IFNAMSIZ when we
1421                  * create this in if_attach().
1422                  */
1423                 if (namelen != onamelen) {
1424                         bcopy(sdl->sdl_data + onamelen,
1425                             sdl->sdl_data + namelen, sdl->sdl_alen);
1426                 }
1427                 bcopy(new_name, sdl->sdl_data, namelen);
1428                 sdl->sdl_nlen = namelen;
1429                 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1430                 bzero(sdl->sdl_data, onamelen);
1431                 while (namelen != 0)
1432                         sdl->sdl_data[--namelen] = 0xff;
1433                 /* XXX IFA_UNLOCK(ifa) */
1434
1435                 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
1436
1437                 /* Announce the return of the interface. */
1438                 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1439                 break;
1440
1441         case SIOCSIFMETRIC:
1442                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1443                 if (error)
1444                         return (error);
1445                 ifp->if_metric = ifr->ifr_metric;
1446                 getmicrotime(&ifp->if_lastchange);
1447                 break;
1448
1449         case SIOCSIFPHYS:
1450                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1451                 if (error)
1452                         return error;
1453                 if (!ifp->if_ioctl)
1454                         return EOPNOTSUPP;
1455                 ifnet_serialize_all(ifp);
1456                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1457                 ifnet_deserialize_all(ifp);
1458                 if (error == 0)
1459                         getmicrotime(&ifp->if_lastchange);
1460                 return (error);
1461
1462         case SIOCSIFMTU:
1463         {
1464                 u_long oldmtu = ifp->if_mtu;
1465
1466                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1467                 if (error)
1468                         return (error);
1469                 if (ifp->if_ioctl == NULL)
1470                         return (EOPNOTSUPP);
1471                 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
1472                         return (EINVAL);
1473                 ifnet_serialize_all(ifp);
1474                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1475                 ifnet_deserialize_all(ifp);
1476                 if (error == 0) {
1477                         getmicrotime(&ifp->if_lastchange);
1478                         rt_ifmsg(ifp);
1479                 }
1480                 /*
1481                  * If the link MTU changed, run the network layer specific procedures.
1482                  */
1483                 if (ifp->if_mtu != oldmtu) {
1484 #ifdef INET6
1485                         nd6_setmtu(ifp);
1486 #endif
1487                 }
1488                 return (error);
1489         }
1490
1491         case SIOCADDMULTI:
1492         case SIOCDELMULTI:
1493                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1494                 if (error)
1495                         return (error);
1496
1497                 /* Don't allow group membership on non-multicast interfaces. */
1498                 if ((ifp->if_flags & IFF_MULTICAST) == 0)
1499                         return EOPNOTSUPP;
1500
1501                 /* Don't let users screw up protocols' entries. */
1502                 if (ifr->ifr_addr.sa_family != AF_LINK)
1503                         return EINVAL;
1504
1505                 if (cmd == SIOCADDMULTI) {
1506                         struct ifmultiaddr *ifma;
1507                         error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1508                 } else {
1509                         error = if_delmulti(ifp, &ifr->ifr_addr);
1510                 }
1511                 if (error == 0)
1512                         getmicrotime(&ifp->if_lastchange);
1513                 return error;
1514
1515         case SIOCSIFPHYADDR:
1516         case SIOCDIFPHYADDR:
1517 #ifdef INET6
1518         case SIOCSIFPHYADDR_IN6:
1519 #endif
1520         case SIOCSLIFPHYADDR:
1521         case SIOCSIFMEDIA:
1522         case SIOCSIFGENERIC:
1523                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1524                 if (error)
1525                         return (error);
1526                 if (ifp->if_ioctl == 0)
1527                         return (EOPNOTSUPP);
1528                 ifnet_serialize_all(ifp);
1529                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1530                 ifnet_deserialize_all(ifp);
1531                 if (error == 0)
1532                         getmicrotime(&ifp->if_lastchange);
1533                 return error;
1534
1535         case SIOCGIFSTATUS:
1536                 ifs = (struct ifstat *)data;
1537                 ifs->ascii[0] = '\0';
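                /* FALLTHROUGH: hand the request to the driver's if_ioctl below */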
1538
1539         case SIOCGIFPSRCADDR:
1540         case SIOCGIFPDSTADDR:
1541         case SIOCGLIFPHYADDR:
1542         case SIOCGIFMEDIA:
1543         case SIOCGIFGENERIC:
1544                 if (ifp->if_ioctl == NULL)
1545                         return (EOPNOTSUPP);
1546                 ifnet_serialize_all(ifp);
1547                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1548                 ifnet_deserialize_all(ifp);
1549                 return (error);
1550
1551         case SIOCSIFLLADDR:
1552                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1553                 if (error)
1554                         return (error);
1555                 error = if_setlladdr(ifp,
1556                     ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
1557                 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
1558                 return (error);
1559
1560         default:
1561                 oif_flags = ifp->if_flags;
1562                 if (so->so_proto == 0)
1563                         return (EOPNOTSUPP);
1564 #ifndef COMPAT_43
1565                 error = so_pru_control(so, cmd, data, ifp);
1566 #else
1567             {
1568                 int ocmd = cmd;
1569
1570                 switch (cmd) {
1571
1572                 case SIOCSIFDSTADDR:
1573                 case SIOCSIFADDR:
1574                 case SIOCSIFBRDADDR:
1575                 case SIOCSIFNETMASK:
1576 #if BYTE_ORDER != BIG_ENDIAN
1577                         if (ifr->ifr_addr.sa_family == 0 &&
1578                             ifr->ifr_addr.sa_len < 16) {
1579                                 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1580                                 ifr->ifr_addr.sa_len = 16;
1581                         }
1582 #else
1583                         if (ifr->ifr_addr.sa_len == 0)
1584                                 ifr->ifr_addr.sa_len = 16;
1585 #endif
1586                         break;
1587
1588                 case OSIOCGIFADDR:
1589                         cmd = SIOCGIFADDR;
1590                         break;
1591
1592                 case OSIOCGIFDSTADDR:
1593                         cmd = SIOCGIFDSTADDR;
1594                         break;
1595
1596                 case OSIOCGIFBRDADDR:
1597                         cmd = SIOCGIFBRDADDR;
1598                         break;
1599
1600                 case OSIOCGIFNETMASK:
1601                         cmd = SIOCGIFNETMASK;
1602                 }
1603                 error =  so_pru_control(so, cmd, data, ifp);
1604                 switch (ocmd) {
1605
1606                 case OSIOCGIFADDR:
1607                 case OSIOCGIFDSTADDR:
1608                 case OSIOCGIFBRDADDR:
1609                 case OSIOCGIFNETMASK:
1610                         *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
1611
1612                 }
1613             }
1614 #endif /* COMPAT_43 */
1615
1616                 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1617 #ifdef INET6
1618                         DELAY(100);/* XXX: temporary workaround for fxp issue*/
1619                         if (ifp->if_flags & IFF_UP) {
1620                                 crit_enter();
1621                                 in6_if_up(ifp);
1622                                 crit_exit();
1623                         }
1624 #endif
1625                 }
1626                 return (error);
1627
1628         }
1629         return (0);
1630 }
1631
1632 /*
1633  * Set/clear promiscuous mode on interface ifp based on the truth value
1634  * of pswitch.  The calls are reference counted so that only the first
1635  * "on" request actually has an effect, as does the final "off" request.
1636  * Results are undefined if the "off" and "on" requests are not matched.
1637  */
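/*
 * Usage sketch (illustrative): a consumer such as a packet tap brackets
 * its lifetime with matched calls, e.g.
 *
 *	error = ifpromisc(ifp, 1);	(first "on" sets IFF_PROMISC)
 *	...
 *	if (error == 0)
 *		ifpromisc(ifp, 0);	(matching "off" clears it again)
 */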
1638 int
1639 ifpromisc(struct ifnet *ifp, int pswitch)
1640 {
1641         struct ifreq ifr;
1642         int error;
1643         int oldflags;
1644
1645         oldflags = ifp->if_flags;
1646         if (ifp->if_flags & IFF_PPROMISC) {
1647                 /* Do nothing if device is in permanently promiscuous mode */
1648                 ifp->if_pcount += pswitch ? 1 : -1;
1649                 return (0);
1650         }
1651         if (pswitch) {
1652                 /*
1653                  * If the device is not configured up, we cannot put it in
1654                  * promiscuous mode.
1655                  */
1656                 if ((ifp->if_flags & IFF_UP) == 0)
1657                         return (ENETDOWN);
1658                 if (ifp->if_pcount++ != 0)
1659                         return (0);
1660                 ifp->if_flags |= IFF_PROMISC;
1661                 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1662                     ifp->if_xname);
1663         } else {
1664                 if (--ifp->if_pcount > 0)
1665                         return (0);
1666                 ifp->if_flags &= ~IFF_PROMISC;
1667                 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1668                     ifp->if_xname);
1669         }
1670         ifr.ifr_flags = ifp->if_flags;
1671         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1672         ifnet_serialize_all(ifp);
1673         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1674         ifnet_deserialize_all(ifp);
1675         if (error == 0)
1676                 rt_ifmsg(ifp);
1677         else
1678                 ifp->if_flags = oldflags;
1679         return error;
1680 }
1681
1682 /*
1683  * Return the interface configuration of
1684  * the system.  The returned list may be
1685  * used in later ioctls (above) to get
1686  * other information.
1687  */
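/*
 * Userland sketch of the ioctl this implements (the buffer size is an
 * arbitrary example):
 *
 *	struct ifconf ifc;
 *	char buf[4096];
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(s, SIOCGIFCONF, &ifc) == 0)
 *		... walk buf; ifc.ifc_len is the number of bytes used ...
 */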
1688 static int
1689 ifconf(u_long cmd, caddr_t data, struct ucred *cred)
1690 {
1691         struct ifconf *ifc = (struct ifconf *)data;
1692         struct ifnet *ifp;
1693         struct sockaddr *sa;
1694         struct ifreq ifr, *ifrp;
1695         int space = ifc->ifc_len, error = 0;
1696
1697         ifrp = ifc->ifc_req;
1698         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1699                 struct ifaddr_container *ifac;
1700                 int addrs;
1701
1702                 if (space <= sizeof ifr)
1703                         break;
1704
1705                 /*
1706                  * Zero the stack declared structure first to prevent
1707                  * memory disclosure.
1708                  */
1709                 bzero(&ifr, sizeof(ifr));
1710                 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1711                     >= sizeof(ifr.ifr_name)) {
1712                         error = ENAMETOOLONG;
1713                         break;
1714                 }
1715
1716                 addrs = 0;
1717                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1718                         struct ifaddr *ifa = ifac->ifa;
1719
1720                         if (space <= sizeof ifr)
1721                                 break;
1722                         sa = ifa->ifa_addr;
1723                         if (cred->cr_prison &&
1724                             prison_if(cred, sa))
1725                                 continue;
1726                         addrs++;
1727 #ifdef COMPAT_43
1728                         if (cmd == OSIOCGIFCONF) {
1729                                 struct osockaddr *osa =
1730                                          (struct osockaddr *)&ifr.ifr_addr;
1731                                 ifr.ifr_addr = *sa;
1732                                 osa->sa_family = sa->sa_family;
1733                                 error = copyout(&ifr, ifrp, sizeof ifr);
1734                                 ifrp++;
1735                         } else
1736 #endif
1737                         if (sa->sa_len <= sizeof(*sa)) {
1738                                 ifr.ifr_addr = *sa;
1739                                 error = copyout(&ifr, ifrp, sizeof ifr);
1740                                 ifrp++;
1741                         } else {
1742                                 if (space < (sizeof ifr) + sa->sa_len -
1743                                             sizeof(*sa))
1744                                         break;
1745                                 space -= sa->sa_len - sizeof(*sa);
1746                                 error = copyout(&ifr, ifrp,
1747                                                 sizeof ifr.ifr_name);
1748                                 if (error == 0)
1749                                         error = copyout(sa, &ifrp->ifr_addr,
1750                                                         sa->sa_len);
1751                                 ifrp = (struct ifreq *)
1752                                         (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
1753                         }
1754                         if (error)
1755                                 break;
1756                         space -= sizeof ifr;
1757                 }
1758                 if (error)
1759                         break;
1760                 if (!addrs) {
1761                         bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
1762                         error = copyout(&ifr, ifrp, sizeof ifr);
1763                         if (error)
1764                                 break;
1765                         space -= sizeof ifr;
1766                         ifrp++;
1767                 }
1768         }
1769         ifc->ifc_len -= space;
1770         return (error);
1771 }
1772
1773 /*
1774  * Just like ifpromisc(), but for all-multicast-reception mode.
1775  */
1776 int
1777 if_allmulti(struct ifnet *ifp, int onswitch)
1778 {
1779         int error = 0;
1780         struct ifreq ifr;
1781
1782         crit_enter();
1783
1784         if (onswitch) {
1785                 if (ifp->if_amcount++ == 0) {
1786                         ifp->if_flags |= IFF_ALLMULTI;
1787                         ifr.ifr_flags = ifp->if_flags;
1788                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1789                         ifnet_serialize_all(ifp);
1790                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1791                                               NULL);
1792                         ifnet_deserialize_all(ifp);
1793                 }
1794         } else {
1795                 if (ifp->if_amcount > 1) {
1796                         ifp->if_amcount--;
1797                 } else {
1798                         ifp->if_amcount = 0;
1799                         ifp->if_flags &= ~IFF_ALLMULTI;
1800                         ifr.ifr_flags = ifp->if_flags;
1801                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1802                         ifnet_serialize_all(ifp);
1803                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
1804                                               NULL);
1805                         ifnet_deserialize_all(ifp);
1806                 }
1807         }
1808
1809         crit_exit();
1810
1811         if (error == 0)
1812                 rt_ifmsg(ifp);
1813         return error;
1814 }
1815
1816 /*
1817  * Add a multicast listenership to the interface in question.
1818  * The link layer provides a routine which converts the address to its link-layer form when needed.
1819  */
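/*
 * Typical use (sketch; group_sin stands for a caller-filled
 * struct sockaddr_in naming the group):
 *
 *	struct ifmultiaddr *ifma;
 *
 *	error = if_addmulti(ifp, (struct sockaddr *)&group_sin, &ifma);
 *	...
 *	error = if_delmulti(ifp, (struct sockaddr *)&group_sin);
 */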
1820 int
1821 if_addmulti(
1822         struct ifnet *ifp,      /* interface to manipulate */
1823         struct sockaddr *sa,    /* address to add */
1824         struct ifmultiaddr **retifma)
1825 {
1826         struct sockaddr *llsa, *dupsa;
1827         int error;
1828         struct ifmultiaddr *ifma;
1829
1830         /*
1831          * If a matching multicast address already exists,
1832          * don't add a new one; just add a reference.
1833          */
1834         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1835                 if (sa_equal(sa, ifma->ifma_addr)) {
1836                         ifma->ifma_refcount++;
1837                         if (retifma)
1838                                 *retifma = ifma;
1839                         return 0;
1840                 }
1841         }
1842
1843         /*
1844          * Give the link layer a chance to accept/reject it, and also
1845          * find out which AF_LINK address this maps to, if it isn't one
1846          * already.
1847          */
1848         if (ifp->if_resolvemulti) {
1849                 ifnet_serialize_all(ifp);
1850                 error = ifp->if_resolvemulti(ifp, &llsa, sa);
1851                 ifnet_deserialize_all(ifp);
1852                 if (error)
1853                         return error;
1854         } else {
1855                 llsa = 0;
1856         }
1857
1858         MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK);
1859         MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK);
1860         bcopy(sa, dupsa, sa->sa_len);
1861
1862         ifma->ifma_addr = dupsa;
1863         ifma->ifma_lladdr = llsa;
1864         ifma->ifma_ifp = ifp;
1865         ifma->ifma_refcount = 1;
1866         ifma->ifma_protospec = 0;
1867         rt_newmaddrmsg(RTM_NEWMADDR, ifma);
1868
1869         /*
1870          * Some network interfaces can scan the address list at
1871          * interrupt time; lock them out.
1872          */
1873         crit_enter();
1874         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1875         crit_exit();
1876         if (retifma)
1877                 *retifma = ifma;
1878
1879         if (llsa != 0) {
1880                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1881                         if (sa_equal(ifma->ifma_addr, llsa))
1882                                 break;
1883                 }
1884                 if (ifma) {
1885                         ifma->ifma_refcount++;
1886                 } else {
1887                         MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma,
1888                                M_IFMADDR, M_WAITOK);
1889                         MALLOC(dupsa, struct sockaddr *, llsa->sa_len,
1890                                M_IFMADDR, M_WAITOK);
1891                         bcopy(llsa, dupsa, llsa->sa_len);
1892                         ifma->ifma_addr = dupsa;
1893                         ifma->ifma_ifp = ifp;
1894                         ifma->ifma_refcount = 1;
1895                         crit_enter();
1896                         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
1897                         crit_exit();
1898                 }
1899         }
1900         /*
1901          * We are certain we have added something, so call down to the
1902          * interface to let it know about it.
1903          */
1904         crit_enter();
1905         ifnet_serialize_all(ifp);
1906         if (ifp->if_ioctl)
1907                 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
1908         ifnet_deserialize_all(ifp);
1909         crit_exit();
1910
1911         return 0;
1912 }
1913
1914 /*
1915  * Remove a reference to a multicast address on this interface.  Yell
1916  * if the request does not match an existing membership.
1917  */
1918 int
1919 if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
1920 {
1921         struct ifmultiaddr *ifma;
1922
1923         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1924                 if (sa_equal(sa, ifma->ifma_addr))
1925                         break;
1926         if (ifma == 0)
1927                 return ENOENT;
1928
1929         if (ifma->ifma_refcount > 1) {
1930                 ifma->ifma_refcount--;
1931                 return 0;
1932         }
1933
1934         rt_newmaddrmsg(RTM_DELMADDR, ifma);
1935         sa = ifma->ifma_lladdr;
1936         crit_enter();
1937         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
1938         /*
1939          * Make sure the interface driver is notified
1940          * when a link-layer multicast group is being left.
1941          */
1942         if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) {
1943                 ifnet_serialize_all(ifp);
1944                 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1945                 ifnet_deserialize_all(ifp);
1946         }
1947         crit_exit();
1948         kfree(ifma->ifma_addr, M_IFMADDR);
1949         kfree(ifma, M_IFMADDR);
1950         if (sa == 0)
1951                 return 0;
1952
1953         /*
1954          * Now look for the link-layer address which corresponds to
1955          * this network address.  It had been squirreled away in
1956          * ifma->ifma_lladdr for this purpose (so we don't have
1957          * to call ifp->if_resolvemulti() again), and we saved that
1958          * value in sa above.  If something nasty deleted the
1959          * link-layer address out from underneath us, we can cope because
1960          * the address we stored is not the same as the one kept
1961          * in the record for the link-layer address.  (So we don't complain
1962          * in that case.)
1963          */
1964         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1965                 if (sa_equal(sa, ifma->ifma_addr))
1966                         break;
1967         if (ifma == 0)
1968                 return 0;
1969
1970         if (ifma->ifma_refcount > 1) {
1971                 ifma->ifma_refcount--;
1972                 return 0;
1973         }
1974
1975         crit_enter();
1976         ifnet_serialize_all(ifp);
1977         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
1978         ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
1979         ifnet_deserialize_all(ifp);
1980         crit_exit();
1981         kfree(ifma->ifma_addr, M_IFMADDR);
1982         kfree(sa, M_IFMADDR);
1983         kfree(ifma, M_IFMADDR);
1984
1985         return 0;
1986 }
1987
1988 /*
1989  * Delete all multicast group membership for an interface.
1990  * Should be used to quickly flush all multicast filters.
1991  */
1992 void
1993 if_delallmulti(struct ifnet *ifp)
1994 {
1995         struct ifmultiaddr *ifma;
1996         struct ifmultiaddr *next;
1997
1998         TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
1999                 if_delmulti(ifp, ifma->ifma_addr);
2000 }
2001
2002
2003 /*
2004  * Set the link layer address on an interface.
2005  *
2006  * At this time we only support certain types of interfaces,
2007  * and we don't allow the length of the address to change.
2008  */
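/*
 * Sketch of a kernel caller (the SIOCSIFLLADDR case above does the
 * equivalent with a user-supplied address):
 *
 *	static const u_char mac[ETHER_ADDR_LEN] =
 *	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	error = if_setlladdr(ifp, mac, ETHER_ADDR_LEN);
 */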
2009 int
2010 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2011 {
2012         struct sockaddr_dl *sdl;
2013         struct ifreq ifr;
2014
2015         sdl = IF_LLSOCKADDR(ifp);
2016         if (sdl == NULL)
2017                 return (EINVAL);
2018         if (len != sdl->sdl_alen)       /* don't allow length to change */
2019                 return (EINVAL);
2020         switch (ifp->if_type) {
2021         case IFT_ETHER:                 /* these types use struct arpcom */
2022         case IFT_XETHER:
2023         case IFT_L2VLAN:
2024                 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
2025                 bcopy(lladdr, LLADDR(sdl), len);
2026                 break;
2027         default:
2028                 return (ENODEV);
2029         }
2030         /*
2031          * If the interface is already up, we need
2032          * to re-init it in order to reprogram its
2033          * address filter.
2034          */
2035         ifnet_serialize_all(ifp);
2036         if ((ifp->if_flags & IFF_UP) != 0) {
2037                 struct ifaddr_container *ifac;
2038
2039                 ifp->if_flags &= ~IFF_UP;
2040                 ifr.ifr_flags = ifp->if_flags;
2041                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2042                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2043                               NULL);
2044                 ifp->if_flags |= IFF_UP;
2045                 ifr.ifr_flags = ifp->if_flags;
2046                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2047                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2048                                  NULL);
2049 #ifdef INET
2050                 /*
2051                  * Also send gratuitous ARPs to notify other nodes about
2052                  * the address change.
2053                  */
2054                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2055                         struct ifaddr *ifa = ifac->ifa;
2056
2057                         if (ifa->ifa_addr != NULL &&
2058                             ifa->ifa_addr->sa_family == AF_INET)
2059                                 arp_ifinit(ifp, ifa);
2060                 }
2061 #endif
2062         }
2063         ifnet_deserialize_all(ifp);
2064         return (0);
2065 }
2066
2067 struct ifmultiaddr *
2068 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
2069 {
2070         struct ifmultiaddr *ifma;
2071
2072         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2073                 if (sa_equal(ifma->ifma_addr, sa))
2074                         break;
2075
2076         return ifma;
2077 }
2078
2079 /*
2080  * This function locates the first real ethernet MAC from a network
2081  * card and loads it into node, returning 0 on success or ENOENT if
2082  * no suitable interfaces were found.  It is used by the uuid code to
2083  * generate a unique 6-byte number.
2084  */
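/*
 * E.g. (sketch):
 *
 *	uint16_t node[3];		(6 bytes of storage)
 *
 *	if (if_getanyethermac(node, sizeof(node)) != 0)
 *		... fall back to a randomly generated node id ...
 */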
2085 int
2086 if_getanyethermac(uint16_t *node, int minlen)
2087 {
2088         struct ifnet *ifp;
2089         struct sockaddr_dl *sdl;
2090
2091         TAILQ_FOREACH(ifp, &ifnet, if_link) {
2092                 if (ifp->if_type != IFT_ETHER)
2093                         continue;
2094                 sdl = IF_LLSOCKADDR(ifp);
2095                 if (sdl->sdl_alen < minlen)
2096                         continue;
2097                 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2098                       minlen);
2099                 return(0);
2100         }
2101         return (ENOENT);
2102 }
2103
2104 /*
2105  * The name argument must be a pointer to storage which will last as
2106  * long as the interface does.  For physical devices, the result of
2107  * device_get_name(dev) is a good choice and for pseudo-devices a
2108  * static string works well.
2109  */
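/*
 * E.g. (sketch): a physical driver typically does
 *
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *
 * while a pseudo-device uses a static name and IF_DUNIT_NONE.
 */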
2110 void
2111 if_initname(struct ifnet *ifp, const char *name, int unit)
2112 {
2113         ifp->if_dname = name;
2114         ifp->if_dunit = unit;
2115         if (unit != IF_DUNIT_NONE)
2116                 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2117         else
2118                 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2119 }
2120
2121 int
2122 if_printf(struct ifnet *ifp, const char *fmt, ...)
2123 {
2124         __va_list ap;
2125         int retval;
2126
2127         retval = kprintf("%s: ", ifp->if_xname);
2128         __va_start(ap, fmt);
2129         retval += kvprintf(fmt, ap);
2130         __va_end(ap);
2131         return (retval);
2132 }
2133
2134 struct ifnet *
2135 if_alloc(uint8_t type)
2136 {
2137         struct ifnet *ifp;
2138         size_t size;
2139
2140         /*
2141          * XXX temporary hack until arpcom is set up in if_l2com
2142          */
2143         if (type == IFT_ETHER)
2144                 size = sizeof(struct arpcom);
2145         else
2146                 size = sizeof(struct ifnet);
2147
2148         ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);
2149
2150         ifp->if_type = type;
2151
2152         if (if_com_alloc[type] != NULL) {
2153                 ifp->if_l2com = if_com_alloc[type](type, ifp);
2154                 if (ifp->if_l2com == NULL) {
2155                         kfree(ifp, M_IFNET);
2156                         return (NULL);
2157                 }
2158         }
2159         return (ifp);
2160 }
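
/*
 * Sketch: callers pair if_alloc() with if_free(), e.g.
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	if (ifp == NULL)
 *		return (ENOMEM);
 *	...
 *	if_free(ifp);
 */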
2161
2162 void
2163 if_free(struct ifnet *ifp)
2164 {
2165         kfree(ifp, M_IFNET);
2166 }
2167
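/*
 * Install the default (classic) FIFO enqueue/dequeue/request handlers
 * on an interface send queue.
 */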
2168 void
2169 ifq_set_classic(struct ifaltq *ifq)
2170 {
2171         ifq->altq_enqueue = ifq_classic_enqueue;
2172         ifq->altq_dequeue = ifq_classic_dequeue;
2173         ifq->altq_request = ifq_classic_request;
2174 }
2175
2176 int
2177 ifq_classic_enqueue(struct ifaltq *ifq, struct mbuf *m,
2178                     struct altq_pktattr *pa __unused)
2179 {
2180         logifq(enqueue, ifq);
2181         if (IF_QFULL(ifq)) {
2182                 m_freem(m);
2183                 return(ENOBUFS);
2184         } else {
2185                 IF_ENQUEUE(ifq, m);
2186                 return(0);
2187         }
2188 }
2189
2190 struct mbuf *
2191 ifq_classic_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
2192 {
2193         struct mbuf *m;
2194
2195         switch (op) {
2196         case ALTDQ_POLL:
2197                 IF_POLL(ifq, m);
2198                 break;
2199         case ALTDQ_REMOVE:
2200                 logifq(dequeue, ifq);
2201                 IF_DEQUEUE(ifq, m);
2202                 break;
2203         default:
2204                 panic("unsupported ALTQ dequeue op: %d", op);
2205         }
2206         KKASSERT(mpolled == NULL || mpolled == m);
2207         return(m);
2208 }
2209
2210 int
2211 ifq_classic_request(struct ifaltq *ifq, int req, void *arg)
2212 {
2213         switch (req) {
2214         case ALTRQ_PURGE:
2215                 IF_DRAIN(ifq);
2216                 break;
2217         default:
2218                 panic("unsupported ALTQ request: %d", req);
2219         }
2220         return(0);
2221 }
2222
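/*
 * Enqueue a packet on ifp's send queue and kick the transmitter: try a
 * direct ifnet.if_start when possible, otherwise schedule ifnet.if_start
 * on the ifnet's CPU.
 */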
2223 int
2224 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2225 {
2226         struct ifaltq *ifq = &ifp->if_snd;
2227         int running = 0, error, start = 0;
2228
2229         ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);
2230
2231         ALTQ_LOCK(ifq);
2232         error = ifq_enqueue_locked(ifq, m, pa);
2233         if (error) {
2234                 ALTQ_UNLOCK(ifq);
2235                 return error;
2236         }
2237         if (!ifq->altq_started) {
2238                 /*
2239                  * Hold the interlock of ifnet.if_start
2240                  */
2241                 ifq->altq_started = 1;
2242                 start = 1;
2243         }
2244         ALTQ_UNLOCK(ifq);
2245
2246         ifp->if_obytes += m->m_pkthdr.len;
2247         if (m->m_flags & M_MCAST)
2248                 ifp->if_omcasts++;
2249
2250         if (!start) {
2251                 logifstart(avoid, ifp);
2252                 return 0;
2253         }
2254
2255         if (ifq_dispatch_schedonly) {
2256                 /*
2257                  * Always schedule ifnet.if_start on the ifnet's CPU and
2258                  * short-circuit the rest of this function.
2259                  */
2260                 logifstart(sched, ifp);
2261                 if_start_schedule(ifp);
2262                 return 0;
2263         }
2264
2265         /*
2266          * Try a direct ifnet.if_start first.  If there is contention
2267          * on the ifnet's serializer, ifnet.if_start will be scheduled
2268          * on the ifnet's CPU instead.
2269          */
2270         if (!ifnet_tryserialize_tx(ifp)) {
2271                 /*
2272                  * The ifnet serializer is contended, so
2273                  * ifnet.if_start will be scheduled on the
2274                  * ifnet's CPU and we keep going.
2275                  */
2276                 logifstart(contend_sched, ifp);
2277                 if_start_schedule(ifp);
2278                 return 0;
2279         }
2280
2281         if ((ifp->if_flags & IFF_OACTIVE) == 0) {
2282                 logifstart(run, ifp);
2283                 ifp->if_start(ifp);
2284                 if ((ifp->if_flags &
2285                      (IFF_OACTIVE | IFF_RUNNING)) == IFF_RUNNING)
2286                         running = 1;
2287         }
2288
2289         ifnet_deserialize_tx(ifp);
2290
2291         if (ifq_dispatch_schednochk || if_start_need_schedule(ifq, running)) {
2292                 /*
2293                  * More data needs to be transmitted; ifnet.if_start is
2294                  * scheduled on the ifnet's CPU and we keep going.
2295                  * NOTE: ifnet.if_start interlock is not released.
2296                  */
2297                 logifstart(sched, ifp);
2298                 if_start_schedule(ifp);
2299         }
2300         return 0;
2301 }
2302
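/*
 * Allocate an ifaddr of the given size together with one reference-
 * counted ifaddr_container per CPU (each starting with a refcount of 1).
 */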
2303 void *
2304 ifa_create(int size, int flags)
2305 {
2306         struct ifaddr *ifa;
2307         int i;
2308
2309         KASSERT(size >= sizeof(*ifa), ("ifaddr size too small\n"));
2310
2311         ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2312         if (ifa == NULL)
2313                 return NULL;
2314
2315         ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2316                                       M_IFADDR, M_WAITOK | M_ZERO);
2317         ifa->ifa_ncnt = ncpus;
2318         for (i = 0; i < ncpus; ++i) {
2319                 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2320
2321                 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2322                 ifac->ifa = ifa;
2323                 ifac->ifa_refcnt = 1;
2324         }
2325 #ifdef IFADDR_DEBUG
2326         kprintf("alloc ifa %p %d\n", ifa, size);
2327 #endif
2328         return ifa;
2329 }
2330
2331 void
2332 ifac_free(struct ifaddr_container *ifac, int cpu_id)
2333 {
2334         struct ifaddr *ifa = ifac->ifa;
2335
2336         KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2337         KKASSERT(ifac->ifa_refcnt == 0);
2338         KASSERT(ifac->ifa_listmask == 0,
2339                 ("ifa is still on %#x lists\n", ifac->ifa_listmask));
2340
2341         ifac->ifa_magic = IFA_CONTAINER_DEAD;
2342
2343 #ifdef IFADDR_DEBUG_VERBOSE
2344         kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
2345 #endif
2346
2347         KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
2348                 ("invalid # of ifac, %d\n", ifa->ifa_ncnt));
2349         if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2350 #ifdef IFADDR_DEBUG
2351                 kprintf("free ifa %p\n", ifa);
2352 #endif
2353                 kfree(ifa->ifa_containers, M_IFADDR);
2354                 kfree(ifa, M_IFADDR);
2355         }
2356 }
2357
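/*
 * Per-CPU handler for ifa_iflink(): link this CPU's ifaddr_container
 * onto the interface's if_addrheads list, then forward the message to
 * the next CPU in the chain.
 */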
2358 static void
2359 ifa_iflink_dispatch(struct netmsg *nmsg)
2360 {
2361         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2362         struct ifaddr *ifa = msg->ifa;
2363         struct ifnet *ifp = msg->ifp;
2364         int cpu = mycpuid;
2365         struct ifaddr_container *ifac;
2366
2367         crit_enter();
2368
2369         ifac = &ifa->ifa_containers[cpu];
2370         ASSERT_IFAC_VALID(ifac);
2371         KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
2372                 ("ifaddr is on if_addrheads\n"));
2373
2374         ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2375         if (msg->tail)
2376                 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2377         else
2378                 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
2379
2380         crit_exit();
2381
2382         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2383 }
2384
2385 void
2386 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2387 {
2388         struct netmsg_ifaddr msg;
2389
2390         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2391                     0, ifa_iflink_dispatch);
2392         msg.ifa = ifa;
2393         msg.ifp = ifp;
2394         msg.tail = tail;
2395
2396         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2397 }
2398
2399 static void
2400 ifa_ifunlink_dispatch(struct netmsg *nmsg)
2401 {
2402         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2403         struct ifaddr *ifa = msg->ifa;
2404         struct ifnet *ifp = msg->ifp;
2405         int cpu = mycpuid;
2406         struct ifaddr_container *ifac;
2407
2408         crit_enter();
2409
2410         ifac = &ifa->ifa_containers[cpu];
2411         ASSERT_IFAC_VALID(ifac);
2412         KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
2413                 ("ifaddr is not on if_addrhead\n"));
2414
2415         TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2416         ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
2417
2418         crit_exit();
2419
2420         ifa_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
2421 }
2422
2423 void
2424 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2425 {
2426         struct netmsg_ifaddr msg;
2427
2428         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2429                     0, ifa_ifunlink_dispatch);
2430         msg.ifa = ifa;
2431         msg.ifp = ifp;
2432
2433         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2434 }
2435
2436 static void
2437 ifa_destroy_dispatch(struct netmsg *nmsg)
2438 {
2439         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2440
2441         IFAFREE(msg->ifa);
2442         ifa_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2443 }
2444
2445 void
2446 ifa_destroy(struct ifaddr *ifa)
2447 {
2448         struct netmsg_ifaddr msg;
2449
2450         netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
2451                     0, ifa_destroy_dispatch);
2452         msg.ifa = ifa;
2453
2454         ifa_domsg(&msg.netmsg.nm_lmsg, 0);
2455 }
2456
2457 struct lwkt_port *
2458 ifnet_portfn(int cpu)
2459 {
2460         return &ifnet_threads[cpu].td_msgport;
2461 }
2462
2463 void
2464 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2465 {
2466         KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2467
2468         if (next_cpu < ncpus)
2469                 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2470         else
2471                 lwkt_replymsg(lmsg, 0);
2472 }
2473
2474 int
2475 ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2476 {
2477         KKASSERT(cpu < ncpus);
2478         return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
2479 }
2480
2481 void
2482 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2483 {
2484         KKASSERT(cpu < ncpus);
2485         lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2486 }
2487
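/*
 * Sketch (my_dispatch is a hypothetical handler): work that must run in
 * a given CPU's ifnet thread is wrapped in a netmsg, e.g.
 *
 *	struct netmsg nmsg;
 *
 *	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, my_dispatch);
 *	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
 */
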
2488 /*
2489  * Generic netmsg service loop.  Some protocols may roll their own but all
2490  * must do the basic command dispatch function call done here.
2491  */
2492 static void
2493 ifnet_service_loop(void *arg __unused)
2494 {
2495         struct netmsg *msg;
2496
2497         while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
2498                 KASSERT(msg->nm_dispatch, ("ifnet_service: badmsg"));
2499                 msg->nm_dispatch(msg);
2500         }
2501 }
2502
2503 static void
2504 ifnetinit(void *dummy __unused)
2505 {
2506         int i;
2507
2508         for (i = 0; i < ncpus; ++i) {
2509                 struct thread *thr = &ifnet_threads[i];
2510
2511                 lwkt_create(ifnet_service_loop, NULL, NULL,
2512                             thr, TDF_STOPREQ, i, "ifnet %d", i);
2513                 netmsg_service_port_init(&thr->td_msgport);
2514                 lwkt_schedule(thr);
2515         }
2516 }
2517
2518 struct ifnet *
2519 ifnet_byindex(unsigned short idx)
2520 {
2521         if (idx > if_index)
2522                 return NULL;
2523         return ifindex2ifnet[idx];
2524 }
2525
2526 struct ifaddr *
2527 ifaddr_byindex(unsigned short idx)
2528 {
2529         struct ifnet *ifp;
2530
2531         ifp = ifnet_byindex(idx);
2532         if (!ifp)
2533                 return NULL;
2534         return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
2535 }
2536
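/*
 * Register the per-interface-type l2com allocator consulted by
 * if_alloc(), along with its matching free hook.  Example
 * (hypothetical IFT_FOO support module):
 *
 *	if_register_com_alloc(IFT_FOO, foo_com_alloc, foo_com_free);
 *	...
 *	if_deregister_com_alloc(IFT_FOO);
 */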
2537 void
2538 if_register_com_alloc(u_char type,
2539     if_com_alloc_t *a, if_com_free_t *f)
2540 {
2541
2542         KASSERT(if_com_alloc[type] == NULL,
2543             ("if_register_com_alloc: %d already registered", type));
2544         KASSERT(if_com_free[type] == NULL,
2545             ("if_register_com_alloc: %d free already registered", type));
2546
2547         if_com_alloc[type] = a;
2548         if_com_free[type] = f;
2549 }
2550
2551 void
2552 if_deregister_com_alloc(u_char type)
2553 {
2554
2555         KASSERT(if_com_alloc[type] != NULL,
2556             ("if_deregister_com_alloc: %d not registered", type));
2557         KASSERT(if_com_free[type] != NULL,
2558             ("if_deregister_com_alloc: %d free not registered", type));
2559         if_com_alloc[type] = NULL;
2560         if_com_free[type] = NULL;
2561 }