if: Multiple TX queue support step 1 of many; introduce ifaltq subqueue
[dragonfly.git] / sys / net / if.c
1 /*
2  * Copyright (c) 1980, 1986, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by the University of
16  *      California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *      @(#)if.c        8.3 (Berkeley) 1/4/94
34  * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35  */
36
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
39 #include "opt_inet.h"
40 #include "opt_ifpoll.h"
41
42 #include <sys/param.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/priv.h>
48 #include <sys/protosw.h>
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/socketops.h>
52 #include <sys/protosw.h>
53 #include <sys/kernel.h>
54 #include <sys/ktr.h>
55 #include <sys/mutex.h>
56 #include <sys/sockio.h>
57 #include <sys/syslog.h>
58 #include <sys/sysctl.h>
59 #include <sys/domain.h>
60 #include <sys/thread.h>
61 #include <sys/serialize.h>
62 #include <sys/bus.h>
63
64 #include <sys/thread2.h>
65 #include <sys/msgport2.h>
66 #include <sys/mutex2.h>
67
68 #include <net/if.h>
69 #include <net/if_arp.h>
70 #include <net/if_dl.h>
71 #include <net/if_types.h>
72 #include <net/if_var.h>
73 #include <net/ifq_var.h>
74 #include <net/radix.h>
75 #include <net/route.h>
76 #include <net/if_clone.h>
77 #include <net/netisr.h>
78 #include <net/netmsg2.h>
79
80 #include <machine/atomic.h>
81 #include <machine/stdarg.h>
82 #include <machine/smp.h>
83
84 #if defined(INET) || defined(INET6)
85 /*XXX*/
86 #include <netinet/in.h>
87 #include <netinet/in_var.h>
88 #include <netinet/if_ether.h>
89 #ifdef INET6
90 #include <netinet6/in6_var.h>
91 #include <netinet6/in6_ifattach.h>
92 #endif
93 #endif
94
95 #if defined(COMPAT_43)
96 #include <emulation/43bsd/43bsd_socket.h>
97 #endif /* COMPAT_43 */
98
99 struct netmsg_ifaddr {
100         struct netmsg_base base;
101         struct ifaddr   *ifa;
102         struct ifnet    *ifp;
103         int             tail;
104 };
105
106 struct ifsubq_stage_head {
107         TAILQ_HEAD(, ifsubq_stage)      stg_head;
108 } __cachealign;
109
110 /*
111  * System initialization
112  */
113 static void     if_attachdomain(void *);
114 static void     if_attachdomain1(struct ifnet *);
115 static int      ifconf(u_long, caddr_t, struct ucred *);
116 static void     ifinit(void *);
117 static void     ifnetinit(void *);
118 static void     if_slowtimo(void *);
119 static void     link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
120 static int      if_rtdel(struct radix_node *, void *);
121
122 #ifdef INET6
123 /*
124  * XXX: declare here to avoid including many inet6 related files..
125  * should be more generalized?
126  */
127 extern void     nd6_setmtu(struct ifnet *);
128 #endif
129
130 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
131 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
132
133 static int ifsq_stage_cntmax = 4;
134 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
135 SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
136     &ifsq_stage_cntmax, 0, "ifq staging packet count max");
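
/*
 * Staging note (illustrative, inferred from the staging helpers below):
 * when if_start is requested from a netisr thread, the request may be
 * parked in a per-cpu ifsubq_stage list instead of being dispatched
 * immediately; stage_cntmax is intended to bound how many packets may
 * accumulate on a staged subqueue before its if_start is actually
 * scheduled, batching the dispatch.  See ifsq_ifstart_schedule() and
 * the ifsq_stage_insert()/ifsq_stage_remove() helpers below.
 */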
137
138 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
139 /* Must be after netisr_init */
140 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
141
142 static  if_com_alloc_t *if_com_alloc[256];
143 static  if_com_free_t *if_com_free[256];
144
145 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
146 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
147 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
148
149 int                     ifqmaxlen = IFQ_MAXLEN;
150 struct ifnethead        ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
151
152 struct callout          if_slowtimo_timer;
153
154 int                     if_index = 0;
155 struct ifnet            **ifindex2ifnet = NULL;
156 static struct thread    ifnet_threads[MAXCPU];
157
158 static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU];
159
160 #ifdef notyet
161 #define IFQ_KTR_STRING          "ifq=%p"
162 #define IFQ_KTR_ARGS    struct ifaltq *ifq
163 #ifndef KTR_IFQ
164 #define KTR_IFQ                 KTR_ALL
165 #endif
166 KTR_INFO_MASTER(ifq);
167 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
168 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
169 #define logifq(name, arg)       KTR_LOG(ifq_ ## name, arg)
170
171 #define IF_START_KTR_STRING     "ifp=%p"
172 #define IF_START_KTR_ARGS       struct ifnet *ifp
173 #ifndef KTR_IF_START
174 #define KTR_IF_START            KTR_ALL
175 #endif
176 KTR_INFO_MASTER(if_start);
177 KTR_INFO(KTR_IF_START, if_start, run, 0,
178          IF_START_KTR_STRING, IF_START_KTR_ARGS);
179 KTR_INFO(KTR_IF_START, if_start, sched, 1,
180          IF_START_KTR_STRING, IF_START_KTR_ARGS);
181 KTR_INFO(KTR_IF_START, if_start, avoid, 2,
182          IF_START_KTR_STRING, IF_START_KTR_ARGS);
183 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
184          IF_START_KTR_STRING, IF_START_KTR_ARGS);
185 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
186          IF_START_KTR_STRING, IF_START_KTR_ARGS);
187 #define logifstart(name, arg)   KTR_LOG(if_start_ ## name, arg)
188 #endif
189
190 TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);
191
192 /*
193  * Network interface utility routines.
194  *
195  * Routines with ifa_ifwith* names take sockaddr *'s as
196  * parameters.
197  */
198 /* ARGSUSED*/
199 void
200 ifinit(void *dummy)
201 {
202         struct ifnet *ifp;
203
204         callout_init(&if_slowtimo_timer);
205
206         crit_enter();
207         TAILQ_FOREACH(ifp, &ifnet, if_link) {
208                 if (ifp->if_snd.altq_maxlen == 0) {
209                         if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
210                         ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
211                 }
212         }
213         crit_exit();
214
215         if_slowtimo(0);
216 }
217
218 static void
219 ifsq_ifstart_ipifunc(void *arg)
220 {
221         struct ifaltq_subque *ifsq = arg;
222         struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);
223
224         crit_enter();
225         if (lmsg->ms_flags & MSGF_DONE)
226                 lwkt_sendmsg(netisr_portfn(mycpuid), lmsg);
227         crit_exit();
228 }
229
230 static __inline void
231 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
232 {
233         KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
234         TAILQ_REMOVE(&head->stg_head, stage, stg_link);
235         stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
236         stage->stg_cnt = 0;
237         stage->stg_len = 0;
238 }
239
240 static __inline void
241 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
242 {
243         KKASSERT((stage->stg_flags &
244             (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
245         stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
246         TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
247 }
248
249 /*
250  * Schedule ifnet.if_start on ifnet's CPU
251  */
252 static void
253 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
254 {
255         int cpu;
256
257         if (!force && curthread->td_type == TD_TYPE_NETISR &&
258             ifsq_stage_cntmax > 0) {
259                 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
260
261                 stage->stg_cnt = 0;
262                 stage->stg_len = 0;
263                 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
264                         ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
265                 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
266                 return;
267         }
268
269         cpu = ifsq_get_cpuid(ifsq);
270         if (cpu != mycpuid)
271                 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
272         else
273                 ifsq_ifstart_ipifunc(ifsq);
274 }
275
276 /*
277  * NOTE:
278  * This function will release ifnet.if_start interlock,
279  * if ifnet.if_start does not need to be scheduled
280  */
281 static __inline int
282 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
283 {
284         if (!running || ifsq_is_empty(ifsq)
285 #ifdef ALTQ
286             || ifsq->ifsq_altq->altq_tbr != NULL
287 #endif
288         ) {
289                 ALTQ_SQ_LOCK(ifsq);
290                 /*
291                  * ifnet.if_start interlock is released, if:
292                  * 1) Hardware cannot take any packets, due to
293                  *    o  interface is marked down
294                  *    o  hardware queue is full (ifq_is_oactive)
295                  *    In the second situation, a hardware interrupt
296                  *    or polling(4) will call/schedule ifnet.if_start
297                  *    when the hardware queue is ready
298                  * 2) There are no packets in the ifnet.if_snd.
299                  *    Further ifq_dispatch or ifq_handoff will call/
300                  *    schedule ifnet.if_start
301                  * 3) TBR is used and it does not allow further
302                  *    dequeueing.
303                  *    TBR callout will call ifnet.if_start
304                  */
305                 if (!running || !ifsq_data_ready(ifsq)) {
306                         ifsq_clr_started(ifsq);
307                         ALTQ_SQ_UNLOCK(ifsq);
308                         return 0;
309                 }
310                 ALTQ_SQ_UNLOCK(ifsq);
311         }
312         return 1;
313 }
314
315 static void
316 ifsq_ifstart_dispatch(netmsg_t msg)
317 {
318         struct lwkt_msg *lmsg = &msg->base.lmsg;
319         struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
320         struct ifnet *ifp = ifsq_get_ifp(ifsq);
321         int running = 0, need_sched;
322
323         crit_enter();
324         lwkt_replymsg(lmsg, 0); /* reply ASAP */
325         crit_exit();
326
327         if (mycpuid != ifsq_get_cpuid(ifsq)) {
328                 /*
329                  * We need to chase the ifnet CPU change.
330                  */
331                 ifsq_ifstart_schedule(ifsq, 1);
332                 return;
333         }
334
335         ifnet_serialize_tx(ifp);
336         if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
337                 ifp->if_start(ifp, ifsq);
338                 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
339                         running = 1;
340         }
341         need_sched = ifsq_ifstart_need_schedule(ifsq, running);
342         ifnet_deserialize_tx(ifp);
343
344         if (need_sched) {
345                 /*
346                  * More data needs to be transmitted; ifnet.if_start is
347                  * scheduled on ifnet's CPU, and we keep going.
348                  * NOTE: ifnet.if_start interlock is not released.
349                  */
350                 ifsq_ifstart_schedule(ifsq, 0);
351         }
352 }
353
354 /* Device driver ifnet.if_start helper function */
355 void
356 ifsq_devstart(struct ifaltq_subque *ifsq)
357 {
358         struct ifnet *ifp = ifsq_get_ifp(ifsq);
359         int running = 0;
360
361         ASSERT_IFNET_SERIALIZED_TX(ifp);
362
363         ALTQ_SQ_LOCK(ifsq);
364         if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
365                 ALTQ_SQ_UNLOCK(ifsq);
366                 return;
367         }
368         ifsq_set_started(ifsq);
369         ALTQ_SQ_UNLOCK(ifsq);
370
371         ifp->if_start(ifp, ifsq);
372
373         if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
374                 running = 1;
375
376         if (ifsq_ifstart_need_schedule(ifsq, running)) {
377                 /*
378                  * More data needs to be transmitted; ifnet.if_start is
379                  * scheduled on ifnet's CPU, and we keep going.
380                  * NOTE: ifnet.if_start interlock is not released.
381                  */
382                 ifsq_ifstart_schedule(ifsq, 0);
383         }
384 }
385
386 void
387 if_devstart(struct ifnet *ifp)
388 {
389         ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
390 }
391
392 /* Device driver ifnet.if_start schedule helper function */
393 void
394 ifsq_devstart_sched(struct ifaltq_subque *ifsq)
395 {
396         ifsq_ifstart_schedule(ifsq, 1);
397 }
398
399 void
400 if_devstart_sched(struct ifnet *ifp)
401 {
402         ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
403 }
404
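/*
 * Illustrative sketch: how a driver's TX-completion path would typically
 * use the if_devstart() helper above.  example_txeof is a hypothetical
 * routine; a real driver would also reclaim its TX descriptors and clear
 * its OACTIVE indication before kicking transmission again.
 */
#ifdef notyet
static void
example_txeof(struct ifnet *ifp)
{
        /* if_devstart() requires the TX serializer to be held */
        ASSERT_IFNET_SERIALIZED_TX(ifp);

        /*
         * Run ifnet.if_start on the default subqueue if it has packets
         * queued and the started/oactive interlock allows it.
         */
        if_devstart(ifp);
}
#endif
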
405 static void
406 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
407 {
408         lwkt_serialize_enter(ifp->if_serializer);
409 }
410
411 static void
412 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
413 {
414         lwkt_serialize_exit(ifp->if_serializer);
415 }
416
417 static int
418 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
419 {
420         return lwkt_serialize_try(ifp->if_serializer);
421 }
422
423 #ifdef INVARIANTS
424 static void
425 if_default_serialize_assert(struct ifnet *ifp,
426                             enum ifnet_serialize slz __unused,
427                             boolean_t serialized)
428 {
429         if (serialized)
430                 ASSERT_SERIALIZED(ifp->if_serializer);
431         else
432                 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
433 }
434 #endif
435
436 /*
437  * Attach an interface to the list of "active" interfaces.
438  *
439  * The serializer is optional.  If non-NULL, access to the interface
440  * may be MPSAFE.
441  */
442 void
443 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
444 {
445         unsigned socksize, ifasize;
446         int namelen, masklen;
447         struct sockaddr_dl *sdl;
448         struct ifaddr *ifa;
449         struct ifaltq *ifq;
450         int i, q;
451
452         static int if_indexlim = 8;
453
454         if (ifp->if_serialize != NULL) {
455                 KASSERT(ifp->if_deserialize != NULL &&
456                         ifp->if_tryserialize != NULL &&
457                         ifp->if_serialize_assert != NULL,
458                         ("serialize functions are partially setup"));
459
460                 /*
461                  * If the device supplies serialize functions,
462                  * then clear if_serializer to catch any invalid
463                  * usage of this field.
464                  */
465                 KASSERT(serializer == NULL,
466                         ("both serialize functions and default serializer "
467                          "are supplied"));
468                 ifp->if_serializer = NULL;
469         } else {
470                 KASSERT(ifp->if_deserialize == NULL &&
471                         ifp->if_tryserialize == NULL &&
472                         ifp->if_serialize_assert == NULL,
473                         ("serialize functions are partially setup"));
474                 ifp->if_serialize = if_default_serialize;
475                 ifp->if_deserialize = if_default_deserialize;
476                 ifp->if_tryserialize = if_default_tryserialize;
477 #ifdef INVARIANTS
478                 ifp->if_serialize_assert = if_default_serialize_assert;
479 #endif
480
481                 /*
482                  * The serializer can be passed in from the device,
483                  * allowing the same serializer to be used for both
484                  * the interrupt interlock and the device queue.
485                  * If not specified, the netif structure will use an
486                  * embedded serializer.
487                  */
488                 if (serializer == NULL) {
489                         serializer = &ifp->if_default_serializer;
490                         lwkt_serialize_init(serializer);
491                 }
492                 ifp->if_serializer = serializer;
493         }
494
495         mtx_init(&ifp->if_ioctl_mtx);
496         mtx_lock(&ifp->if_ioctl_mtx);
497
498         TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
499         ifp->if_index = ++if_index;
500
501         /*
502          * XXX -
503          * The old code would work if the interface passed a pre-existing
504          * chain of ifaddrs to this code.  We don't trust our callers to
505          * properly initialize the tailq, however, so we no longer allow
506          * this unlikely case.
507          */
508         ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
509                                     M_IFADDR, M_WAITOK | M_ZERO);
510         for (i = 0; i < ncpus; ++i)
511                 TAILQ_INIT(&ifp->if_addrheads[i]);
512
513         TAILQ_INIT(&ifp->if_prefixhead);
514         TAILQ_INIT(&ifp->if_multiaddrs);
515         TAILQ_INIT(&ifp->if_groups);
516         getmicrotime(&ifp->if_lastchange);
517         if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
518                 unsigned int n;
519                 struct ifnet **q;
520
521                 if_indexlim <<= 1;
522
523                 /* grow ifindex2ifnet */
524                 n = if_indexlim * sizeof(*q);
525                 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
526                 if (ifindex2ifnet) {
527                         bcopy(ifindex2ifnet, q, n/2);
528                         kfree(ifindex2ifnet, M_IFADDR);
529                 }
530                 ifindex2ifnet = q;
531         }
532
533         ifindex2ifnet[if_index] = ifp;
534
535         /*
536          * create a Link Level name for this device
537          */
538         namelen = strlen(ifp->if_xname);
539         masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
540         socksize = masklen + ifp->if_addrlen;
541 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
542         if (socksize < sizeof(*sdl))
543                 socksize = sizeof(*sdl);
544         socksize = ROUNDUP(socksize);
545 #undef ROUNDUP
546         ifasize = sizeof(struct ifaddr) + 2 * socksize;
547         ifa = ifa_create(ifasize, M_WAITOK);
548         sdl = (struct sockaddr_dl *)(ifa + 1);
549         sdl->sdl_len = socksize;
550         sdl->sdl_family = AF_LINK;
551         bcopy(ifp->if_xname, sdl->sdl_data, namelen);
552         sdl->sdl_nlen = namelen;
553         sdl->sdl_index = ifp->if_index;
554         sdl->sdl_type = ifp->if_type;
555         ifp->if_lladdr = ifa;
556         ifa->ifa_ifp = ifp;
557         ifa->ifa_rtrequest = link_rtrequest;
558         ifa->ifa_addr = (struct sockaddr *)sdl;
559         sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
560         ifa->ifa_netmask = (struct sockaddr *)sdl;
561         sdl->sdl_len = masklen;
562         while (namelen != 0)
563                 sdl->sdl_data[--namelen] = 0xff;
564         ifa_iflink(ifa, ifp, 0 /* Insert head */);
565
566         EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
567         devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
568
569         ifq = &ifp->if_snd;
570         ifq->altq_type = 0;
571         ifq->altq_disc = NULL;
572         ifq->altq_flags &= ALTQF_CANTCHANGE;
573         ifq->altq_tbr = NULL;
574         ifq->altq_ifp = ifp;
575
576         if (ifq->altq_subq_cnt <= 0)
577                 ifq->altq_subq_cnt = 1;
578         ifq->altq_subq = kmalloc_cachealign(
579             ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
580             M_DEVBUF, M_WAITOK | M_ZERO);
581
582         if (ifq->altq_maxlen == 0) {
583                 if_printf(ifp, "driver didn't set ifq_maxlen\n");
584                 ifq_set_maxlen(ifq, ifqmaxlen);
585         }
586
587         for (q = 0; q < ifq->altq_subq_cnt; ++q) {
588                 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
589
590                 ALTQ_SQ_LOCK_INIT(ifsq);
591                 ifsq->ifsq_index = q;
592
593                 ifsq->ifsq_altq = ifq;
594                 ifsq->ifsq_ifp = ifp;
595
596                 ifsq->ifq_maxlen = ifq->altq_maxlen;
597                 ifsq->ifsq_prepended = NULL;
598                 ifsq->ifsq_started = 0;
599                 ifsq->ifsq_hw_oactive = 0;
600                 ifsq_set_cpuid(ifsq, 0);
601
602                 ifsq->ifsq_stage =
603                     kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
604                     M_DEVBUF, M_WAITOK | M_ZERO);
605                 for (i = 0; i < ncpus; ++i)
606                         ifsq->ifsq_stage[i].stg_subq = ifsq;
607
608                 ifsq->ifsq_ifstart_nmsg =
609                     kmalloc(ncpus * sizeof(struct netmsg_base),
610                     M_LWKTMSG, M_WAITOK);
611                 for (i = 0; i < ncpus; ++i) {
612                         netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
613                             &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
614                         ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
615                 }
616         }
617         ifq_set_classic(ifq);
618
619         if (!SLIST_EMPTY(&domains))
620                 if_attachdomain1(ifp);
621
622         /* Announce the interface. */
623         rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
624
625         mtx_unlock(&ifp->if_ioctl_mtx);
626 }
627
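/*
 * Illustrative sketch: a minimal driver attach sequence; example_attach
 * and the queue depth of 512 are hypothetical.  Setting the send queue
 * length before if_attach() avoids the "driver didn't set ifq_maxlen"
 * fallback above; passing a NULL serializer makes if_attach() install
 * the embedded default serializer.  (An Ethernet driver normally reaches
 * if_attach() via ether_ifattach().)
 */
#ifdef notyet
static void
example_attach(struct ifnet *ifp)
{
        ifq_set_maxlen(&ifp->if_snd, 512);      /* hypothetical TX ring size */
        if_attach(ifp, NULL);
}
#endif
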
628 static void
629 if_attachdomain(void *dummy)
630 {
631         struct ifnet *ifp;
632
633         crit_enter();
634         TAILQ_FOREACH(ifp, &ifnet, if_list)
635                 if_attachdomain1(ifp);
636         crit_exit();
637 }
638 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
639         if_attachdomain, NULL);
640
641 static void
642 if_attachdomain1(struct ifnet *ifp)
643 {
644         struct domain *dp;
645
646         crit_enter();
647
648         /* address family dependent data region */
649         bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
650         SLIST_FOREACH(dp, &domains, dom_next)
651                 if (dp->dom_ifattach)
652                         ifp->if_afdata[dp->dom_family] =
653                                 (*dp->dom_ifattach)(ifp);
654         crit_exit();
655 }
656
657 /*
658  * Purge all addresses whose type is _not_ AF_LINK
659  */
660 void
661 if_purgeaddrs_nolink(struct ifnet *ifp)
662 {
663         struct ifaddr_container *ifac, *next;
664
665         TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
666                               ifa_link, next) {
667                 struct ifaddr *ifa = ifac->ifa;
668
669                 /* Leave link ifaddr as it is */
670                 if (ifa->ifa_addr->sa_family == AF_LINK)
671                         continue;
672 #ifdef INET
673                 /* XXX: Ugly!! ad hoc just for INET */
674                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
675                         struct ifaliasreq ifr;
676 #ifdef IFADDR_DEBUG_VERBOSE
677                         int i;
678
679                         kprintf("purge in4 addr %p: ", ifa);
680                         for (i = 0; i < ncpus; ++i)
681                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
682                         kprintf("\n");
683 #endif
684
685                         bzero(&ifr, sizeof ifr);
686                         ifr.ifra_addr = *ifa->ifa_addr;
687                         if (ifa->ifa_dstaddr)
688                                 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
689                         if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
690                                        NULL) == 0)
691                                 continue;
692                 }
693 #endif /* INET */
694 #ifdef INET6
695                 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
696 #ifdef IFADDR_DEBUG_VERBOSE
697                         int i;
698
699                         kprintf("purge in6 addr %p: ", ifa);
700                         for (i = 0; i < ncpus; ++i)
701                                 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
702                         kprintf("\n");
703 #endif
704
705                         in6_purgeaddr(ifa);
706                         /* ifp_addrhead is already updated */
707                         continue;
708                 }
709 #endif /* INET6 */
710                 ifa_ifunlink(ifa, ifp);
711                 ifa_destroy(ifa);
712         }
713 }
714
715 static void
716 ifq_stage_detach_handler(netmsg_t nmsg)
717 {
718         struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
719         int q;
720
721         for (q = 0; q < ifq->altq_subq_cnt; ++q) {
722                 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
723                 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
724
725                 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
726                         ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
727         }
728         lwkt_replymsg(&nmsg->lmsg, 0);
729 }
730
731 static void
732 ifq_stage_detach(struct ifaltq *ifq)
733 {
734         struct netmsg_base base;
735         int cpu;
736
737         netmsg_init(&base, NULL, &curthread->td_msgport, 0,
738             ifq_stage_detach_handler);
739         base.lmsg.u.ms_resultp = ifq;
740
741         for (cpu = 0; cpu < ncpus; ++cpu)
742                 lwkt_domsg(netisr_portfn(cpu), &base.lmsg, 0);
743 }
744
745 /*
746  * Detach an interface, removing it from the
747  * list of "active" interfaces.
748  */
749 void
750 if_detach(struct ifnet *ifp)
751 {
752         struct radix_node_head  *rnh;
753         int i, q;
754         int cpu, origcpu;
755         struct domain *dp;
756
757         EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
758
759         /*
760          * Remove routes and flush queues.
761          */
762         crit_enter();
763 #ifdef IFPOLL_ENABLE
764         if (ifp->if_flags & IFF_NPOLLING)
765                 ifpoll_deregister(ifp);
766 #endif
767         if_down(ifp);
768
769 #ifdef ALTQ
770         if (ifq_is_enabled(&ifp->if_snd))
771                 altq_disable(&ifp->if_snd);
772         if (ifq_is_attached(&ifp->if_snd))
773                 altq_detach(&ifp->if_snd);
774 #endif
775
776         /*
777          * Clean up all addresses.
778          */
779         ifp->if_lladdr = NULL;
780
781         if_purgeaddrs_nolink(ifp);
782         if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
783                 struct ifaddr *ifa;
784
785                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
786                 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
787                         ("non-link ifaddr is left on if_addrheads"));
788
789                 ifa_ifunlink(ifa, ifp);
790                 ifa_destroy(ifa);
791                 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
792                         ("there are still ifaddrs left on if_addrheads"));
793         }
794
795 #ifdef INET
796         /*
797          * Remove all IPv4 kernel structures related to ifp.
798          */
799         in_ifdetach(ifp);
800 #endif
801
802 #ifdef INET6
803         /*
804          * Remove all IPv6 kernel structs related to ifp.  This should be done
805          * before removing routing entries below, since IPv6 interface direct
806          * routes are expected to be removed by the IPv6-specific kernel API.
807          * Otherwise, the kernel will detect some inconsistency and complain.
808          */
809         in6_ifdetach(ifp);
810 #endif
811
812         /*
813          * Delete all remaining routes using this interface
814          * Unfortunately the only way to do this is to slog through
815          * the entire routing table looking for routes which point
816          * to this interface...oh well...
817          */
818         origcpu = mycpuid;
819         for (cpu = 0; cpu < ncpus; cpu++) {
820                 lwkt_migratecpu(cpu);
821                 for (i = 1; i <= AF_MAX; i++) {
822                         if ((rnh = rt_tables[cpu][i]) == NULL)
823                                 continue;
824                         rnh->rnh_walktree(rnh, if_rtdel, ifp);
825                 }
826         }
827         lwkt_migratecpu(origcpu);
828
829         /* Announce that the interface is gone. */
830         rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
831         devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
832
833         SLIST_FOREACH(dp, &domains, dom_next)
834                 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
835                         (*dp->dom_ifdetach)(ifp,
836                                 ifp->if_afdata[dp->dom_family]);
837
838         /*
839          * Remove interface from ifindex2ifp[] and maybe decrement if_index.
840          */
841         ifindex2ifnet[ifp->if_index] = NULL;
842         while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
843                 if_index--;
844
845         TAILQ_REMOVE(&ifnet, ifp, if_link);
846         kfree(ifp->if_addrheads, M_IFADDR);
847
848         lwkt_synchronize_ipiqs("if_detach");
849         ifq_stage_detach(&ifp->if_snd);
850
851         for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
852                 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];
853
854                 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
855                 kfree(ifsq->ifsq_stage, M_DEVBUF);
856         }
857         crit_exit();
858 }
859
860 /*
861  * Create interface group without members
862  */
863 struct ifg_group *
864 if_creategroup(const char *groupname)
865 {
866         struct ifg_group        *ifg = NULL;
867
868         if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
869             M_TEMP, M_NOWAIT)) == NULL)
870                 return (NULL);
871
872         strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
873         ifg->ifg_refcnt = 0;
874         ifg->ifg_carp_demoted = 0;
875         TAILQ_INIT(&ifg->ifg_members);
876 #if NPF > 0
877         pfi_attach_ifgroup(ifg);
878 #endif
879         TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);
880
881         return (ifg);
882 }
883
884 /*
885  * Add a group to an interface
886  */
887 int
888 if_addgroup(struct ifnet *ifp, const char *groupname)
889 {
890         struct ifg_list         *ifgl;
891         struct ifg_group        *ifg = NULL;
892         struct ifg_member       *ifgm;
893
894         if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
895             groupname[strlen(groupname) - 1] <= '9')
896                 return (EINVAL);
897
898         TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
899                 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
900                         return (EEXIST);
901
902         if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
903                 return (ENOMEM);
904
905         if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
906                 kfree(ifgl, M_TEMP);
907                 return (ENOMEM);
908         }
909
910         TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
911                 if (!strcmp(ifg->ifg_group, groupname))
912                         break;
913
914         if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
915                 kfree(ifgl, M_TEMP);
916                 kfree(ifgm, M_TEMP);
917                 return (ENOMEM);
918         }
919
920         ifg->ifg_refcnt++;
921         ifgl->ifgl_group = ifg;
922         ifgm->ifgm_ifp = ifp;
923
924         TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
925         TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
926
927 #if NPF > 0
928         pfi_group_change(groupname);
929 #endif
930
931         return (0);
932 }
933
934 /*
935  * Remove a group from an interface
936  */
937 int
938 if_delgroup(struct ifnet *ifp, const char *groupname)
939 {
940         struct ifg_list         *ifgl;
941         struct ifg_member       *ifgm;
942
943         TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
944                 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
945                         break;
946         if (ifgl == NULL)
947                 return (ENOENT);
948
949         TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
950
951         TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
952                 if (ifgm->ifgm_ifp == ifp)
953                         break;
954
955         if (ifgm != NULL) {
956                 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
957                 kfree(ifgm, M_TEMP);
958         }
959
960         if (--ifgl->ifgl_group->ifg_refcnt == 0) {
961                 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
962 #if NPF > 0
963                 pfi_detach_ifgroup(ifgl->ifgl_group);
964 #endif
965                 kfree(ifgl->ifgl_group, M_TEMP);
966         }
967
968         kfree(ifgl, M_TEMP);
969
970 #if NPF > 0
971         pfi_group_change(groupname);
972 #endif
973
974         return (0);
975 }
976
977 /*
978  * Stores all groups from an interface in memory pointed
979  * to by data
980  */
981 int
982 if_getgroup(caddr_t data, struct ifnet *ifp)
983 {
984         int                      len, error;
985         struct ifg_list         *ifgl;
986         struct ifg_req           ifgrq, *ifgp;
987         struct ifgroupreq       *ifgr = (struct ifgroupreq *)data;
988
989         if (ifgr->ifgr_len == 0) {
990                 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
991                         ifgr->ifgr_len += sizeof(struct ifg_req);
992                 return (0);
993         }
994
995         len = ifgr->ifgr_len;
996         ifgp = ifgr->ifgr_groups;
997         TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
998                 if (len < sizeof(ifgrq))
999                         return (EINVAL);
1000                 bzero(&ifgrq, sizeof ifgrq);
1001                 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
1002                     sizeof(ifgrq.ifgrq_group));
1003                 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1004                     sizeof(struct ifg_req))))
1005                         return (error);
1006                 len -= sizeof(ifgrq);
1007                 ifgp++;
1008         }
1009
1010         return (0);
1011 }
1012
1013 /*
1014  * Stores all members of a group in memory pointed to by data
1015  */
1016 int
1017 if_getgroupmembers(caddr_t data)
1018 {
1019         struct ifgroupreq       *ifgr = (struct ifgroupreq *)data;
1020         struct ifg_group        *ifg;
1021         struct ifg_member       *ifgm;
1022         struct ifg_req           ifgrq, *ifgp;
1023         int                      len, error;
1024
1025         TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
1026                 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
1027                         break;
1028         if (ifg == NULL)
1029                 return (ENOENT);
1030
1031         if (ifgr->ifgr_len == 0) {
1032                 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
1033                         ifgr->ifgr_len += sizeof(ifgrq);
1034                 return (0);
1035         }
1036
1037         len = ifgr->ifgr_len;
1038         ifgp = ifgr->ifgr_groups;
1039         TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
1040                 if (len < sizeof(ifgrq))
1041                         return (EINVAL);
1042                 bzero(&ifgrq, sizeof ifgrq);
1043                 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
1044                     sizeof(ifgrq.ifgrq_member));
1045                 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1046                     sizeof(struct ifg_req))))
1047                         return (error);
1048                 len -= sizeof(ifgrq);
1049                 ifgp++;
1050         }
1051
1052         return (0);
1053 }
1054
1055 /*
1056  * Delete Routes for a Network Interface
1057  *
1058  * Called for each routing entry via the rnh->rnh_walktree() call above
1059  * to delete all route entries referencing a detaching network interface.
1060  *
1061  * Arguments:
1062  *      rn      pointer to node in the routing table
1063  *      arg     argument passed to rnh->rnh_walktree() - detaching interface
1064  *
1065  * Returns:
1066  *      0       successful
1067  *      errno   failed - reason indicated
1068  *
1069  */
1070 static int
1071 if_rtdel(struct radix_node *rn, void *arg)
1072 {
1073         struct rtentry  *rt = (struct rtentry *)rn;
1074         struct ifnet    *ifp = arg;
1075         int             err;
1076
1077         if (rt->rt_ifp == ifp) {
1078
1079                 /*
1080                  * Protect (sorta) against walktree recursion problems
1081                  * with cloned routes
1082                  */
1083                 if (!(rt->rt_flags & RTF_UP))
1084                         return (0);
1085
1086                 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1087                                 rt_mask(rt), rt->rt_flags,
1088                                 NULL);
1089                 if (err) {
1090                         log(LOG_WARNING, "if_rtdel: error %d\n", err);
1091                 }
1092         }
1093
1094         return (0);
1095 }
1096
1097 /*
1098  * Locate an interface based on a complete address.
1099  */
1100 struct ifaddr *
1101 ifa_ifwithaddr(struct sockaddr *addr)
1102 {
1103         struct ifnet *ifp;
1104
1105         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1106                 struct ifaddr_container *ifac;
1107
1108                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1109                         struct ifaddr *ifa = ifac->ifa;
1110
1111                         if (ifa->ifa_addr->sa_family != addr->sa_family)
1112                                 continue;
1113                         if (sa_equal(addr, ifa->ifa_addr))
1114                                 return (ifa);
1115                         if ((ifp->if_flags & IFF_BROADCAST) &&
1116                             ifa->ifa_broadaddr &&
1117                             /* IPv6 doesn't have broadcast */
1118                             ifa->ifa_broadaddr->sa_len != 0 &&
1119                             sa_equal(ifa->ifa_broadaddr, addr))
1120                                 return (ifa);
1121                 }
1122         }
1123         return (NULL);
1124 }
1125 /*
1126  * Locate the point to point interface with a given destination address.
1127  */
1128 struct ifaddr *
1129 ifa_ifwithdstaddr(struct sockaddr *addr)
1130 {
1131         struct ifnet *ifp;
1132
1133         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1134                 struct ifaddr_container *ifac;
1135
1136                 if (!(ifp->if_flags & IFF_POINTOPOINT))
1137                         continue;
1138
1139                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1140                         struct ifaddr *ifa = ifac->ifa;
1141
1142                         if (ifa->ifa_addr->sa_family != addr->sa_family)
1143                                 continue;
1144                         if (ifa->ifa_dstaddr &&
1145                             sa_equal(addr, ifa->ifa_dstaddr))
1146                                 return (ifa);
1147                 }
1148         }
1149         return (NULL);
1150 }
1151
1152 /*
1153  * Find an interface on a specific network.  If many, choice
1154  * is most specific found.
1155  */
1156 struct ifaddr *
1157 ifa_ifwithnet(struct sockaddr *addr)
1158 {
1159         struct ifnet *ifp;
1160         struct ifaddr *ifa_maybe = NULL;
1161         u_int af = addr->sa_family;
1162         char *addr_data = addr->sa_data, *cplim;
1163
1164         /*
1165          * AF_LINK addresses can be looked up directly by their index number,
1166          * so do that if we can.
1167          */
1168         if (af == AF_LINK) {
1169                 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
1170
1171                 if (sdl->sdl_index && sdl->sdl_index <= if_index)
1172                         return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
1173         }
1174
1175         /*
1176  * Scan through each interface, looking for ones that have
1177          * addresses in this address family.
1178          */
1179         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1180                 struct ifaddr_container *ifac;
1181
1182                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1183                         struct ifaddr *ifa = ifac->ifa;
1184                         char *cp, *cp2, *cp3;
1185
1186                         if (ifa->ifa_addr->sa_family != af)
1187 next:                           continue;
1188                         if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
1189                                 /*
1190                                  * This is a bit broken as it doesn't
1191                                  * take into account that the remote end may
1192                                  * be a single node in the network we are
1193                                  * looking for.
1194                                  * The trouble is that we don't know the
1195                                  * netmask for the remote end.
1196                                  */
1197                                 if (ifa->ifa_dstaddr != NULL &&
1198                                     sa_equal(addr, ifa->ifa_dstaddr))
1199                                         return (ifa);
1200                         } else {
1201                                 /*
1202                                  * if we have a special address handler,
1203                                  * then use it instead of the generic one.
1204                                  */
1205                                 if (ifa->ifa_claim_addr) {
1206                                         if ((*ifa->ifa_claim_addr)(ifa, addr)) {
1207                                                 return (ifa);
1208                                         } else {
1209                                                 continue;
1210                                         }
1211                                 }
1212
1213                                 /*
1214                                  * Scan all the bits in the ifa's address.
1215                  * If a bit disagrees with what we are
1216                                  * looking for, mask it with the netmask
1217                                  * to see if it really matters.
1218                                  * (A byte at a time)
1219                                  */
1220                                 if (ifa->ifa_netmask == 0)
1221                                         continue;
1222                                 cp = addr_data;
1223                                 cp2 = ifa->ifa_addr->sa_data;
1224                                 cp3 = ifa->ifa_netmask->sa_data;
1225                                 cplim = ifa->ifa_netmask->sa_len +
1226                                         (char *)ifa->ifa_netmask;
1227                                 while (cp3 < cplim)
1228                                         if ((*cp++ ^ *cp2++) & *cp3++)
1229                                                 goto next; /* next address! */
1230                                 /*
1231                                  * If the netmask of what we just found
1232                                  * is more specific than what we had before
1233                                  * (if we had one) then remember the new one
1234                                  * before continuing to search
1235                                  * for an even better one.
1236                                  */
1237                                 if (ifa_maybe == NULL ||
1238                                     rn_refines((char *)ifa->ifa_netmask,
1239                                                (char *)ifa_maybe->ifa_netmask))
1240                                         ifa_maybe = ifa;
1241                         }
1242                 }
1243         }
1244         return (ifa_maybe);
1245 }
1246
1247 /*
1248  * Find an interface address specific to an interface best matching
1249  * a given address.
1250  */
1251 struct ifaddr *
1252 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1253 {
1254         struct ifaddr_container *ifac;
1255         char *cp, *cp2, *cp3;
1256         char *cplim;
1257         struct ifaddr *ifa_maybe = NULL;
1258         u_int af = addr->sa_family;
1259
1260         if (af >= AF_MAX)
1261                 return (0);
1262         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1263                 struct ifaddr *ifa = ifac->ifa;
1264
1265                 if (ifa->ifa_addr->sa_family != af)
1266                         continue;
1267                 if (ifa_maybe == NULL)
1268                         ifa_maybe = ifa;
1269                 if (ifa->ifa_netmask == NULL) {
1270                         if (sa_equal(addr, ifa->ifa_addr) ||
1271                             (ifa->ifa_dstaddr != NULL &&
1272                              sa_equal(addr, ifa->ifa_dstaddr)))
1273                                 return (ifa);
1274                         continue;
1275                 }
1276                 if (ifp->if_flags & IFF_POINTOPOINT) {
1277                         if (sa_equal(addr, ifa->ifa_dstaddr))
1278                                 return (ifa);
1279                 } else {
1280                         cp = addr->sa_data;
1281                         cp2 = ifa->ifa_addr->sa_data;
1282                         cp3 = ifa->ifa_netmask->sa_data;
1283                         cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1284                         for (; cp3 < cplim; cp3++)
1285                                 if ((*cp++ ^ *cp2++) & *cp3)
1286                                         break;
1287                         if (cp3 == cplim)
1288                                 return (ifa);
1289                 }
1290         }
1291         return (ifa_maybe);
1292 }
1293
1294 /*
1295  * Default action when installing a route with a Link Level gateway.
1296  * Lookup an appropriate real ifa to point to.
1297  * This should be moved to /sys/net/link.c eventually.
1298  */
1299 static void
1300 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1301 {
1302         struct ifaddr *ifa;
1303         struct sockaddr *dst;
1304         struct ifnet *ifp;
1305
1306         if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1307             (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
1308                 return;
1309         ifa = ifaof_ifpforaddr(dst, ifp);
1310         if (ifa != NULL) {
1311                 IFAFREE(rt->rt_ifa);
1312                 IFAREF(ifa);
1313                 rt->rt_ifa = ifa;
1314                 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1315                         ifa->ifa_rtrequest(cmd, rt, info);
1316         }
1317 }
1318
1319 /*
1320  * Mark an interface down and notify protocols of
1321  * the transition.
1322  * NOTE: must be called at splnet or equivalent.
1323  */
1324 void
1325 if_unroute(struct ifnet *ifp, int flag, int fam)
1326 {
1327         struct ifaddr_container *ifac;
1328
1329         ifp->if_flags &= ~flag;
1330         getmicrotime(&ifp->if_lastchange);
1331         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1332                 struct ifaddr *ifa = ifac->ifa;
1333
1334                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1335                         kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1336         }
1337         ifq_purge_all(&ifp->if_snd);
1338         rt_ifmsg(ifp);
1339 }
1340
1341 /*
1342  * Mark an interface up and notify protocols of
1343  * the transition.
1344  * NOTE: must be called at splnet or equivalent.
1345  */
1346 void
1347 if_route(struct ifnet *ifp, int flag, int fam)
1348 {
1349         struct ifaddr_container *ifac;
1350
1351         ifq_purge_all(&ifp->if_snd);
1352         ifp->if_flags |= flag;
1353         getmicrotime(&ifp->if_lastchange);
1354         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1355                 struct ifaddr *ifa = ifac->ifa;
1356
1357                 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1358                         kpfctlinput(PRC_IFUP, ifa->ifa_addr);
1359         }
1360         rt_ifmsg(ifp);
1361 #ifdef INET6
1362         in6_if_up(ifp);
1363 #endif
1364 }
1365
1366 /*
1367  * Mark an interface down and notify protocols of the transition.  An
1368  * interface going down is also considered to be a synchronizing event.
1369  * We must ensure that all packet processing related to the interface
1370  * has completed before we return so e.g. the caller can free the ifnet
1371  * structure that the mbufs may be referencing.
1372  *
1373  * NOTE: must be called at splnet or equivalent.
1374  */
1375 void
1376 if_down(struct ifnet *ifp)
1377 {
1378         if_unroute(ifp, IFF_UP, AF_UNSPEC);
1379         netmsg_service_sync();
1380 }
1381
1382 /*
1383  * Mark an interface up and notify protocols of
1384  * the transition.
1385  * NOTE: must be called at splnet or equivalent.
1386  */
1387 void
1388 if_up(struct ifnet *ifp)
1389 {
1390         if_route(ifp, IFF_UP, AF_UNSPEC);
1391 }
1392
1393 /*
1394  * Process a link state change.
1395  * NOTE: must be called at splsoftnet or equivalent.
1396  */
1397 void
1398 if_link_state_change(struct ifnet *ifp)
1399 {
1400         int link_state = ifp->if_link_state;
1401
1402         rt_ifmsg(ifp);
1403         devctl_notify("IFNET", ifp->if_xname,
1404             (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1405 }
1406
1407 /*
1408  * Handle interface watchdog timer routines.  Called
1409  * from softclock, we decrement timers (if set) and
1410  * call the appropriate interface routine on expiration.
1411  */
1412 static void
1413 if_slowtimo(void *arg)
1414 {
1415         struct ifnet *ifp;
1416
1417         crit_enter();
1418
1419         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1420                 if (ifp->if_timer == 0 || --ifp->if_timer)
1421                         continue;
1422                 if (ifp->if_watchdog) {
1423                         if (ifnet_tryserialize_all(ifp)) {
1424                                 (*ifp->if_watchdog)(ifp);
1425                                 ifnet_deserialize_all(ifp);
1426                         } else {
1427                                 /* try again next timeout */
1428                                 ++ifp->if_timer;
1429                         }
1430                 }
1431         }
1432
1433         crit_exit();
1434
1435         callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
1436 }
1437
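/*
 * Illustrative sketch: classic if_timer/if_watchdog usage that
 * if_slowtimo() above services.  The example_* names and the 5 second
 * timeout are hypothetical.
 */
#ifdef notyet
static void
example_start(struct ifnet *ifp, struct ifaltq_subque *ifsq __unused)
{
        /* ... hand queued packets to the hardware ... */
        ifp->if_timer = 5;      /* expect TX completion within 5 seconds */
}

static void
example_watchdog(struct ifnet *ifp)
{
        /* Called by if_slowtimo() once if_timer counts down to zero */
        if_printf(ifp, "transmit timeout\n");
        /* reset the hardware, then restart transmission */
        if_devstart(ifp);
}
#endif
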
1438 /*
1439  * Map interface name to
1440  * interface structure pointer.
1441  */
1442 struct ifnet *
1443 ifunit(const char *name)
1444 {
1445         struct ifnet *ifp;
1446
1447         /*
1448          * Search all the interfaces for this name/number
1449          */
1450
1451         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1452                 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
1453                         break;
1454         }
1455         return (ifp);
1456 }
1457
1458
1459 /*
1460  * Map interface name in a sockaddr_dl to
1461  * interface structure pointer.
1462  */
1463 struct ifnet *
1464 if_withname(struct sockaddr *sa)
1465 {
1466         char ifname[IFNAMSIZ+1];
1467         struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1468
1469         if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1470              (sdl->sdl_nlen > IFNAMSIZ) )
1471                 return NULL;
1472
1473         /*
1474          * ifunit wants a null-terminated name.  It may not be null-terminated
1475          * in the sockaddr.  We don't want to change the caller's sockaddr,
1476          * and there might not be room to put the trailing null anyway, so we
1477          * make a local copy that we know we can null terminate safely.
1478          */
1479
1480         bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1481         ifname[sdl->sdl_nlen] = '\0';
1482         return ifunit(ifname);
1483 }
1484
1485
1486 /*
1487  * Interface ioctls.
1488  */
1489 int
1490 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
1491 {
1492         struct ifnet *ifp;
1493         struct ifreq *ifr;
1494         struct ifstat *ifs;
1495         int error;
1496         short oif_flags;
1497         int new_flags;
1498 #ifdef COMPAT_43
1499         int ocmd;
1500 #endif
1501         size_t namelen, onamelen;
1502         char new_name[IFNAMSIZ];
1503         struct ifaddr *ifa;
1504         struct sockaddr_dl *sdl;
1505
1506         switch (cmd) {
1507         case SIOCGIFCONF:
1508         case OSIOCGIFCONF:
1509                 return (ifconf(cmd, data, cred));
1510         default:
1511                 break;
1512         }
1513
1514         ifr = (struct ifreq *)data;
1515
1516         switch (cmd) {
1517         case SIOCIFCREATE:
1518         case SIOCIFCREATE2:
1519                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1520                         return (error);
1521                 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1522                         cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
1523         case SIOCIFDESTROY:
1524                 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1525                         return (error);
1526                 return (if_clone_destroy(ifr->ifr_name));
1527         case SIOCIFGCLONERS:
1528                 return (if_clone_list((struct if_clonereq *)data));
1529         default:
1530                 break;
1531         }
1532
1533         /*
1534          * Nominal ioctl through interface, lookup the ifp and obtain a
1535          * lock to serialize the ifconfig ioctl operation.
1536          */
1537         ifp = ifunit(ifr->ifr_name);
1538         if (ifp == NULL)
1539                 return (ENXIO);
1540         error = 0;
1541         mtx_lock(&ifp->if_ioctl_mtx);
1542
1543         switch (cmd) {
1544         case SIOCGIFINDEX:
1545                 ifr->ifr_index = ifp->if_index;
1546                 break;
1547
1548         case SIOCGIFFLAGS:
1549                 ifr->ifr_flags = ifp->if_flags;
1550                 ifr->ifr_flagshigh = ifp->if_flags >> 16;
1551                 break;
1552
1553         case SIOCGIFCAP:
1554                 ifr->ifr_reqcap = ifp->if_capabilities;
1555                 ifr->ifr_curcap = ifp->if_capenable;
1556                 break;
1557
1558         case SIOCGIFMETRIC:
1559                 ifr->ifr_metric = ifp->if_metric;
1560                 break;
1561
1562         case SIOCGIFMTU:
1563                 ifr->ifr_mtu = ifp->if_mtu;
1564                 break;
1565
1566         case SIOCGIFDATA:
1567                 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
1568                                 sizeof(ifp->if_data));
1569                 break;
1570
1571         case SIOCGIFPHYS:
1572                 ifr->ifr_phys = ifp->if_physical;
1573                 break;
1574
1575         case SIOCGIFPOLLCPU:
1576                 ifr->ifr_pollcpu = -1;
1577                 break;
1578
1579         case SIOCSIFPOLLCPU:
1580                 break;
1581
1582         case SIOCSIFFLAGS:
1583                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1584                 if (error)
1585                         break;
1586                 new_flags = (ifr->ifr_flags & 0xffff) |
1587                     (ifr->ifr_flagshigh << 16);
1588                 if (ifp->if_flags & IFF_SMART) {
1589                         /* Smart drivers twiddle their own routes */
1590                 } else if (ifp->if_flags & IFF_UP &&
1591                     (new_flags & IFF_UP) == 0) {
1592                         crit_enter();
1593                         if_down(ifp);
1594                         crit_exit();
1595                 } else if (new_flags & IFF_UP &&
1596                     (ifp->if_flags & IFF_UP) == 0) {
1597                         crit_enter();
1598                         if_up(ifp);
1599                         crit_exit();
1600                 }
1601
1602 #ifdef IFPOLL_ENABLE
1603                 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1604                         if (new_flags & IFF_NPOLLING)
1605                                 ifpoll_register(ifp);
1606                         else
1607                                 ifpoll_deregister(ifp);
1608                 }
1609 #endif
1610
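                /*
                 * Merge in the new flags; bits in IFF_CANTCHANGE are always
                 * taken from the interface's current flags.
                 */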
1611                 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1612                         (new_flags &~ IFF_CANTCHANGE);
1613                 if (new_flags & IFF_PPROMISC) {
1614                         /* Permanently promiscuous mode requested */
1615                         ifp->if_flags |= IFF_PROMISC;
1616                 } else if (ifp->if_pcount == 0) {
1617                         ifp->if_flags &= ~IFF_PROMISC;
1618                 }
1619                 if (ifp->if_ioctl) {
1620                         ifnet_serialize_all(ifp);
1621                         ifp->if_ioctl(ifp, cmd, data, cred);
1622                         ifnet_deserialize_all(ifp);
1623                 }
1624                 getmicrotime(&ifp->if_lastchange);
1625                 break;
1626
1627         case SIOCSIFCAP:
1628                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1629                 if (error)
1630                         break;
1631                 if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
1632                         error = EINVAL;
1633                         break;
1634                 }
1635                 ifnet_serialize_all(ifp);
1636                 ifp->if_ioctl(ifp, cmd, data, cred);
1637                 ifnet_deserialize_all(ifp);
1638                 break;
1639
1640         case SIOCSIFNAME:
1641                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1642                 if (error)
1643                         break;
1644                 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
1645                 if (error)
1646                         break;
1647                 if (new_name[0] == '\0') {
1648                         error = EINVAL;
1649                         break;
1650                 }
1651                 if (ifunit(new_name) != NULL) {
1652                         error = EEXIST;
1653                         break;
1654                 }
1655
1656                 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
1657
1658                 /* Announce the departure of the interface. */
1659                 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1660
1661                 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
1662                 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
1663                 /* XXX IFA_LOCK(ifa); */
1664                 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1665                 namelen = strlen(new_name);
1666                 onamelen = sdl->sdl_nlen;
1667                 /*
1668                  * Move the address if needed.  This is safe because we
1669                  * allocate space for a name of length IFNAMSIZ when we
1670                  * create this in if_attach().
1671                  */
1672                 if (namelen != onamelen) {
1673                         bcopy(sdl->sdl_data + onamelen,
1674                             sdl->sdl_data + namelen, sdl->sdl_alen);
1675                 }
1676                 bcopy(new_name, sdl->sdl_data, namelen);
1677                 sdl->sdl_nlen = namelen;
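                /*
                 * Rebuild the netmask of the link-level address: its name
                 * portion must be all ones for the new name length.
                 */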
1678                 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1679                 bzero(sdl->sdl_data, onamelen);
1680                 while (namelen != 0)
1681                         sdl->sdl_data[--namelen] = 0xff;
1682                 /* XXX IFA_UNLOCK(ifa) */
1683
1684                 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
1685
1686                 /* Announce the return of the interface. */
1687                 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1688                 break;
1689
1690         case SIOCSIFMETRIC:
1691                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1692                 if (error)
1693                         break;
1694                 ifp->if_metric = ifr->ifr_metric;
1695                 getmicrotime(&ifp->if_lastchange);
1696                 break;
1697
1698         case SIOCSIFPHYS:
1699                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1700                 if (error)
1701                         break;
1702                 if (ifp->if_ioctl == NULL) {
1703                         error = EOPNOTSUPP;
1704                         break;
1705                 }
1706                 ifnet_serialize_all(ifp);
1707                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1708                 ifnet_deserialize_all(ifp);
1709                 if (error == 0)
1710                         getmicrotime(&ifp->if_lastchange);
1711                 break;
1712
1713         case SIOCSIFMTU:
1714         {
1715                 u_long oldmtu = ifp->if_mtu;
1716
1717                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1718                 if (error)
1719                         break;
1720                 if (ifp->if_ioctl == NULL) {
1721                         error = EOPNOTSUPP;
1722                         break;
1723                 }
1724                 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
1725                         error = EINVAL;
1726                         break;
1727                 }
1728                 ifnet_serialize_all(ifp);
1729                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1730                 ifnet_deserialize_all(ifp);
1731                 if (error == 0) {
1732                         getmicrotime(&ifp->if_lastchange);
1733                         rt_ifmsg(ifp);
1734                 }
1735                 /*
1736                  * If the link MTU changed, do network layer specific procedure.
1737                  */
1738                 if (ifp->if_mtu != oldmtu) {
1739 #ifdef INET6
1740                         nd6_setmtu(ifp);
1741 #endif
1742                 }
1743                 break;
1744         }
1745
1746         case SIOCADDMULTI:
1747         case SIOCDELMULTI:
1748                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1749                 if (error)
1750                         break;
1751
1752                 /* Don't allow group membership on non-multicast interfaces. */
1753                 if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1754                         error = EOPNOTSUPP;
1755                         break;
1756                 }
1757
1758                 /* Don't let users screw up protocols' entries. */
1759                 if (ifr->ifr_addr.sa_family != AF_LINK) {
1760                         error = EINVAL;
1761                         break;
1762                 }
1763
1764                 if (cmd == SIOCADDMULTI) {
1765                         struct ifmultiaddr *ifma;
1766                         error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1767                 } else {
1768                         error = if_delmulti(ifp, &ifr->ifr_addr);
1769                 }
1770                 if (error == 0)
1771                         getmicrotime(&ifp->if_lastchange);
1772                 break;
1773
1774         case SIOCSIFPHYADDR:
1775         case SIOCDIFPHYADDR:
1776 #ifdef INET6
1777         case SIOCSIFPHYADDR_IN6:
1778 #endif
1779         case SIOCSLIFPHYADDR:
1780         case SIOCSIFMEDIA:
1781         case SIOCSIFGENERIC:
1782                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1783                 if (error)
1784                         break;
1785                 if (ifp->if_ioctl == NULL) {
1786                         error = EOPNOTSUPP;
1787                         break;
1788                 }
1789                 ifnet_serialize_all(ifp);
1790                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1791                 ifnet_deserialize_all(ifp);
1792                 if (error == 0)
1793                         getmicrotime(&ifp->if_lastchange);
1794                 break;
1795
1796         case SIOCGIFSTATUS:
1797                 ifs = (struct ifstat *)data;
1798                 ifs->ascii[0] = '\0';
1799                 /* fall through */
1800         case SIOCGIFPSRCADDR:
1801         case SIOCGIFPDSTADDR:
1802         case SIOCGLIFPHYADDR:
1803         case SIOCGIFMEDIA:
1804         case SIOCGIFGENERIC:
1805                 if (ifp->if_ioctl == NULL) {
1806                         error = EOPNOTSUPP;
1807                         break;
1808                 }
1809                 ifnet_serialize_all(ifp);
1810                 error = ifp->if_ioctl(ifp, cmd, data, cred);
1811                 ifnet_deserialize_all(ifp);
1812                 break;
1813
1814         case SIOCSIFLLADDR:
1815                 error = priv_check_cred(cred, PRIV_ROOT, 0);
1816                 if (error)
1817                         break;
1818                 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data,
1819                                      ifr->ifr_addr.sa_len);
1820                 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
1821                 break;
1822
1823         default:
1824                 oif_flags = ifp->if_flags;
1825                 if (so->so_proto == NULL) {
1826                         error = EOPNOTSUPP;
1827                         break;
1828                 }
1829 #ifndef COMPAT_43
1830                 error = so_pru_control_direct(so, cmd, data, ifp);
1831 #else
1832                 ocmd = cmd;
1833
1834                 switch (cmd) {
1835                 case SIOCSIFDSTADDR:
1836                 case SIOCSIFADDR:
1837                 case SIOCSIFBRDADDR:
1838                 case SIOCSIFNETMASK:
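                        /*
                         * Old 4.3BSD sockaddrs have a 16-bit sa_family and no
                         * sa_len; reconstruct sa_family/sa_len so the modern
                         * code below sees a well-formed sockaddr.
                         */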
1839 #if BYTE_ORDER != BIG_ENDIAN
1840                         if (ifr->ifr_addr.sa_family == 0 &&
1841                             ifr->ifr_addr.sa_len < 16) {
1842                                 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1843                                 ifr->ifr_addr.sa_len = 16;
1844                         }
1845 #else
1846                         if (ifr->ifr_addr.sa_len == 0)
1847                                 ifr->ifr_addr.sa_len = 16;
1848 #endif
1849                         break;
1850                 case OSIOCGIFADDR:
1851                         cmd = SIOCGIFADDR;
1852                         break;
1853                 case OSIOCGIFDSTADDR:
1854                         cmd = SIOCGIFDSTADDR;
1855                         break;
1856                 case OSIOCGIFBRDADDR:
1857                         cmd = SIOCGIFBRDADDR;
1858                         break;
1859                 case OSIOCGIFNETMASK:
1860                         cmd = SIOCGIFNETMASK;
1861                         break;
1862                 default:
1863                         break;
1864                 }
1865
1866                 error = so_pru_control_direct(so, cmd, data, ifp);
1867
1868                 switch (ocmd) {
1869                 case OSIOCGIFADDR:
1870                 case OSIOCGIFDSTADDR:
1871                 case OSIOCGIFBRDADDR:
1872                 case OSIOCGIFNETMASK:
1873                         *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
1874                         break;
1875                 }
1876 #endif /* COMPAT_43 */
1877
1878                 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1879 #ifdef INET6
1880                         DELAY(100);/* XXX: temporary workaround for fxp issue*/
1881                         if (ifp->if_flags & IFF_UP) {
1882                                 crit_enter();
1883                                 in6_if_up(ifp);
1884                                 crit_exit();
1885                         }
1886 #endif
1887                 }
1888                 break;
1889         }
1890
1891         mtx_unlock(&ifp->if_ioctl_mtx);
1892         return (error);
1893 }
1894
1895 /*
1896  * Set/clear promiscuous mode on interface ifp based on the truth value
1897  * of pswitch.  The calls are reference counted so that only the first
1898  * "on" request actually has an effect, as does the final "off" request.
1899  * Results are undefined if the "off" and "on" requests are not matched.
1900  */
1901 int
1902 ifpromisc(struct ifnet *ifp, int pswitch)
1903 {
1904         struct ifreq ifr;
1905         int error;
1906         int oldflags;
1907
1908         oldflags = ifp->if_flags;
1909         if (ifp->if_flags & IFF_PPROMISC) {
1910                 /* Do nothing if device is in permanently promiscuous mode */
1911                 ifp->if_pcount += pswitch ? 1 : -1;
1912                 return (0);
1913         }
1914         if (pswitch) {
1915                 /*
1916                  * If the device is not configured up, we cannot put it in
1917                  * promiscuous mode.
1918                  */
1919                 if ((ifp->if_flags & IFF_UP) == 0)
1920                         return (ENETDOWN);
1921                 if (ifp->if_pcount++ != 0)
1922                         return (0);
1923                 ifp->if_flags |= IFF_PROMISC;
1924                 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1925                     ifp->if_xname);
1926         } else {
1927                 if (--ifp->if_pcount > 0)
1928                         return (0);
1929                 ifp->if_flags &= ~IFF_PROMISC;
1930                 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1931                     ifp->if_xname);
1932         }
1933         ifr.ifr_flags = ifp->if_flags;
1934         ifr.ifr_flagshigh = ifp->if_flags >> 16;
1935         ifnet_serialize_all(ifp);
1936         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1937         ifnet_deserialize_all(ifp);
1938         if (error == 0)
1939                 rt_ifmsg(ifp);
1940         else
1941                 ifp->if_flags = oldflags;
1942         return error;
1943 }
1944
1945 /*
1946  * Return the interface configuration of the system.  The list may be
1947  * used in later ioctls (above) to get other information.
1950  */
1951 static int
1952 ifconf(u_long cmd, caddr_t data, struct ucred *cred)
1953 {
1954         struct ifconf *ifc = (struct ifconf *)data;
1955         struct ifnet *ifp;
1956         struct sockaddr *sa;
1957         struct ifreq ifr, *ifrp;
1958         int space = ifc->ifc_len, error = 0;
1959
1960         ifrp = ifc->ifc_req;
1961         TAILQ_FOREACH(ifp, &ifnet, if_link) {
1962                 struct ifaddr_container *ifac;
1963                 int addrs;
1964
1965                 if (space <= sizeof ifr)
1966                         break;
1967
1968                 /*
1969                  * Zero the stack declared structure first to prevent
1970                  * memory disclosure.
1971                  */
1972                 bzero(&ifr, sizeof(ifr));
1973                 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1974                     >= sizeof(ifr.ifr_name)) {
1975                         error = ENAMETOOLONG;
1976                         break;
1977                 }
1978
1979                 addrs = 0;
1980                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1981                         struct ifaddr *ifa = ifac->ifa;
1982
1983                         if (space <= sizeof ifr)
1984                                 break;
1985                         sa = ifa->ifa_addr;
1986                         if (cred->cr_prison &&
1987                             prison_if(cred, sa))
1988                                 continue;
1989                         addrs++;
1990 #ifdef COMPAT_43
1991                         if (cmd == OSIOCGIFCONF) {
1992                                 struct osockaddr *osa =
1993                                          (struct osockaddr *)&ifr.ifr_addr;
1994                                 ifr.ifr_addr = *sa;
1995                                 osa->sa_family = sa->sa_family;
1996                                 error = copyout(&ifr, ifrp, sizeof ifr);
1997                                 ifrp++;
1998                         } else
1999 #endif
2000                         if (sa->sa_len <= sizeof(*sa)) {
2001                                 ifr.ifr_addr = *sa;
2002                                 error = copyout(&ifr, ifrp, sizeof ifr);
2003                                 ifrp++;
2004                         } else {
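                                /*
                                 * The address does not fit in a standard
                                 * sockaddr: copy the name and the full-length
                                 * address separately and advance the output
                                 * pointer past the oversized sockaddr.
                                 */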
2005                                 if (space < (sizeof ifr) + sa->sa_len -
2006                                             sizeof(*sa))
2007                                         break;
2008                                 space -= sa->sa_len - sizeof(*sa);
2009                                 error = copyout(&ifr, ifrp,
2010                                                 sizeof ifr.ifr_name);
2011                                 if (error == 0)
2012                                         error = copyout(sa, &ifrp->ifr_addr,
2013                                                         sa->sa_len);
2014                                 ifrp = (struct ifreq *)
2015                                         (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
2016                         }
2017                         if (error)
2018                                 break;
2019                         space -= sizeof ifr;
2020                 }
2021                 if (error)
2022                         break;
2023                 if (!addrs) {
2024                         bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
2025                         error = copyout(&ifr, ifrp, sizeof ifr);
2026                         if (error)
2027                                 break;
2028                         space -= sizeof ifr;
2029                         ifrp++;
2030                 }
2031         }
2032         ifc->ifc_len -= space;
2033         return (error);
2034 }
2035
2036 /*
2037  * Just like ifpromisc(), but for all-multicast-reception mode.
2038  */
2039 int
2040 if_allmulti(struct ifnet *ifp, int onswitch)
2041 {
2042         int error = 0;
2043         struct ifreq ifr;
2044
2045         crit_enter();
2046
2047         if (onswitch) {
2048                 if (ifp->if_amcount++ == 0) {
2049                         ifp->if_flags |= IFF_ALLMULTI;
2050                         ifr.ifr_flags = ifp->if_flags;
2051                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
2052                         ifnet_serialize_all(ifp);
2053                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2054                                               NULL);
2055                         ifnet_deserialize_all(ifp);
2056                 }
2057         } else {
2058                 if (ifp->if_amcount > 1) {
2059                         ifp->if_amcount--;
2060                 } else {
2061                         ifp->if_amcount = 0;
2062                         ifp->if_flags &= ~IFF_ALLMULTI;
2063                         ifr.ifr_flags = ifp->if_flags;
2064                         ifr.ifr_flagshigh = ifp->if_flags >> 16;
2065                         ifnet_serialize_all(ifp);
2066                         error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2067                                               NULL);
2068                         ifnet_deserialize_all(ifp);
2069                 }
2070         }
2071
2072         crit_exit();
2073
2074         if (error == 0)
2075                 rt_ifmsg(ifp);
2076         return error;
2077 }
2078
2079 /*
2080  * Add a multicast listenership to the interface in question.
2081  * The link layer provides a routine (if_resolvemulti) which converts
      * the address into a link-layer multicast address, if necessary.
2082  */
2083 int
2084 if_addmulti(
2085         struct ifnet *ifp,      /* interface to manipulate */
2086         struct sockaddr *sa,    /* address to add */
2087         struct ifmultiaddr **retifma)
2088 {
2089         struct sockaddr *llsa, *dupsa;
2090         int error;
2091         struct ifmultiaddr *ifma;
2092
2093         /*
2094          * If the matching multicast address already exists
2095          * then don't add a new one, just add a reference
2096          */
2097         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2098                 if (sa_equal(sa, ifma->ifma_addr)) {
2099                         ifma->ifma_refcount++;
2100                         if (retifma)
2101                                 *retifma = ifma;
2102                         return 0;
2103                 }
2104         }
2105
2106         /*
2107          * Give the link layer a chance to accept/reject it, and also
2108          * find out which AF_LINK address this maps to, if it isn't one
2109          * already.
2110          */
2111         if (ifp->if_resolvemulti) {
2112                 ifnet_serialize_all(ifp);
2113                 error = ifp->if_resolvemulti(ifp, &llsa, sa);
2114                 ifnet_deserialize_all(ifp);
2115                 if (error) 
2116                         return error;
2117         } else {
2118                 llsa = NULL;
2119         }
2120
2121         ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2122         dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_WAITOK);
2123         bcopy(sa, dupsa, sa->sa_len);
2124
2125         ifma->ifma_addr = dupsa;
2126         ifma->ifma_lladdr = llsa;
2127         ifma->ifma_ifp = ifp;
2128         ifma->ifma_refcount = 1;
2129         ifma->ifma_protospec = 0;
2130         rt_newmaddrmsg(RTM_NEWMADDR, ifma);
2131
2132         /*
2133          * Some network interfaces can scan the address list at
2134          * interrupt time; lock them out.
2135          */
2136         crit_enter();
2137         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
2138         crit_exit();
2139         if (retifma)
2140                 *retifma = ifma;
2141
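        /*
         * If the link layer mapped the address to an AF_LINK multicast
         * address, reference that entry as well, creating it if it is
         * not already on the list.
         */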
2142         if (llsa != NULL) {
2143                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2144                         if (sa_equal(ifma->ifma_addr, llsa))
2145                                 break;
2146                 }
2147                 if (ifma) {
2148                         ifma->ifma_refcount++;
2149                 } else {
2150                         ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2151                         dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_WAITOK);
2152                         bcopy(llsa, dupsa, llsa->sa_len);
2153                         ifma->ifma_addr = dupsa;
2154                         ifma->ifma_ifp = ifp;
2155                         ifma->ifma_refcount = 1;
2156                         crit_enter();
2157                         TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
2158                         crit_exit();
2159                 }
2160         }
2161         /*
2162          * We are certain we have added something, so call down to the
2163          * interface to let it know about it.
2164          */
2165         crit_enter();
2166         ifnet_serialize_all(ifp);
2167         if (ifp->if_ioctl)
2168                 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
2169         ifnet_deserialize_all(ifp);
2170         crit_exit();
2171
2172         return 0;
2173 }
2174
2175 /*
2176  * Remove a reference to a multicast address on this interface.  Yell
2177  * if the request does not match an existing membership.
2178  */
2179 int
2180 if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
2181 {
2182         struct ifmultiaddr *ifma;
2183
2184         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2185                 if (sa_equal(sa, ifma->ifma_addr))
2186                         break;
2187         if (ifma == NULL)
2188                 return ENOENT;
2189
2190         if (ifma->ifma_refcount > 1) {
2191                 ifma->ifma_refcount--;
2192                 return 0;
2193         }
2194
2195         rt_newmaddrmsg(RTM_DELMADDR, ifma);
2196         sa = ifma->ifma_lladdr;
2197         crit_enter();
2198         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
2199         /*
2200          * Make sure the interface driver is notified
2201          * in the case of a link layer mcast group being left.
2202          */
2203         if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) {
2204                 ifnet_serialize_all(ifp);
2205                 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
2206                 ifnet_deserialize_all(ifp);
2207         }
2208         crit_exit();
2209         kfree(ifma->ifma_addr, M_IFMADDR);
2210         kfree(ifma, M_IFMADDR);
2211         if (sa == NULL)
2212                 return 0;
2213
2214         /*
2215          * Now look for the link-layer address which corresponds to
2216          * this network address.  It had been squirreled away in
2217          * ifma->ifma_lladdr for this purpose (so we don't have
2218          * to call ifp->if_resolvemulti() again), and we saved that
2219          * value in sa above.  If something deleted the
2220          * link-layer address out from underneath us, we can cope because
2221          * the address we stored is not the same as the one which is
2222          * in the record for the link-layer address.  (So we don't complain
2223          * in that case.)
2224          */
2225         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2226                 if (sa_equal(sa, ifma->ifma_addr))
2227                         break;
2228         if (ifma == NULL)
2229                 return 0;
2230
2231         if (ifma->ifma_refcount > 1) {
2232                 ifma->ifma_refcount--;
2233                 return 0;
2234         }
2235
2236         crit_enter();
2237         ifnet_serialize_all(ifp);
2238         TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
2239         ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
2240         ifnet_deserialize_all(ifp);
2241         crit_exit();
2242         kfree(ifma->ifma_addr, M_IFMADDR);
2243         kfree(sa, M_IFMADDR);
2244         kfree(ifma, M_IFMADDR);
2245
2246         return 0;
2247 }
2248
2249 /*
2250  * Delete all multicast group memberships for an interface.
2251  * Should be used to quickly flush all multicast filters.
2252  */
2253 void
2254 if_delallmulti(struct ifnet *ifp)
2255 {
2256         struct ifmultiaddr *ifma;
2257         struct ifmultiaddr *next;
2258
2259         TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
2260                 if_delmulti(ifp, ifma->ifma_addr);
2261 }
2262
2263
2264 /*
2265  * Set the link layer address on an interface.
2266  *
2267  * At this time we only support certain types of interfaces,
2268  * and we don't allow the length of the address to change.
2269  */
2270 int
2271 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2272 {
2273         struct sockaddr_dl *sdl;
2274         struct ifreq ifr;
2275
2276         sdl = IF_LLSOCKADDR(ifp);
2277         if (sdl == NULL)
2278                 return (EINVAL);
2279         if (len != sdl->sdl_alen)       /* don't allow length to change */
2280                 return (EINVAL);
2281         switch (ifp->if_type) {
2282         case IFT_ETHER:                 /* these types use struct arpcom */
2283         case IFT_XETHER:
2284         case IFT_L2VLAN:
2285                 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
2286                 bcopy(lladdr, LLADDR(sdl), len);
2287                 break;
2288         default:
2289                 return (ENODEV);
2290         }
2291         /*
2292          * If the interface is already up, we need
2293          * to re-init it in order to reprogram its
2294          * address filter.
2295          */
2296         ifnet_serialize_all(ifp);
2297         if ((ifp->if_flags & IFF_UP) != 0) {
2298 #ifdef INET
2299                 struct ifaddr_container *ifac;
2300 #endif
2301
2302                 ifp->if_flags &= ~IFF_UP;
2303                 ifr.ifr_flags = ifp->if_flags;
2304                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2305                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2306                               NULL);
2307                 ifp->if_flags |= IFF_UP;
2308                 ifr.ifr_flags = ifp->if_flags;
2309                 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2310                 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2311                                  NULL);
2312 #ifdef INET
2313                 /*
2314                  * Also send gratuitous ARPs to notify other nodes about
2315                  * the address change.
2316                  */
2317                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2318                         struct ifaddr *ifa = ifac->ifa;
2319
2320                         if (ifa->ifa_addr != NULL &&
2321                             ifa->ifa_addr->sa_family == AF_INET)
2322                                 arp_gratuitous(ifp, ifa);
2323                 }
2324 #endif
2325         }
2326         ifnet_deserialize_all(ifp);
2327         return (0);
2328 }
2329
2330 struct ifmultiaddr *
2331 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
2332 {
2333         struct ifmultiaddr *ifma;
2334
2335         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
2336                 if (sa_equal(ifma->ifma_addr, sa))
2337                         break;
2338
2339         return ifma;
2340 }
2341
2342 /*
2343  * This function locates the first real ethernet MAC from a network
2344  * card and loads it into node, returning 0 on success or ENOENT if
2345  * no suitable interfaces were found.  It is used by the uuid code to
2346  * generate a unique 6-byte number.
2347  */
2348 int
2349 if_getanyethermac(uint16_t *node, int minlen)
2350 {
2351         struct ifnet *ifp;
2352         struct sockaddr_dl *sdl;
2353
2354         TAILQ_FOREACH(ifp, &ifnet, if_link) {
2355                 if (ifp->if_type != IFT_ETHER)
2356                         continue;
2357                 sdl = IF_LLSOCKADDR(ifp);
2358                 if (sdl->sdl_alen < minlen)
2359                         continue;
2360                 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2361                       minlen);
2362                 return(0);
2363         }
2364         return (ENOENT);
2365 }
2366
2367 /*
2368  * The name argument must be a pointer to storage which will last as
2369  * long as the interface does.  For physical devices, the result of
2370  * device_get_name(dev) is a good choice and for pseudo-devices a
2371  * static string works well.
2372  */
2373 void
2374 if_initname(struct ifnet *ifp, const char *name, int unit)
2375 {
2376         ifp->if_dname = name;
2377         ifp->if_dunit = unit;
2378         if (unit != IF_DUNIT_NONE)
2379                 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2380         else
2381                 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2382 }
2383
2384 int
2385 if_printf(struct ifnet *ifp, const char *fmt, ...)
2386 {
2387         __va_list ap;
2388         int retval;
2389
2390         retval = kprintf("%s: ", ifp->if_xname);
2391         __va_start(ap, fmt);
2392         retval += kvprintf(fmt, ap);
2393         __va_end(ap);
2394         return (retval);
2395 }
2396
2397 struct ifnet *
2398 if_alloc(uint8_t type)
2399 {
2400         struct ifnet *ifp;
2401         size_t size;
2402
2403         /*
2404          * XXX temporary hack until arpcom is set up in if_l2com
2405          */
2406         if (type == IFT_ETHER)
2407                 size = sizeof(struct arpcom);
2408         else
2409                 size = sizeof(struct ifnet);
2410
2411         ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);
2412
2413         ifp->if_type = type;
2414
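        /*
         * Let the type-specific allocator, if one is registered, set up
         * the link-layer common structure.
         */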
2415         if (if_com_alloc[type] != NULL) {
2416                 ifp->if_l2com = if_com_alloc[type](type, ifp);
2417                 if (ifp->if_l2com == NULL) {
2418                         kfree(ifp, M_IFNET);
2419                         return (NULL);
2420                 }
2421         }
2422         return (ifp);
2423 }
2424
2425 void
2426 if_free(struct ifnet *ifp)
2427 {
2428         kfree(ifp, M_IFNET);
2429 }
2430
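/*
 * Install the classic FIFO enqueue/dequeue/request methods on every
 * subqueue of the ifaltq.
 */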
2431 void
2432 ifq_set_classic(struct ifaltq *ifq)
2433 {
2434         ifq_set_methods(ifq, ifsq_classic_enqueue, ifsq_classic_dequeue,
2435             ifsq_classic_request);
2436 }
2437
2438 void
2439 ifq_set_methods(struct ifaltq *ifq, ifsq_enqueue_t enqueue,
2440     ifsq_dequeue_t dequeue, ifsq_request_t request)
2441 {
2442         int q;
2443
2444         for (q = 0; q < ifq->altq_subq_cnt; ++q) {
2445                 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
2446
2447                 ifsq->ifsq_enqueue = enqueue;
2448                 ifsq->ifsq_dequeue = dequeue;
2449                 ifsq->ifsq_request = request;
2450         }
2451 }
2452
2453 int
2454 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
2455     struct altq_pktattr *pa __unused)
2456 {
2457         if (IF_QFULL(ifsq)) {
2458                 m_freem(m);
2459                 return(ENOBUFS);
2460         } else {
2461                 IF_ENQUEUE(ifsq, m);
2462                 return(0);
2463         }       
2464 }
2465
2466 struct mbuf *
2467 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
2468 {
2469         struct mbuf *m;
2470
2471         switch (op) {
2472         case ALTDQ_POLL:
2473                 IF_POLL(ifsq, m);
2474                 break;
2475         case ALTDQ_REMOVE:
2476                 IF_DEQUEUE(ifsq, m);
2477                 break;
2478         default:
2479                 panic("unsupported ALTQ dequeue op: %d", op);
2480         }
2481         KKASSERT(mpolled == NULL || mpolled == m);
2482         return(m);
2483 }
2484
2485 int
2486 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
2487 {
2488         switch (req) {
2489         case ALTRQ_PURGE:
2490                 IF_DRAIN(ifsq);
2491                 break;
2492         default:
2493                 panic("unsupported ALTQ request: %d", req);
2494         }
2495         return(0);
2496 }
2497
2498 static void
2499 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
2500 {
2501         struct ifnet *ifp = ifsq_get_ifp(ifsq);
2502         int running = 0, need_sched;
2503
2504         /*
2505          * Try to do a direct ifnet.if_start first; if there is
2506          * contention on ifnet's serializer, ifnet.if_start will
2507          * be scheduled on ifnet's CPU.
2508          */
2509         if (!ifnet_tryserialize_tx(ifp)) {
2510                 /*
2511                  * ifnet serializer contention happened, so
2512                  * ifnet.if_start is scheduled on ifnet's
2513                  * CPU, and we keep going.
2514                  */
2515                 ifsq_ifstart_schedule(ifsq, 1);
2516                 return;
2517         }
2518
2519         if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
2520                 ifp->if_start(ifp, ifsq);
2521                 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
2522                         running = 1;
2523         }
2524         need_sched = ifsq_ifstart_need_schedule(ifsq, running);
2525
2526         ifnet_deserialize_tx(ifp);
2527
2528         if (need_sched) {
2529                 /*
2530                  * More data needs to be transmitted, so ifnet.if_start is
2531                  * scheduled on ifnet's CPU, and we keep going.
2532                  * NOTE: ifnet.if_start interlock is not released.
2533                  */
2534                 ifsq_ifstart_schedule(ifsq, force_sched);
2535         }
2536 }
2537
2538 /*
2539  * IFSUBQ packet staging mechanism:
2540  *
2541  * Packets enqueued into an IFSUBQ are staged until a certain amount has
2542  * accumulated before the ifnet's if_start is called.  This way the driver
2543  * can avoid writing to hardware registers for every packet; instead, the
2544  * hardware registers are written once a batch of packets has been put
2545  * onto the hardware TX ring.  Measurements on several modern NICs (emx(4),
2546  * igb(4), bnx(4), bge(4), jme(4)) show that aggregating the hardware
2547  * register writes can save ~20% CPU time when 18-byte UDP datagrams are
2548  * transmitted at 1.48Mpps.  The performance improvement from aggregating
2549  * hardware register writes is also mentioned in Luigi Rizzo's netmap paper
2550  * (http://info.iet.unipi.it/~luigi/netmap/).
2551  *
2552  * IFSUBQ packet staging is performed for two entry points into the
2553  * driver's transmission function:
2554  * - Direct calling of ifnet's if_start, i.e. ifsq_ifstart_try()
2555  * - Scheduling of ifnet's if_start, i.e. ifsq_ifstart_schedule()
2556  *
2557  * IFSUBQ packet staging will be stopped upon any of the following conditions:
2558  * - The count of packets enqueued on the current CPU is greater than or
2559  *   equal to ifsq_stage_cntmax. (XXX this should be per-interface)
2560  * - The total length of packets enqueued on the current CPU is greater
2561  *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
2562  *   cut from the hardware's MTU mainly because a full TCP segment's size
2563  *   is usually less than the hardware's MTU.
2564  * - ifsq_ifstart_schedule() is not pending on the current CPU and if_start
2565  *   interlock (if_snd.altq_started) is not released.
2566  * - The if_start_rollup(), which is registered as low priority netisr
2567  *   rollup function, is called; probably because no more work is pending
2568  *   for netisr.
2569  *
2570  * NOTE:
2571  * Currently IFSUBQ packet staging is only performed in netisr threads.
2572  */
2573 int
2574 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2575 {
2576         struct ifaltq *ifq = &ifp->if_snd;
2577         struct ifaltq_subque *ifsq;
2578         int error, start = 0, len, mcast = 0, avoid_start = 0;
2579         struct ifsubq_stage_head *head = NULL;
2580         struct ifsubq_stage *stage = NULL;
2581         int qid = 0; /* XXX */
2582
2583         /* TODO find qid here */
2584         ifsq = &ifq->altq_subq[qid];
2585
2586         ASSERT_IFNET_NOT_SERIALIZED_TX(ifp);
2587
2588         len = m->m_pkthdr.len;
2589         if (m->m_flags & M_MCAST)
2590                 mcast = 1;
2591
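        /*
         * Packet staging is only done in netisr threads; account for this
         * packet in the per-cpu stage and decide whether calling if_start
         * can be deferred (see the IFSUBQ staging notes above).
         */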
2592         if (curthread->td_type == TD_TYPE_NETISR) {
2593                 head = &ifsubq_stage_heads[mycpuid];
2594                 stage = ifsq_get_stage(ifsq, mycpuid);
2595
2596                 stage->stg_cnt++;
2597                 stage->stg_len += len;
2598                 if (stage->stg_cnt < ifsq_stage_cntmax &&
2599                     stage->stg_len < (ifp->if_mtu - max_protohdr))
2600                         avoid_start = 1;
2601         }
2602
2603         ALTQ_SQ_LOCK(ifsq);
2604         error = ifsq_enqueue_locked(ifsq, m, pa);
2605         if (error) {
2606                 if (!ifsq_data_ready(ifsq)) {
2607                         ALTQ_SQ_UNLOCK(ifsq);
2608                         return error;
2609                 }
2610                 avoid_start = 0;
2611         }
2612         if (!ifsq_is_started(ifsq)) {
2613                 if (avoid_start) {
2614                         ALTQ_SQ_UNLOCK(ifsq);
2615
2616                         KKASSERT(!error);
2617                         if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
2618                                 ifsq_stage_insert(head, stage);
2619
2620                         ifp->if_obytes += len;
2621                         if (mcast)
2622                                 ifp->if_omcasts++;
2623                         return error;
2624                 }
2625
2626                 /*
2627                  * Hold the interlock of ifnet.if_start
2628                  */
2629                 ifsq_set_started(ifsq);
2630                 start = 1;
2631         }
2632         ALTQ_SQ_UNLOCK(ifsq);
2633
2634         if (!error) {
2635                 ifp->if_obytes += len;
2636                 if (mcast)
2637                         ifp->if_omcasts++;
2638         }
2639
2640         if (stage != NULL) {
2641                 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
2642                         KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
2643                         if (!avoid_start) {
2644                                 ifsq_stage_remove(head, stage);
2645                                 ifsq_ifstart_schedule(ifsq, 1);
2646                         }
2647                         return error;
2648                 }
2649
2650                 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
2651                         ifsq_stage_remove(head, stage);
2652                 } else {
2653                         stage->stg_cnt = 0;
2654                         stage->stg_len = 0;
2655                 }
2656         }
2657
2658         if (!start)
2659                 return error;
2660
2661         ifsq_ifstart_try(ifsq, 0);
2662         return error;
2663 }
2664
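/*
 * Allocate an ifaddr of at least 'size' bytes plus one ifaddr_container
 * per cpu; each container starts with a single reference and points back
 * at the ifaddr.
 */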
2665 void *
2666 ifa_create(int size, int flags)
2667 {
2668         struct ifaddr *ifa;
2669         int i;
2670
2671         KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));
2672
2673         ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2674         if (ifa == NULL)
2675                 return NULL;
2676
2677         ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2678                                       M_IFADDR, M_WAITOK | M_ZERO);
2679         ifa->ifa_ncnt = ncpus;
2680         for (i = 0; i < ncpus; ++i) {
2681                 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2682
2683                 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2684                 ifac->ifa = ifa;
2685                 ifac->ifa_refcnt = 1;
2686         }
2687 #ifdef IFADDR_DEBUG
2688         kprintf("alloc ifa %p %d\n", ifa, size);
2689 #endif
2690         return ifa;
2691 }
2692
2693 void
2694 ifac_free(struct ifaddr_container *ifac, int cpu_id)
2695 {
2696         struct ifaddr *ifa = ifac->ifa;
2697
2698         KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2699         KKASSERT(ifac->ifa_refcnt == 0);
2700         KASSERT(ifac->ifa_listmask == 0,
2701                 ("ifa is still on %#x lists", ifac->ifa_listmask));
2702
2703         ifac->ifa_magic = IFA_CONTAINER_DEAD;
2704
2705 #ifdef IFADDR_DEBUG_VERBOSE
2706         kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
2707 #endif
2708
2709         KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
2710                 ("invalid # of ifac, %d", ifa->ifa_ncnt));
2711         if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2712 #ifdef IFADDR_DEBUG
2713                 kprintf("free ifa %p\n", ifa);
2714 #endif
2715                 kfree(ifa->ifa_containers, M_IFADDR);
2716                 kfree(ifa, M_IFADDR);
2717         }
2718 }
2719
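/*
 * Runs on each cpu in turn (the message is forwarded cpu by cpu) and
 * links this cpu's ifaddr_container onto the interface's per-cpu
 * address list.
 */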
2720 static void
2721 ifa_iflink_dispatch(netmsg_t nmsg)
2722 {
2723         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2724         struct ifaddr *ifa = msg->ifa;
2725         struct ifnet *ifp = msg->ifp;
2726         int cpu = mycpuid;
2727         struct ifaddr_container *ifac;
2728
2729         crit_enter();
2730
2731         ifac = &ifa->ifa_containers[cpu];
2732         ASSERT_IFAC_VALID(ifac);
2733         KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
2734                 ("ifaddr is on if_addrheads"));
2735
2736         ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2737         if (msg->tail)
2738                 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2739         else
2740                 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
2741
2742         crit_exit();
2743
2744         ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
2745 }
2746
2747 void
2748 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2749 {
2750         struct netmsg_ifaddr msg;
2751
2752         netmsg_init(&msg.base, NULL, &curthread->td_msgport,
2753                     0, ifa_iflink_dispatch);
2754         msg.ifa = ifa;
2755         msg.ifp = ifp;
2756         msg.tail = tail;
2757
2758         ifa_domsg(&msg.base.lmsg, 0);
2759 }
2760
2761 static void
2762 ifa_ifunlink_dispatch(netmsg_t nmsg)
2763 {
2764         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2765         struct ifaddr *ifa = msg->ifa;
2766         struct ifnet *ifp = msg->ifp;
2767         int cpu = mycpuid;
2768         struct ifaddr_container *ifac;
2769
2770         crit_enter();
2771
2772         ifac = &ifa->ifa_containers[cpu];
2773         ASSERT_IFAC_VALID(ifac);
2774         KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
2775                 ("ifaddr is not on if_addrhead"));
2776
2777         TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2778         ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
2779
2780         crit_exit();
2781
2782         ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
2783 }
2784
2785 void
2786 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2787 {
2788         struct netmsg_ifaddr msg;
2789
2790         netmsg_init(&msg.base, NULL, &curthread->td_msgport,
2791                     0, ifa_ifunlink_dispatch);
2792         msg.ifa = ifa;
2793         msg.ifp = ifp;
2794
2795         ifa_domsg(&msg.base.lmsg, 0);
2796 }
2797
2798 static void
2799 ifa_destroy_dispatch(netmsg_t nmsg)
2800 {
2801         struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2802
2803         IFAFREE(msg->ifa);
2804         ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1);
2805 }
2806
2807 void
2808 ifa_destroy(struct ifaddr *ifa)
2809 {
2810         struct netmsg_ifaddr msg;
2811
2812         netmsg_init(&msg.base, NULL, &curthread->td_msgport,
2813                     0, ifa_destroy_dispatch);
2814         msg.ifa = ifa;
2815
2816         ifa_domsg(&msg.base.lmsg, 0);
2817 }
2818
2819 struct lwkt_port *
2820 ifnet_portfn(int cpu)
2821 {
2822         return &ifnet_threads[cpu].td_msgport;
2823 }
2824
2825 void
2826 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2827 {
2828         KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2829
2830         if (next_cpu < ncpus)
2831                 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2832         else
2833                 lwkt_replymsg(lmsg, 0);
2834 }
2835
2836 int
2837 ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2838 {
2839         KKASSERT(cpu < ncpus);
2840         return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
2841 }
2842
2843 void
2844 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2845 {
2846         KKASSERT(cpu < ncpus);
2847         lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2848 }
2849
2850 /*
2851  * Generic netmsg service loop.  Some protocols may roll their own but all
2852  * must do the basic command dispatch function call done here.
2853  */
2854 static void
2855 ifnet_service_loop(void *arg __unused)
2856 {
2857         netmsg_t msg;
2858
2859         while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
2860                 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg"));
2861                 msg->base.nm_dispatch(msg);
2862         }
2863 }
2864
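/*
 * Low priority netisr rollup function: flush all staged subqueues on
 * this cpu, either by scheduling if_start on the ifnet's cpu or by
 * starting transmission directly.
 */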
2865 static void
2866 if_start_rollup(void)
2867 {
2868         struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
2869         struct ifsubq_stage *stage;
2870
2871         while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
2872                 struct ifaltq_subque *ifsq = stage->stg_subq;
2873                 int is_sched = 0;
2874
2875                 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)
2876                         is_sched = 1;
2877                 ifsq_stage_remove(head, stage);
2878
2879                 if (is_sched) {
2880                         ifsq_ifstart_schedule(ifsq, 1);
2881                 } else {
2882                         int start = 0;
2883
2884                         ALTQ_SQ_LOCK(ifsq);
2885                         if (!ifsq_is_started(ifsq)) {
2886                                 /*
2887                                  * Hold the interlock of ifnet.if_start
2888                                  */
2889                                 ifsq_set_started(ifsq);
2890                                 start = 1;
2891                         }
2892                         ALTQ_SQ_UNLOCK(ifsq);
2893
2894                         if (start)
2895                                 ifsq_ifstart_try(ifsq, 1);
2896                 }
2897                 KKASSERT((stage->stg_flags &
2898                     (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
2899         }
2900 }
2901
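/*
 * Create one ifnet message service thread per cpu, initialize the
 * per-cpu packet staging heads and register the if_start rollup with
 * the netisr framework.
 */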
2902 static void
2903 ifnetinit(void *dummy __unused)
2904 {
2905         int i;
2906
2907         for (i = 0; i < ncpus; ++i) {
2908                 struct thread *thr = &ifnet_threads[i];
2909
2910                 lwkt_create(ifnet_service_loop, NULL, NULL,
2911                             thr, TDF_NOSTART|TDF_FORCE_SPINPORT,
2912                             i, "ifnet %d", i);
2913                 netmsg_service_port_init(&thr->td_msgport);
2914                 lwkt_schedule(thr);
2915         }
2916
2917         for (i = 0; i < ncpus; ++i)
2918                 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);
2919         netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
2920 }
2921
2922 struct ifnet *
2923 ifnet_byindex(unsigned short idx)
2924 {
2925         if (idx > if_index)
2926                 return NULL;
2927         return ifindex2ifnet[idx];
2928 }
2929
2930 struct ifaddr *
2931 ifaddr_byindex(unsigned short idx)
2932 {
2933         struct ifnet *ifp;
2934
2935         ifp = ifnet_byindex(idx);
2936         if (!ifp)
2937                 return NULL;
2938         return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
2939 }
2940
2941 void
2942 if_register_com_alloc(u_char type,
2943     if_com_alloc_t *a, if_com_free_t *f)
2944 {
2945
2946         KASSERT(if_com_alloc[type] == NULL,
2947             ("if_register_com_alloc: %d already registered", type));
2948         KASSERT(if_com_free[type] == NULL,
2949             ("if_register_com_alloc: %d free already registered", type));
2950
2951         if_com_alloc[type] = a;
2952         if_com_free[type] = f;
2953 }
2954
2955 void
2956 if_deregister_com_alloc(u_char type)
2957 {
2958
2959         KASSERT(if_com_alloc[type] != NULL,
2960             ("if_deregister_com_alloc: %d not registered", type));
2961         KASSERT(if_com_free[type] != NULL,
2962             ("if_deregister_com_alloc: %d free not registered", type));
2963         if_com_alloc[type] = NULL;
2964         if_com_free[type] = NULL;
2965 }
2966
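/*
 * Clamp the requested ring count to [1, min(ncpus2, cnt_max)] and round
 * it down to a power of 2; cnt <= 0 requests the maximum.
 */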
2967 int
2968 if_ring_count2(int cnt, int cnt_max)
2969 {
2970         int shift = 0;
2971
2972         KASSERT(cnt_max >= 1 && powerof2(cnt_max),
2973             ("invalid ring count max %d", cnt_max));
2974
2975         if (cnt <= 0)
2976                 cnt = cnt_max;
2977         if (cnt > ncpus2)
2978                 cnt = ncpus2;
2979         if (cnt > cnt_max)
2980                 cnt = cnt_max;
2981
2982         while ((1 << (shift + 1)) <= cnt)
2983                 ++shift;
2984         cnt = 1 << shift;
2985
2986         KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max,
2987             ("calculate cnt %d, ncpus2 %d, cnt max %d",
2988              cnt, ncpus2, cnt_max));
2989         return cnt;
2990 }
2991
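/*
 * Reserve extra queue space on top of the requested length, so that
 * packets held in per-cpu staging (up to ifsq_stage_cntmax per cpu) do
 * not cause premature queue overflow.
 */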
2992 void
2993 ifq_set_maxlen(struct ifaltq *ifq, int len)
2994 {
2995         ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
2996 }