if: Multiple TX queue support step 3 of 3; map CPUID to subqueue
[dragonfly.git] / sys / net / if.c
984263bc
MD
1/*
2 * Copyright (c) 1980, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)if.c 8.3 (Berkeley) 1/4/94
f23061d4 34 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
984263bc
MD
35 */
36
37#include "opt_compat.h"
38#include "opt_inet6.h"
39#include "opt_inet.h"
b3a7093f 40#include "opt_ifpoll.h"
984263bc
MD
41
42#include <sys/param.h>
43#include <sys/malloc.h>
44#include <sys/mbuf.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
895c1f85 47#include <sys/priv.h>
6b6e0885 48#include <sys/protosw.h>
984263bc
MD
49#include <sys/socket.h>
50#include <sys/socketvar.h>
6b6e0885 51#include <sys/socketops.h>
984263bc
MD
52#include <sys/protosw.h>
53#include <sys/kernel.h>
9db4b353 54#include <sys/ktr.h>
9683f229 55#include <sys/mutex.h>
984263bc
MD
56#include <sys/sockio.h>
57#include <sys/syslog.h>
58#include <sys/sysctl.h>
698ac46c 59#include <sys/domain.h>
e9cb6d99 60#include <sys/thread.h>
78195a76 61#include <sys/serialize.h>
71fc104f 62#include <sys/bus.h>
984263bc 63
9683f229
MD
64#include <sys/thread2.h>
65#include <sys/msgport2.h>
66#include <sys/mutex2.h>
67
984263bc
MD
68#include <net/if.h>
69#include <net/if_arp.h>
70#include <net/if_dl.h>
71#include <net/if_types.h>
72#include <net/if_var.h>
4d723e5a 73#include <net/ifq_var.h>
984263bc
MD
74#include <net/radix.h>
75#include <net/route.h>
65a24520 76#include <net/if_clone.h>
9db4b353 77#include <net/netisr.h>
b2632176
SZ
78#include <net/netmsg2.h>
79
d5a2b87c 80#include <machine/atomic.h>
984263bc 81#include <machine/stdarg.h>
b2632176 82#include <machine/smp.h>
984263bc
MD
83
84#if defined(INET) || defined(INET6)
85/*XXX*/
86#include <netinet/in.h>
87#include <netinet/in_var.h>
88#include <netinet/if_ether.h>
89#ifdef INET6
984263bc
MD
90#include <netinet6/in6_var.h>
91#include <netinet6/in6_ifattach.h>
92#endif
93#endif
94
9eee10d0
DRJ
95#if defined(COMPAT_43)
96#include <emulation/43bsd/43bsd_socket.h>
97#endif /* COMPAT_43 */
98
b2632176 99struct netmsg_ifaddr {
002c1265 100 struct netmsg_base base;
b2632176
SZ
101 struct ifaddr *ifa;
102 struct ifnet *ifp;
103 int tail;
104};
105
f0a26983
SZ
106struct ifsubq_stage_head {
107 TAILQ_HEAD(, ifsubq_stage) stg_head;
28cc0c29
SZ
108} __cachealign;
109
984263bc
MD
110/*
111 * System initialization
112 */
698ac46c
HS
113static void if_attachdomain(void *);
114static void if_attachdomain1(struct ifnet *);
436c57ea
SZ
115static int ifconf(u_long, caddr_t, struct ucred *);
116static void ifinit(void *);
90af4fd3 117static void ifnetinit(void *);
436c57ea
SZ
118static void if_slowtimo(void *);
119static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
120static int if_rtdel(struct radix_node *, void *);
984263bc
MD
121
122#ifdef INET6
123/*
124 * XXX: declare here to avoid including many inet6 related files..
125 * should be more generalized?
126 */
436c57ea 127extern void nd6_setmtu(struct ifnet *);
984263bc
MD
128#endif
129
436c57ea
SZ
130SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
131SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
132
f0a26983
SZ
133static int ifsq_stage_cntmax = 4;
134TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
28cc0c29 135SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
f0a26983 136 &ifsq_stage_cntmax, 0, "ifq staging packet count max");
28cc0c29 137
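/*
 * Rough meaning of the knob above (the precise bookkeeping lives in the
 * ifq staging code outside this file): when a netisr thread transmits,
 * up to net.link.stage_cntmax packets may be parked in that CPU's
 * ifsubq_stage entry before the subqueue owner's if_start is actually
 * scheduled, which batches the cross-CPU scheduling work.  A value of 0
 * disables staging; see the ifsq_stage_cntmax test in
 * ifsq_ifstart_schedule() below.
 */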
436c57ea 138SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
b2632176 139/* Must be after netisr_init */
90af4fd3 140SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
436c57ea 141
aeb3c11e
RP
142static if_com_alloc_t *if_com_alloc[256];
143static if_com_free_t *if_com_free[256];
144
436c57ea
SZ
145MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
146MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
cb80735c 147MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
984263bc 148
436c57ea 149int ifqmaxlen = IFQ_MAXLEN;
b64bfcc3 150struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
984263bc 151
436c57ea
SZ
152struct callout if_slowtimo_timer;
153
154int if_index = 0;
155struct ifnet **ifindex2ifnet = NULL;
90af4fd3 156static struct thread ifnet_threads[MAXCPU];
abbb44bb 157
f0a26983 158static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU];
28cc0c29 159
f0a26983 160#ifdef notyet
9db4b353 161#define IFQ_KTR_STRING "ifq=%p"
5bf48697 162#define IFQ_KTR_ARGS struct ifaltq *ifq
9db4b353
SZ
163#ifndef KTR_IFQ
164#define KTR_IFQ KTR_ALL
165#endif
166KTR_INFO_MASTER(ifq);
5bf48697
AE
167KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
168KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
9db4b353
SZ
169#define logifq(name, arg) KTR_LOG(ifq_ ## name, arg)
170
171#define IF_START_KTR_STRING "ifp=%p"
5bf48697 172#define IF_START_KTR_ARGS struct ifnet *ifp
9db4b353
SZ
173#ifndef KTR_IF_START
174#define KTR_IF_START KTR_ALL
175#endif
176KTR_INFO_MASTER(if_start);
177KTR_INFO(KTR_IF_START, if_start, run, 0,
5bf48697 178 IF_START_KTR_STRING, IF_START_KTR_ARGS);
9db4b353 179KTR_INFO(KTR_IF_START, if_start, sched, 1,
5bf48697 180 IF_START_KTR_STRING, IF_START_KTR_ARGS);
9db4b353 181KTR_INFO(KTR_IF_START, if_start, avoid, 2,
5bf48697 182 IF_START_KTR_STRING, IF_START_KTR_ARGS);
9db4b353 183KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
5bf48697 184 IF_START_KTR_STRING, IF_START_KTR_ARGS);
9db4b353 185KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
5bf48697 186 IF_START_KTR_STRING, IF_START_KTR_ARGS);
9db4b353 187#define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg)
f0a26983 188#endif
9db4b353 189
743da179 190TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);
315a7da3 191
984263bc
MD
192/*
193 * Network interface utility routines.
194 *
195 * Routines with ifa_ifwith* names take sockaddr *'s as
196 * parameters.
197 */
198/* ARGSUSED*/
199void
f23061d4 200ifinit(void *dummy)
984263bc
MD
201{
202 struct ifnet *ifp;
984263bc 203
abbb44bb
JS
204 callout_init(&if_slowtimo_timer);
205
4986965b 206 crit_enter();
984263bc 207 TAILQ_FOREACH(ifp, &ifnet, if_link) {
f0a26983 208 if (ifp->if_snd.altq_maxlen == 0) {
3e4a09e7 209 if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
ef9870ec 210 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
984263bc
MD
211 }
212 }
4986965b 213 crit_exit();
abbb44bb 214
984263bc
MD
215 if_slowtimo(0);
216}
217
9db4b353 218static void
f0a26983 219ifsq_ifstart_ipifunc(void *arg)
9db4b353 220{
f0a26983
SZ
221 struct ifaltq_subque *ifsq = arg;
222 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);
9db4b353
SZ
223
224 crit_enter();
225 if (lmsg->ms_flags & MSGF_DONE)
ff5fbdd8 226 lwkt_sendmsg(netisr_portfn(mycpuid), lmsg);
9db4b353
SZ
227 crit_exit();
228}
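/*
 * The MSGF_DONE check above keeps at most one if_start message per CPU
 * in flight: the message is resent to the target netisr only after the
 * previous dispatch has replied (ifsq_ifstart_dispatch replies as soon
 * as it starts running), so a burst of schedule requests collapses into
 * a single dispatch.
 */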
229
3cab6b0d 230static __inline void
f0a26983 231ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
3cab6b0d 232{
f0a26983
SZ
233 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
234 TAILQ_REMOVE(&head->stg_head, stage, stg_link);
235 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
236 stage->stg_cnt = 0;
237 stage->stg_len = 0;
3cab6b0d
SZ
238}
239
240static __inline void
f0a26983 241ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
3cab6b0d 242{
f0a26983
SZ
243 KKASSERT((stage->stg_flags &
244 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
245 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
246 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
3cab6b0d
SZ
247}
248
9db4b353
SZ
249/*
250 * Schedule ifnet.if_start on ifnet's CPU
251 */
252static void
f0a26983 253ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
9db4b353 254{
9db4b353
SZ
255 int cpu;
256
3cab6b0d 257 if (!force && curthread->td_type == TD_TYPE_NETISR &&
f0a26983
SZ
258 ifsq_stage_cntmax > 0) {
259 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
260
261 stage->stg_cnt = 0;
262 stage->stg_len = 0;
263 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
264 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
265 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
3cab6b0d
SZ
266 return;
267 }
268
f0a26983 269 cpu = ifsq_get_cpuid(ifsq);
9db4b353 270 if (cpu != mycpuid)
f0a26983 271 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
9db4b353 272 else
f0a26983 273 ifsq_ifstart_ipifunc(ifsq);
9db4b353
SZ
274}
275
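/*
 * Scheduling therefore takes one of two routes: a netisr thread with
 * staging enabled merely marks its per-CPU stage entry as scheduled and
 * relies on the staging machinery to flush it, while every other caller
 * sends the subqueue's if_start message to the owner CPU, directly when
 * already there or via an IPI otherwise.
 */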
276/*
277 * NOTE:
278 * This function will release the ifnet.if_start interlock
279 * if ifnet.if_start does not need to be scheduled.
280 */
281static __inline int
f0a26983 282ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
9db4b353 283{
f0a26983 284 if (!running || ifsq_is_empty(ifsq)
9db4b353 285#ifdef ALTQ
f0a26983 286 || ifsq->ifsq_altq->altq_tbr != NULL
9db4b353
SZ
287#endif
288 ) {
f0a26983 289 ALTQ_SQ_LOCK(ifsq);
9db4b353
SZ
290 /*
291 * The ifnet.if_start interlock is released if:
292 * 1) Hardware cannot take any packets, due to
293 * o interface is marked down
9ed293e0 294 * o hardware queue is full (ifq_is_oactive)
9db4b353
SZ
295 * Under the second situation, hardware interrupt
296 * or polling(4) will call/schedule ifnet.if_start
297 * when hardware queue is ready
298 * 2) There is no packet in the ifnet.if_snd.
299 * A later ifq_dispatch or ifq_handoff will call/
300 * schedule ifnet.if_start
301 * 3) TBR is used and it does not allow further
302 * dequeueing.
303 * TBR callout will call ifnet.if_start
304 */
f0a26983
SZ
305 if (!running || !ifsq_data_ready(ifsq)) {
306 ifsq_clr_started(ifsq);
307 ALTQ_SQ_UNLOCK(ifsq);
9db4b353
SZ
308 return 0;
309 }
f0a26983 310 ALTQ_SQ_UNLOCK(ifsq);
9db4b353
SZ
311 }
312 return 1;
313}
314
315static void
f0a26983 316ifsq_ifstart_dispatch(netmsg_t msg)
9db4b353 317{
002c1265 318 struct lwkt_msg *lmsg = &msg->base.lmsg;
f0a26983
SZ
319 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
320 struct ifnet *ifp = ifsq_get_ifp(ifsq);
404c9fd9 321 int running = 0, need_sched;
9db4b353
SZ
322
323 crit_enter();
324 lwkt_replymsg(lmsg, 0); /* reply ASAP */
325 crit_exit();
326
f0a26983 327 if (mycpuid != ifsq_get_cpuid(ifsq)) {
9db4b353 328 /*
404c9fd9 329 * We need to chase the ifnet CPU change.
9db4b353 330 */
f0a26983 331 ifsq_ifstart_schedule(ifsq, 1);
404c9fd9 332 return;
9db4b353 333 }
9db4b353 334
3c4cd924 335 ifnet_serialize_tx(ifp, ifsq);
f0a26983
SZ
336 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
337 ifp->if_start(ifp, ifsq);
338 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
404c9fd9 339 running = 1;
9db4b353 340 }
f0a26983 341 need_sched = ifsq_ifstart_need_schedule(ifsq, running);
3c4cd924 342 ifnet_deserialize_tx(ifp, ifsq);
404c9fd9
SZ
343
344 if (need_sched) {
2b2f1d64
SZ
345 /*
346 * More data needs to be transmitted; ifnet.if_start is
347 * scheduled on ifnet's CPU, and we keep going.
348 * NOTE: ifnet.if_start interlock is not released.
349 */
f0a26983 350 ifsq_ifstart_schedule(ifsq, 0);
9db4b353
SZ
351 }
352}
353
354/* Device driver ifnet.if_start helper function */
355void
f0a26983 356ifsq_devstart(struct ifaltq_subque *ifsq)
9db4b353 357{
f0a26983 358 struct ifnet *ifp = ifsq_get_ifp(ifsq);
9db4b353
SZ
359 int running = 0;
360
3c4cd924 361 ASSERT_IFNET_SERIALIZED_TX(ifp, ifsq);
9db4b353 362
f0a26983
SZ
363 ALTQ_SQ_LOCK(ifsq);
364 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
365 ALTQ_SQ_UNLOCK(ifsq);
9db4b353
SZ
366 return;
367 }
f0a26983
SZ
368 ifsq_set_started(ifsq);
369 ALTQ_SQ_UNLOCK(ifsq);
9db4b353 370
f0a26983 371 ifp->if_start(ifp, ifsq);
9db4b353 372
f0a26983 373 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
9db4b353
SZ
374 running = 1;
375
f0a26983 376 if (ifsq_ifstart_need_schedule(ifsq, running)) {
9db4b353
SZ
377 /*
378 * More data needs to be transmitted; ifnet.if_start is
379 * scheduled on ifnet's CPU, and we keep going.
380 * NOTE: ifnet.if_start interlock is not released.
381 */
f0a26983 382 ifsq_ifstart_schedule(ifsq, 0);
9db4b353
SZ
383 }
384}
385
f0a26983
SZ
386void
387if_devstart(struct ifnet *ifp)
388{
389 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
390}
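/*
 * Typical driver usage (an illustrative sketch only; "mydrv" and its
 * softc layout are hypothetical): the TX-completion path reclaims
 * descriptors while holding the TX serializer (ifsq_devstart() asserts
 * exactly that) and then kicks the queue again so packets that piled up
 * while the hardware ring was full go out.
 */
#if 0
static void
mydrv_txeof(struct mydrv_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* ... free completed TX descriptors, clear the OACTIVE state ... */
	if_devstart(ifp);		/* kick the default TX subqueue */
}
#endif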
391
2dffecda
SZ
392/* Device driver ifnet.if_start schedule helper function */
393void
f0a26983
SZ
394ifsq_devstart_sched(struct ifaltq_subque *ifsq)
395{
396 ifsq_ifstart_schedule(ifsq, 1);
397}
398
399void
2dffecda
SZ
400if_devstart_sched(struct ifnet *ifp)
401{
f0a26983 402 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
2dffecda
SZ
403}
404
a3dd34d2
SZ
405static void
406if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
407{
408 lwkt_serialize_enter(ifp->if_serializer);
409}
410
411static void
412if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
413{
414 lwkt_serialize_exit(ifp->if_serializer);
415}
416
417static int
418if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
419{
420 return lwkt_serialize_try(ifp->if_serializer);
421}
422
2c9effcf
SZ
423#ifdef INVARIANTS
424static void
425if_default_serialize_assert(struct ifnet *ifp,
426 enum ifnet_serialize slz __unused,
427 boolean_t serialized)
428{
429 if (serialized)
430 ASSERT_SERIALIZED(ifp->if_serializer);
431 else
432 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
433}
434#endif
435
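/*
 * The wrappers above back the simpler of the two serialization models
 * if_attach() accepts: when a driver does not provide its own
 * if_serialize/if_deserialize/if_tryserialize methods, these defaults
 * are installed and every ifnet_serialize_*() call, whatever slz value
 * it passes, ends up taking the single ifp->if_serializer.  Drivers
 * with finer-grained locking supply their own methods instead and must
 * then pass a NULL serializer to if_attach().
 */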
984263bc 436/*
78195a76
MD
437 * Attach an interface to the list of "active" interfaces.
438 *
439 * The serializer is optional. If non-NULL access to the interface
440 * may be MPSAFE.
984263bc
MD
441 */
442void
78195a76 443if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
984263bc
MD
444{
445 unsigned socksize, ifasize;
446 int namelen, masklen;
82ed7fc2
RG
447 struct sockaddr_dl *sdl;
448 struct ifaddr *ifa;
e3e4574a 449 struct ifaltq *ifq;
f0a26983 450 int i, q;
590b8cd4 451
984263bc 452 static int if_indexlim = 8;
984263bc 453
a3dd34d2
SZ
454 if (ifp->if_serialize != NULL) {
455 KASSERT(ifp->if_deserialize != NULL &&
2c9effcf
SZ
456 ifp->if_tryserialize != NULL &&
457 ifp->if_serialize_assert != NULL,
ed20d0e3 458 ("serialize functions are partially setup"));
ae474cfa
SZ
459
460 /*
461 * If the device supplies serialize functions,
462 * then clear if_serializer to catch any invalid
463 * usage of this field.
464 */
465 KASSERT(serializer == NULL,
466 ("both serialize functions and default serializer "
ed20d0e3 467 "are supplied"));
ae474cfa 468 ifp->if_serializer = NULL;
a3dd34d2
SZ
469 } else {
470 KASSERT(ifp->if_deserialize == NULL &&
2c9effcf
SZ
471 ifp->if_tryserialize == NULL &&
472 ifp->if_serialize_assert == NULL,
ed20d0e3 473 ("serialize functions are partially setup"));
a3dd34d2
SZ
474 ifp->if_serialize = if_default_serialize;
475 ifp->if_deserialize = if_default_deserialize;
476 ifp->if_tryserialize = if_default_tryserialize;
2c9effcf
SZ
477#ifdef INVARIANTS
478 ifp->if_serialize_assert = if_default_serialize_assert;
479#endif
ae474cfa
SZ
480
481 /*
482 * The serializer can be passed in from the device,
483 * allowing the same serializer to be used for both
484 * the interrupt interlock and the device queue.
485 * If not specified, the netif structure will use an
486 * embedded serializer.
487 */
488 if (serializer == NULL) {
489 serializer = &ifp->if_default_serializer;
490 lwkt_serialize_init(serializer);
491 }
492 ifp->if_serializer = serializer;
a3dd34d2
SZ
493 }
494
9683f229
MD
495 mtx_init(&ifp->if_ioctl_mtx);
496 mtx_lock(&ifp->if_ioctl_mtx);
497
984263bc
MD
498 TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
499 ifp->if_index = ++if_index;
b2632176 500
984263bc
MD
501 /*
502 * XXX -
503 * The old code would work if the interface passed a pre-existing
504 * chain of ifaddrs to this code. We don't trust our callers to
505 * properly initialize the tailq, however, so we no longer allow
506 * this unlikely case.
507 */
b2632176
SZ
508 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
509 M_IFADDR, M_WAITOK | M_ZERO);
510 for (i = 0; i < ncpus; ++i)
511 TAILQ_INIT(&ifp->if_addrheads[i]);
512
984263bc 513 TAILQ_INIT(&ifp->if_prefixhead);
441d34b2 514 TAILQ_INIT(&ifp->if_multiaddrs);
2097a299 515 TAILQ_INIT(&ifp->if_groups);
984263bc 516 getmicrotime(&ifp->if_lastchange);
141697b6 517 if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
590b8cd4 518 unsigned int n;
141697b6 519 struct ifnet **q;
590b8cd4
JH
520
521 if_indexlim <<= 1;
984263bc
MD
522
523 /* grow ifindex2ifnet */
141697b6 524 n = if_indexlim * sizeof(*q);
efda3bd0 525 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
984263bc 526 if (ifindex2ifnet) {
f23061d4 527 bcopy(ifindex2ifnet, q, n/2);
efda3bd0 528 kfree(ifindex2ifnet, M_IFADDR);
984263bc 529 }
141697b6 530 ifindex2ifnet = q;
984263bc
MD
531 }
532
533 ifindex2ifnet[if_index] = ifp;
534
535 /*
536 * create a Link Level name for this device
537 */
3e4a09e7 538 namelen = strlen(ifp->if_xname);
60615e94 539 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
984263bc
MD
540 socksize = masklen + ifp->if_addrlen;
541#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
542 if (socksize < sizeof(*sdl))
543 socksize = sizeof(*sdl);
544 socksize = ROUNDUP(socksize);
60615e94 545#undef ROUNDUP
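/*
 * Sizing sketch for the allocation below: masklen is the sockaddr_dl
 * header (everything before sdl_data) plus the interface name length;
 * socksize additionally covers the link-layer address, is bumped to at
 * least sizeof(struct sockaddr_dl), and is then rounded up to a
 * multiple of sizeof(long); with 8-byte longs ROUNDUP(17) is 24 and
 * ROUNDUP(56) stays 56.  ifasize reserves one struct ifaddr plus two
 * such blocks, one for the AF_LINK address and one for the all-ones
 * name mask constructed just below.
 */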
590b8cd4 546 ifasize = sizeof(struct ifaddr) + 2 * socksize;
b2632176 547 ifa = ifa_create(ifasize, M_WAITOK);
590b8cd4
JH
548 sdl = (struct sockaddr_dl *)(ifa + 1);
549 sdl->sdl_len = socksize;
550 sdl->sdl_family = AF_LINK;
551 bcopy(ifp->if_xname, sdl->sdl_data, namelen);
552 sdl->sdl_nlen = namelen;
553 sdl->sdl_index = ifp->if_index;
554 sdl->sdl_type = ifp->if_type;
141697b6 555 ifp->if_lladdr = ifa;
590b8cd4
JH
556 ifa->ifa_ifp = ifp;
557 ifa->ifa_rtrequest = link_rtrequest;
558 ifa->ifa_addr = (struct sockaddr *)sdl;
559 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
560 ifa->ifa_netmask = (struct sockaddr *)sdl;
561 sdl->sdl_len = masklen;
562 while (namelen != 0)
563 sdl->sdl_data[--namelen] = 0xff;
b2632176 564 ifa_iflink(ifa, ifp, 0 /* Insert head */);
984263bc 565
f2bd8b67 566 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
71fc104f 567 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
f2bd8b67 568
2cc2f639
SZ
569 if (ifp->if_mapsubq == NULL)
570 ifp->if_mapsubq = ifq_mapsubq_default;
571
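/*
 * if_mapsubq is the hook the "map CPUID to subqueue" part of this
 * change is about: it maps a CPU id to the TX subqueue index that CPU
 * should feed.  The default installed here, ifq_mapsubq_default,
 * presumably collapses everything onto subqueue 0, so single-queue
 * drivers keep their old behaviour; a multi-TX-queue driver is expected
 * to install its own mapping before calling if_attach().
 */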
e3e4574a
JS
572 ifq = &ifp->if_snd;
573 ifq->altq_type = 0;
574 ifq->altq_disc = NULL;
575 ifq->altq_flags &= ALTQF_CANTCHANGE;
576 ifq->altq_tbr = NULL;
577 ifq->altq_ifp = ifp;
4d723e5a 578
f0a26983
SZ
579 if (ifq->altq_subq_cnt <= 0)
580 ifq->altq_subq_cnt = 1;
581 ifq->altq_subq = kmalloc_cachealign(
582 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
28cc0c29 583 M_DEVBUF, M_WAITOK | M_ZERO);
28cc0c29 584
f0a26983
SZ
585 if (ifq->altq_maxlen == 0) {
586 if_printf(ifp, "driver didn't set ifq_maxlen\n");
587 ifq_set_maxlen(ifq, ifqmaxlen);
42fdf81e
SZ
588 }
589
f0a26983
SZ
590 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
591 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
592
593 ALTQ_SQ_LOCK_INIT(ifsq);
594 ifsq->ifsq_index = q;
595
596 ifsq->ifsq_altq = ifq;
597 ifsq->ifsq_ifp = ifp;
598
599 ifsq->ifq_maxlen = ifq->altq_maxlen;
600 ifsq->ifsq_prepended = NULL;
601 ifsq->ifsq_started = 0;
602 ifsq->ifsq_hw_oactive = 0;
603 ifsq_set_cpuid(ifsq, 0);
604
605 ifsq->ifsq_stage =
606 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
607 M_DEVBUF, M_WAITOK | M_ZERO);
608 for (i = 0; i < ncpus; ++i)
609 ifsq->ifsq_stage[i].stg_subq = ifsq;
610
611 ifsq->ifsq_ifstart_nmsg =
612 kmalloc(ncpus * sizeof(struct netmsg_base),
613 M_LWKTMSG, M_WAITOK);
614 for (i = 0; i < ncpus; ++i) {
615 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
616 &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
617 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
618 }
619 }
620 ifq_set_classic(ifq);
621
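/*
 * Summary of what the loop above set up for every TX subqueue: its own
 * ALTQ_SQ lock, its index and back pointers to the ifaltq/ifnet, the
 * inherited ifq_maxlen, an owner CPU (initially 0, cf. ifsq_set_cpuid),
 * a per-CPU array of ifsubq_stage entries for the staging path, and a
 * per-CPU array of if_start netmsgs aimed at the netisr threads.
 * ifq_set_classic() then installs the default FIFO enqueue/dequeue
 * methods used when no ALTQ discipline is attached.
 */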
9c70fe43 622 if (!SLIST_EMPTY(&domains))
698ac46c
HS
623 if_attachdomain1(ifp);
624
984263bc
MD
625 /* Announce the interface. */
626 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
9683f229
MD
627
628 mtx_unlock(&ifp->if_ioctl_mtx);
984263bc
MD
629}
630
698ac46c
HS
631static void
632if_attachdomain(void *dummy)
633{
634 struct ifnet *ifp;
698ac46c 635
4986965b
JS
636 crit_enter();
637 TAILQ_FOREACH(ifp, &ifnet, if_list)
698ac46c 638 if_attachdomain1(ifp);
4986965b 639 crit_exit();
698ac46c
HS
640}
641SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
642 if_attachdomain, NULL);
643
644static void
645if_attachdomain1(struct ifnet *ifp)
646{
647 struct domain *dp;
698ac46c 648
4986965b 649 crit_enter();
698ac46c
HS
650
651 /* address family dependent data region */
652 bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
9c70fe43 653 SLIST_FOREACH(dp, &domains, dom_next)
698ac46c
HS
654 if (dp->dom_ifattach)
655 ifp->if_afdata[dp->dom_family] =
656 (*dp->dom_ifattach)(ifp);
4986965b 657 crit_exit();
698ac46c
HS
658}
659
984263bc 660/*
c727e142
SZ
661 * Purge all addresses whose type is _not_ AF_LINK
662 */
663void
664if_purgeaddrs_nolink(struct ifnet *ifp)
665{
b2632176
SZ
666 struct ifaddr_container *ifac, *next;
667
668 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
669 ifa_link, next) {
670 struct ifaddr *ifa = ifac->ifa;
c727e142 671
c727e142
SZ
672 /* Leave link ifaddr as it is */
673 if (ifa->ifa_addr->sa_family == AF_LINK)
674 continue;
675#ifdef INET
676 /* XXX: Ugly!! ad hoc just for INET */
677 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
678 struct ifaliasreq ifr;
b2632176
SZ
679#ifdef IFADDR_DEBUG_VERBOSE
680 int i;
681
682 kprintf("purge in4 addr %p: ", ifa);
683 for (i = 0; i < ncpus; ++i)
684 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
685 kprintf("\n");
686#endif
c727e142
SZ
687
688 bzero(&ifr, sizeof ifr);
689 ifr.ifra_addr = *ifa->ifa_addr;
690 if (ifa->ifa_dstaddr)
691 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
692 if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
693 NULL) == 0)
694 continue;
695 }
696#endif /* INET */
697#ifdef INET6
698 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
b2632176
SZ
699#ifdef IFADDR_DEBUG_VERBOSE
700 int i;
701
702 kprintf("purge in6 addr %p: ", ifa);
703 for (i = 0; i < ncpus; ++i)
704 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
705 kprintf("\n");
706#endif
707
c727e142
SZ
708 in6_purgeaddr(ifa);
709 /* ifp_addrhead is already updated */
710 continue;
711 }
712#endif /* INET6 */
b2632176
SZ
713 ifa_ifunlink(ifa, ifp);
714 ifa_destroy(ifa);
c727e142
SZ
715 }
716}
717
5804f3d1
SZ
718static void
719ifq_stage_detach_handler(netmsg_t nmsg)
720{
721 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
f0a26983 722 int q;
5804f3d1 723
f0a26983
SZ
724 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
725 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
726 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
727
728 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
729 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
730 }
5804f3d1
SZ
731 lwkt_replymsg(&nmsg->lmsg, 0);
732}
733
734static void
735ifq_stage_detach(struct ifaltq *ifq)
736{
737 struct netmsg_base base;
738 int cpu;
739
740 netmsg_init(&base, NULL, &curthread->td_msgport, 0,
741 ifq_stage_detach_handler);
742 base.lmsg.u.ms_resultp = ifq;
743
744 for (cpu = 0; cpu < ncpus; ++cpu)
745 lwkt_domsg(netisr_portfn(cpu), &base.lmsg, 0);
746}
747
c727e142 748/*
984263bc
MD
749 * Detach an interface, removing it from the
750 * list of "active" interfaces.
751 */
752void
f23061d4 753if_detach(struct ifnet *ifp)
984263bc 754{
984263bc 755 struct radix_node_head *rnh;
f0a26983 756 int i, q;
ecdefdda 757 int cpu, origcpu;
698ac46c 758 struct domain *dp;
984263bc 759
f2bd8b67
JS
760 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
761
984263bc
MD
762 /*
763 * Remove routes and flush queues.
764 */
4986965b 765 crit_enter();
b3a7093f
SZ
766#ifdef IFPOLL_ENABLE
767 if (ifp->if_flags & IFF_NPOLLING)
768 ifpoll_deregister(ifp);
769#endif
984263bc
MD
770 if_down(ifp);
771
5b1156d4 772#ifdef ALTQ
4d723e5a
JS
773 if (ifq_is_enabled(&ifp->if_snd))
774 altq_disable(&ifp->if_snd);
775 if (ifq_is_attached(&ifp->if_snd))
776 altq_detach(&ifp->if_snd);
5b1156d4 777#endif
4d723e5a 778
984263bc 779 /*
984263bc
MD
780 * Clean up all addresses.
781 */
141697b6 782 ifp->if_lladdr = NULL;
984263bc 783
c727e142 784 if_purgeaddrs_nolink(ifp);
b2632176 785 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
c727e142
SZ
786 struct ifaddr *ifa;
787
b2632176 788 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
c727e142 789 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
27eaa4f1 790 ("non-link ifaddr is left on if_addrheads"));
984263bc 791
b2632176
SZ
792 ifa_ifunlink(ifa, ifp);
793 ifa_destroy(ifa);
794 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
27eaa4f1 795 ("there are still ifaddrs left on if_addrheads"));
984263bc
MD
796 }
797
a98eb818
JS
798#ifdef INET
799 /*
800 * Remove all IPv4 kernel structures related to ifp.
801 */
802 in_ifdetach(ifp);
803#endif
804
984263bc
MD
805#ifdef INET6
806 /*
807 * Remove all IPv6 kernel structs related to ifp. This should be done
808 * before removing routing entries below, since IPv6 interface direct
809 * routes are expected to be removed by the IPv6-specific kernel API.
810 * Otherwise, the kernel will detect some inconsistency and complain about it.
811 */
812 in6_ifdetach(ifp);
813#endif
814
815 /*
816 * Delete all remaining routes using this interface
817 * Unfortunately the only way to do this is to slog through
818 * the entire routing table looking for routes which point
819 * to this interface...oh well...
820 */
ecdefdda 821 origcpu = mycpuid;
271d38c4 822 for (cpu = 0; cpu < ncpus; cpu++) {
ecdefdda
MD
823 lwkt_migratecpu(cpu);
824 for (i = 1; i <= AF_MAX; i++) {
b2632176 825 if ((rnh = rt_tables[cpu][i]) == NULL)
ecdefdda
MD
826 continue;
827 rnh->rnh_walktree(rnh, if_rtdel, ifp);
828 }
984263bc 829 }
ecdefdda 830 lwkt_migratecpu(origcpu);
984263bc
MD
831
832 /* Announce that the interface is gone. */
833 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
71fc104f 834 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
984263bc 835
9c70fe43 836 SLIST_FOREACH(dp, &domains, dom_next)
698ac46c
HS
837 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
838 (*dp->dom_ifdetach)(ifp,
839 ifp->if_afdata[dp->dom_family]);
698ac46c 840
141697b6
JS
841 /*
842 * Remove interface from ifindex2ifp[] and maybe decrement if_index.
843 */
75857e7c 844 ifindex2ifnet[ifp->if_index] = NULL;
141697b6
JS
845 while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
846 if_index--;
75857e7c 847
984263bc 848 TAILQ_REMOVE(&ifnet, ifp, if_link);
b2632176 849 kfree(ifp->if_addrheads, M_IFADDR);
5804f3d1
SZ
850
851 lwkt_synchronize_ipiqs("if_detach");
852 ifq_stage_detach(&ifp->if_snd);
853
f0a26983
SZ
854 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
855 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];
856
857 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
858 kfree(ifsq->ifsq_stage, M_DEVBUF);
859 }
407cde39
SZ
860 kfree(ifp->if_snd.altq_subq, M_DEVBUF);
861
4986965b 862 crit_exit();
984263bc
MD
863}
864
865/*
315a7da3
JL
866 * Create interface group without members
867 */
868struct ifg_group *
869if_creategroup(const char *groupname)
870{
871 struct ifg_group *ifg = NULL;
872
873 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
874 M_TEMP, M_NOWAIT)) == NULL)
875 return (NULL);
876
877 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
878 ifg->ifg_refcnt = 0;
879 ifg->ifg_carp_demoted = 0;
880 TAILQ_INIT(&ifg->ifg_members);
881#if NPF > 0
882 pfi_attach_ifgroup(ifg);
883#endif
884 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);
885
886 return (ifg);
887}
888
889/*
890 * Add a group to an interface
891 */
892int
893if_addgroup(struct ifnet *ifp, const char *groupname)
894{
895 struct ifg_list *ifgl;
896 struct ifg_group *ifg = NULL;
897 struct ifg_member *ifgm;
898
899 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
900 groupname[strlen(groupname) - 1] <= '9')
901 return (EINVAL);
902
903 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
904 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
905 return (EEXIST);
906
907 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
908 return (ENOMEM);
909
910 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
911 kfree(ifgl, M_TEMP);
912 return (ENOMEM);
913 }
914
915 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
916 if (!strcmp(ifg->ifg_group, groupname))
917 break;
918
919 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
920 kfree(ifgl, M_TEMP);
921 kfree(ifgm, M_TEMP);
922 return (ENOMEM);
923 }
924
925 ifg->ifg_refcnt++;
926 ifgl->ifgl_group = ifg;
927 ifgm->ifgm_ifp = ifp;
928
929 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
930 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
931
932#if NPF > 0
933 pfi_group_change(groupname);
934#endif
935
936 return (0);
937}
938
939/*
940 * Remove a group from an interface
941 */
942int
943if_delgroup(struct ifnet *ifp, const char *groupname)
944{
945 struct ifg_list *ifgl;
946 struct ifg_member *ifgm;
947
948 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
949 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
950 break;
951 if (ifgl == NULL)
952 return (ENOENT);
953
954 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
955
956 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
957 if (ifgm->ifgm_ifp == ifp)
958 break;
959
960 if (ifgm != NULL) {
961 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
962 kfree(ifgm, M_TEMP);
963 }
964
965 if (--ifgl->ifgl_group->ifg_refcnt == 0) {
966 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
967#if NPF > 0
968 pfi_detach_ifgroup(ifgl->ifgl_group);
969#endif
970 kfree(ifgl->ifgl_group, M_TEMP);
971 }
972
973 kfree(ifgl, M_TEMP);
974
975#if NPF > 0
976 pfi_group_change(groupname);
977#endif
978
979 return (0);
980}
981
982/*
983 * Stores all groups from an interface in memory pointed
984 * to by data
985 */
986int
987if_getgroup(caddr_t data, struct ifnet *ifp)
988{
989 int len, error;
990 struct ifg_list *ifgl;
991 struct ifg_req ifgrq, *ifgp;
992 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
993
994 if (ifgr->ifgr_len == 0) {
995 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
996 ifgr->ifgr_len += sizeof(struct ifg_req);
997 return (0);
998 }
999
1000 len = ifgr->ifgr_len;
1001 ifgp = ifgr->ifgr_groups;
1002 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
1003 if (len < sizeof(ifgrq))
1004 return (EINVAL);
1005 bzero(&ifgrq, sizeof ifgrq);
1006 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
1007 sizeof(ifgrq.ifgrq_group));
1008 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1009 sizeof(struct ifg_req))))
1010 return (error);
1011 len -= sizeof(ifgrq);
1012 ifgp++;
1013 }
1014
1015 return (0);
1016}
1017
1018/*
1019 * Stores all members of a group in memory pointed to by data
1020 */
1021int
1022if_getgroupmembers(caddr_t data)
1023{
1024 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
1025 struct ifg_group *ifg;
1026 struct ifg_member *ifgm;
1027 struct ifg_req ifgrq, *ifgp;
1028 int len, error;
1029
1030 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
1031 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
1032 break;
1033 if (ifg == NULL)
1034 return (ENOENT);
1035
1036 if (ifgr->ifgr_len == 0) {
1037 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
1038 ifgr->ifgr_len += sizeof(ifgrq);
1039 return (0);
1040 }
1041
1042 len = ifgr->ifgr_len;
1043 ifgp = ifgr->ifgr_groups;
1044 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
1045 if (len < sizeof(ifgrq))
1046 return (EINVAL);
1047 bzero(&ifgrq, sizeof ifgrq);
1048 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
1049 sizeof(ifgrq.ifgrq_member));
1050 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1051 sizeof(struct ifg_req))))
1052 return (error);
1053 len -= sizeof(ifgrq);
1054 ifgp++;
1055 }
1056
1057 return (0);
1058}
1059
1060/*
984263bc 1061 * Delete Routes for a Network Interface
f23061d4 1062 *
984263bc
MD
1063 * Called for each routing entry via the rnh->rnh_walktree() call above
1064 * to delete all route entries referencing a detaching network interface.
1065 *
1066 * Arguments:
1067 * rn pointer to node in the routing table
1068 * arg argument passed to rnh->rnh_walktree() - detaching interface
1069 *
1070 * Returns:
1071 * 0 successful
1072 * errno failed - reason indicated
1073 *
1074 */
1075static int
f23061d4 1076if_rtdel(struct radix_node *rn, void *arg)
984263bc
MD
1077{
1078 struct rtentry *rt = (struct rtentry *)rn;
1079 struct ifnet *ifp = arg;
1080 int err;
1081
1082 if (rt->rt_ifp == ifp) {
1083
1084 /*
1085 * Protect (sorta) against walktree recursion problems
1086 * with cloned routes
1087 */
f23061d4 1088 if (!(rt->rt_flags & RTF_UP))
984263bc
MD
1089 return (0);
1090
1091 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1092 rt_mask(rt), rt->rt_flags,
2038fb68 1093 NULL);
984263bc
MD
1094 if (err) {
1095 log(LOG_WARNING, "if_rtdel: error %d\n", err);
1096 }
1097 }
1098
1099 return (0);
1100}
1101
1102/*
984263bc
MD
1103 * Locate an interface based on a complete address.
1104 */
984263bc 1105struct ifaddr *
f23061d4 1106ifa_ifwithaddr(struct sockaddr *addr)
984263bc 1107{
82ed7fc2 1108 struct ifnet *ifp;
984263bc 1109
b2632176
SZ
1110 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1111 struct ifaddr_container *ifac;
1112
1113 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1114 struct ifaddr *ifa = ifac->ifa;
1115
1116 if (ifa->ifa_addr->sa_family != addr->sa_family)
1117 continue;
1118 if (sa_equal(addr, ifa->ifa_addr))
1119 return (ifa);
1120 if ((ifp->if_flags & IFF_BROADCAST) &&
1121 ifa->ifa_broadaddr &&
1122 /* IPv6 doesn't have broadcast */
1123 ifa->ifa_broadaddr->sa_len != 0 &&
1124 sa_equal(ifa->ifa_broadaddr, addr))
1125 return (ifa);
1126 }
984263bc 1127 }
b2632176 1128 return (NULL);
984263bc
MD
1129}
1130/*
1131 * Locate the point to point interface with a given destination address.
1132 */
984263bc 1133struct ifaddr *
f23061d4 1134ifa_ifwithdstaddr(struct sockaddr *addr)
984263bc 1135{
82ed7fc2 1136 struct ifnet *ifp;
984263bc 1137
b2632176
SZ
1138 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1139 struct ifaddr_container *ifac;
1140
1141 if (!(ifp->if_flags & IFF_POINTOPOINT))
1142 continue;
1143
1144 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1145 struct ifaddr *ifa = ifac->ifa;
1146
984263bc
MD
1147 if (ifa->ifa_addr->sa_family != addr->sa_family)
1148 continue;
0c3c561c
JH
1149 if (ifa->ifa_dstaddr &&
1150 sa_equal(addr, ifa->ifa_dstaddr))
984263bc 1151 return (ifa);
b2632176 1152 }
984263bc 1153 }
b2632176 1154 return (NULL);
984263bc
MD
1155}
1156
1157/*
1158 * Find an interface on a specific network. If several match, the
1159 * most specific one found is chosen.
1160 */
1161struct ifaddr *
f23061d4 1162ifa_ifwithnet(struct sockaddr *addr)
984263bc 1163{
82ed7fc2 1164 struct ifnet *ifp;
b2632176 1165 struct ifaddr *ifa_maybe = NULL;
984263bc
MD
1166 u_int af = addr->sa_family;
1167 char *addr_data = addr->sa_data, *cplim;
1168
1169 /*
1170 * AF_LINK addresses can be looked up directly by their index number,
1171 * so do that if we can.
1172 */
1173 if (af == AF_LINK) {
b2632176 1174 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
590b8cd4 1175
b2632176
SZ
1176 if (sdl->sdl_index && sdl->sdl_index <= if_index)
1177 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
984263bc
MD
1178 }
1179
1180 /*
1181 * Scan though each interface, looking for ones that have
1182 * addresses in this address family.
1183 */
1184 TAILQ_FOREACH(ifp, &ifnet, if_link) {
b2632176
SZ
1185 struct ifaddr_container *ifac;
1186
1187 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1188 struct ifaddr *ifa = ifac->ifa;
82ed7fc2 1189 char *cp, *cp2, *cp3;
984263bc
MD
1190
1191 if (ifa->ifa_addr->sa_family != af)
1192next: continue;
1193 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
1194 /*
1195 * This is a bit broken as it doesn't
1196 * take into account that the remote end may
1197 * be a single node in the network we are
1198 * looking for.
1199 * The trouble is that we don't know the
1200 * netmask for the remote end.
1201 */
0c3c561c
JH
1202 if (ifa->ifa_dstaddr != NULL &&
1203 sa_equal(addr, ifa->ifa_dstaddr))
f23061d4 1204 return (ifa);
984263bc
MD
1205 } else {
1206 /*
1207 * if we have a special address handler,
1208 * then use it instead of the generic one.
1209 */
f23061d4 1210 if (ifa->ifa_claim_addr) {
984263bc
MD
1211 if ((*ifa->ifa_claim_addr)(ifa, addr)) {
1212 return (ifa);
1213 } else {
1214 continue;
1215 }
1216 }
1217
1218 /*
1219 * Scan all the bits in the ifa's address.
1220 * If a bit disagrees with what we are
1221 * looking for, mask it with the netmask
1222 * to see if it really matters.
1223 * (A byte at a time)
1224 */
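/*
 * Loosely: each data byte of the candidate address is XORed against
 * the corresponding byte of the ifaddr and the difference is ANDed
 * with the netmask byte, so differences in host bits (mask byte 0x00)
 * are ignored while any difference in network bits (mask byte 0xff)
 * sends us on to the next address.
 */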
1225 if (ifa->ifa_netmask == 0)
1226 continue;
1227 cp = addr_data;
1228 cp2 = ifa->ifa_addr->sa_data;
1229 cp3 = ifa->ifa_netmask->sa_data;
590b8cd4
JH
1230 cplim = ifa->ifa_netmask->sa_len +
1231 (char *)ifa->ifa_netmask;
984263bc
MD
1232 while (cp3 < cplim)
1233 if ((*cp++ ^ *cp2++) & *cp3++)
1234 goto next; /* next address! */
1235 /*
1236 * If the netmask of what we just found
1237 * is more specific than what we had before
1238 * (if we had one) then remember the new one
1239 * before continuing to search
1240 * for an even better one.
1241 */
4090d6ff 1242 if (ifa_maybe == NULL ||
f23061d4
JH
1243 rn_refines((char *)ifa->ifa_netmask,
1244 (char *)ifa_maybe->ifa_netmask))
984263bc
MD
1245 ifa_maybe = ifa;
1246 }
1247 }
1248 }
1249 return (ifa_maybe);
1250}
1251
1252/*
1253 * Find an interface address specific to an interface best matching
1254 * a given address.
1255 */
1256struct ifaddr *
f23061d4 1257ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
984263bc 1258{
b2632176 1259 struct ifaddr_container *ifac;
82ed7fc2
RG
1260 char *cp, *cp2, *cp3;
1261 char *cplim;
4090d6ff 1262 struct ifaddr *ifa_maybe = NULL;
984263bc
MD
1263 u_int af = addr->sa_family;
1264
1265 if (af >= AF_MAX)
1266 return (0);
b2632176
SZ
1267 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1268 struct ifaddr *ifa = ifac->ifa;
1269
984263bc
MD
1270 if (ifa->ifa_addr->sa_family != af)
1271 continue;
4090d6ff 1272 if (ifa_maybe == NULL)
984263bc 1273 ifa_maybe = ifa;
0c3c561c
JH
1274 if (ifa->ifa_netmask == NULL) {
1275 if (sa_equal(addr, ifa->ifa_addr) ||
1276 (ifa->ifa_dstaddr != NULL &&
1277 sa_equal(addr, ifa->ifa_dstaddr)))
984263bc
MD
1278 return (ifa);
1279 continue;
1280 }
1281 if (ifp->if_flags & IFF_POINTOPOINT) {
0c3c561c 1282 if (sa_equal(addr, ifa->ifa_dstaddr))
984263bc
MD
1283 return (ifa);
1284 } else {
1285 cp = addr->sa_data;
1286 cp2 = ifa->ifa_addr->sa_data;
1287 cp3 = ifa->ifa_netmask->sa_data;
1288 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1289 for (; cp3 < cplim; cp3++)
1290 if ((*cp++ ^ *cp2++) & *cp3)
1291 break;
1292 if (cp3 == cplim)
1293 return (ifa);
1294 }
1295 }
1296 return (ifa_maybe);
1297}
1298
984263bc
MD
1299/*
1300 * Default action when installing a route with a Link Level gateway.
1301 * Lookup an appropriate real ifa to point to.
1302 * This should be moved to /sys/net/link.c eventually.
1303 */
1304static void
f23061d4 1305link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
984263bc 1306{
82ed7fc2 1307 struct ifaddr *ifa;
984263bc
MD
1308 struct sockaddr *dst;
1309 struct ifnet *ifp;
1310
f23061d4
JH
1311 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1312 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
984263bc
MD
1313 return;
1314 ifa = ifaof_ifpforaddr(dst, ifp);
f23061d4 1315 if (ifa != NULL) {
984263bc 1316 IFAFREE(rt->rt_ifa);
f23061d4 1317 IFAREF(ifa);
984263bc 1318 rt->rt_ifa = ifa;
984263bc
MD
1319 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1320 ifa->ifa_rtrequest(cmd, rt, info);
1321 }
1322}
1323
1324/*
1325 * Mark an interface down and notify protocols of
1326 * the transition.
1327 * NOTE: must be called at splnet or equivalent.
1328 */
1329void
f23061d4 1330if_unroute(struct ifnet *ifp, int flag, int fam)
984263bc 1331{
b2632176 1332 struct ifaddr_container *ifac;
984263bc
MD
1333
1334 ifp->if_flags &= ~flag;
1335 getmicrotime(&ifp->if_lastchange);
b2632176
SZ
1336 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1337 struct ifaddr *ifa = ifac->ifa;
1338
984263bc 1339 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
91be174d 1340 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
b2632176 1341 }
9275f515 1342 ifq_purge_all(&ifp->if_snd);
984263bc
MD
1343 rt_ifmsg(ifp);
1344}
1345
1346/*
1347 * Mark an interface up and notify protocols of
1348 * the transition.
1349 * NOTE: must be called at splnet or equivalent.
1350 */
1351void
f23061d4 1352if_route(struct ifnet *ifp, int flag, int fam)
984263bc 1353{
b2632176 1354 struct ifaddr_container *ifac;
984263bc 1355
9275f515 1356 ifq_purge_all(&ifp->if_snd);
984263bc
MD
1357 ifp->if_flags |= flag;
1358 getmicrotime(&ifp->if_lastchange);
b2632176
SZ
1359 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1360 struct ifaddr *ifa = ifac->ifa;
1361
984263bc 1362 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
91be174d 1363 kpfctlinput(PRC_IFUP, ifa->ifa_addr);
b2632176 1364 }
984263bc
MD
1365 rt_ifmsg(ifp);
1366#ifdef INET6
1367 in6_if_up(ifp);
1368#endif
1369}
1370
1371/*
5c703385
MD
1372 * Mark an interface down and notify protocols of the transition. An
1373 * interface going down is also considered to be a synchronizing event.
1374 * We must ensure that all packet processing related to the interface
1375 * has completed before we return so e.g. the caller can free the ifnet
1376 * structure that the mbufs may be referencing.
1377 *
984263bc
MD
1378 * NOTE: must be called at splnet or equivalent.
1379 */
1380void
f23061d4 1381if_down(struct ifnet *ifp)
984263bc 1382{
984263bc 1383 if_unroute(ifp, IFF_UP, AF_UNSPEC);
5c703385 1384 netmsg_service_sync();
984263bc
MD
1385}
1386
1387/*
1388 * Mark an interface up and notify protocols of
1389 * the transition.
1390 * NOTE: must be called at splnet or equivalent.
1391 */
1392void
f23061d4 1393if_up(struct ifnet *ifp)
984263bc 1394{
984263bc
MD
1395 if_route(ifp, IFF_UP, AF_UNSPEC);
1396}
1397
1398/*
6de83abe
SZ
1399 * Process a link state change.
1400 * NOTE: must be called at splsoftnet or equivalent.
1401 */
1402void
1403if_link_state_change(struct ifnet *ifp)
1404{
71fc104f
HT
1405 int link_state = ifp->if_link_state;
1406
6de83abe 1407 rt_ifmsg(ifp);
71fc104f
HT
1408 devctl_notify("IFNET", ifp->if_xname,
1409 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
6de83abe
SZ
1410}
1411
1412/*
984263bc
MD
1413 * Handle interface watchdog timer routines. Called
1414 * from softclock, we decrement timers (if set) and
1415 * call the appropriate interface routine on expiration.
1416 */
1417static void
f23061d4 1418if_slowtimo(void *arg)
984263bc 1419{
82ed7fc2 1420 struct ifnet *ifp;
4986965b
JS
1421
1422 crit_enter();
984263bc
MD
1423
1424 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1425 if (ifp->if_timer == 0 || --ifp->if_timer)
1426 continue;
78195a76 1427 if (ifp->if_watchdog) {
a3dd34d2 1428 if (ifnet_tryserialize_all(ifp)) {
78195a76 1429 (*ifp->if_watchdog)(ifp);
a3dd34d2 1430 ifnet_deserialize_all(ifp);
78195a76
MD
1431 } else {
1432 /* try again next timeout */
1433 ++ifp->if_timer;
1434 }
1435 }
984263bc 1436 }
4986965b
JS
1437
1438 crit_exit();
1439
abbb44bb 1440 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
984263bc
MD
1441}
1442
1443/*
1444 * Map interface name to
1445 * interface structure pointer.
1446 */
1447struct ifnet *
1448ifunit(const char *name)
1449{
984263bc 1450 struct ifnet *ifp;
984263bc 1451
984263bc 1452 /*
3e4a09e7 1453 * Search all the interfaces for this name/number
984263bc 1454 */
3e4a09e7 1455
984263bc 1456 TAILQ_FOREACH(ifp, &ifnet, if_link) {
3e4a09e7 1457 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
984263bc
MD
1458 break;
1459 }
1460 return (ifp);
1461}
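/*
 * Example: ifunit("em0") returns the ifnet whose if_xname is exactly
 * "em0", or NULL if no such interface is attached.  Note this is a
 * whole-name match against if_xname (unit number included), not a
 * driver-name prefix match.
 */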
1462
1463
1464/*
1465 * Map interface name in a sockaddr_dl to
1466 * interface structure pointer.
1467 */
1468struct ifnet *
f23061d4 1469if_withname(struct sockaddr *sa)
984263bc
MD
1470{
1471 char ifname[IFNAMSIZ+1];
1472 struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1473
1474 if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1475 (sdl->sdl_nlen > IFNAMSIZ) )
1476 return NULL;
1477
1478 /*
1479 * ifunit wants a null-terminated name. It may not be null-terminated
1480 * in the sockaddr. We don't want to change the caller's sockaddr,
1481 * and there might not be room to put the trailing null anyway, so we
1482 * make a local copy that we know we can null terminate safely.
1483 */
1484
1485 bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1486 ifname[sdl->sdl_nlen] = '\0';
1487 return ifunit(ifname);
1488}
1489
1490
1491/*
1492 * Interface ioctls.
1493 */
1494int
87de5057 1495ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
984263bc 1496{
41c20dac
MD
1497 struct ifnet *ifp;
1498 struct ifreq *ifr;
984263bc
MD
1499 struct ifstat *ifs;
1500 int error;
1501 short oif_flags;
1502 int new_flags;
9683f229
MD
1503#ifdef COMPAT_43
1504 int ocmd;
1505#endif
1fdf0954
HP
1506 size_t namelen, onamelen;
1507 char new_name[IFNAMSIZ];
1508 struct ifaddr *ifa;
1509 struct sockaddr_dl *sdl;
984263bc
MD
1510
1511 switch (cmd) {
984263bc
MD
1512 case SIOCGIFCONF:
1513 case OSIOCGIFCONF:
87de5057 1514 return (ifconf(cmd, data, cred));
9683f229
MD
1515 default:
1516 break;
984263bc 1517 }
9683f229 1518
984263bc
MD
1519 ifr = (struct ifreq *)data;
1520
1521 switch (cmd) {
1522 case SIOCIFCREATE:
c5e14c14
RP
1523 case SIOCIFCREATE2:
1524 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1525 return (error);
1526 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1527 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
984263bc 1528 case SIOCIFDESTROY:
895c1f85 1529 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
984263bc 1530 return (error);
c5e14c14 1531 return (if_clone_destroy(ifr->ifr_name));
984263bc
MD
1532 case SIOCIFGCLONERS:
1533 return (if_clone_list((struct if_clonereq *)data));
9683f229
MD
1534 default:
1535 break;
984263bc
MD
1536 }
1537
9683f229
MD
1538 /*
1539 * Nominal ioctl through interface, lookup the ifp and obtain a
1540 * lock to serialize the ifconfig ioctl operation.
1541 */
984263bc 1542 ifp = ifunit(ifr->ifr_name);
9683f229 1543 if (ifp == NULL)
984263bc 1544 return (ENXIO);
9683f229
MD
1545 error = 0;
1546 mtx_lock(&ifp->if_ioctl_mtx);
984263bc 1547
9683f229 1548 switch (cmd) {
12b71966
PA
1549 case SIOCGIFINDEX:
1550 ifr->ifr_index = ifp->if_index;
1551 break;
1552
984263bc
MD
1553 case SIOCGIFFLAGS:
1554 ifr->ifr_flags = ifp->if_flags;
46f25451 1555 ifr->ifr_flagshigh = ifp->if_flags >> 16;
984263bc
MD
1556 break;
1557
1558 case SIOCGIFCAP:
1559 ifr->ifr_reqcap = ifp->if_capabilities;
1560 ifr->ifr_curcap = ifp->if_capenable;
1561 break;
1562
1563 case SIOCGIFMETRIC:
1564 ifr->ifr_metric = ifp->if_metric;
1565 break;
1566
1567 case SIOCGIFMTU:
1568 ifr->ifr_mtu = ifp->if_mtu;
1569 break;
1570
315a7da3
JL
1571 case SIOCGIFDATA:
1572 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
9683f229 1573 sizeof(ifp->if_data));
315a7da3
JL
1574 break;
1575
984263bc
MD
1576 case SIOCGIFPHYS:
1577 ifr->ifr_phys = ifp->if_physical;
1578 break;
1579
1630efc5 1580 case SIOCGIFPOLLCPU:
1630efc5 1581 ifr->ifr_pollcpu = -1;
1630efc5
SZ
1582 break;
1583
1584 case SIOCSIFPOLLCPU:
1630efc5
SZ
1585 break;
1586
984263bc 1587 case SIOCSIFFLAGS:
895c1f85 1588 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1589 if (error)
9683f229 1590 break;
984263bc
MD
1591 new_flags = (ifr->ifr_flags & 0xffff) |
1592 (ifr->ifr_flagshigh << 16);
1593 if (ifp->if_flags & IFF_SMART) {
1594 /* Smart drivers twiddle their own routes */
1595 } else if (ifp->if_flags & IFF_UP &&
1596 (new_flags & IFF_UP) == 0) {
4986965b 1597 crit_enter();
984263bc 1598 if_down(ifp);
4986965b 1599 crit_exit();
984263bc
MD
1600 } else if (new_flags & IFF_UP &&
1601 (ifp->if_flags & IFF_UP) == 0) {
4986965b 1602 crit_enter();
984263bc 1603 if_up(ifp);
4986965b 1604 crit_exit();
984263bc 1605 }
9c095379 1606
b3a7093f
SZ
1607#ifdef IFPOLL_ENABLE
1608 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1609 if (new_flags & IFF_NPOLLING)
1610 ifpoll_register(ifp);
1611 else
1612 ifpoll_deregister(ifp);
1613 }
1614#endif
9c095379 1615
984263bc
MD
1616 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1617 (new_flags &~ IFF_CANTCHANGE);
984263bc
MD
1618 if (new_flags & IFF_PPROMISC) {
1619 /* Permanently promiscuous mode requested */
1620 ifp->if_flags |= IFF_PROMISC;
1621 } else if (ifp->if_pcount == 0) {
1622 ifp->if_flags &= ~IFF_PROMISC;
1623 }
78195a76 1624 if (ifp->if_ioctl) {
a3dd34d2 1625 ifnet_serialize_all(ifp);
87de5057 1626 ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1627 ifnet_deserialize_all(ifp);
78195a76 1628 }
984263bc
MD
1629 getmicrotime(&ifp->if_lastchange);
1630 break;
1631
1632 case SIOCSIFCAP:
895c1f85 1633 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1634 if (error)
9683f229
MD
1635 break;
1636 if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
1637 error = EINVAL;
1638 break;
1639 }
a3dd34d2 1640 ifnet_serialize_all(ifp);
87de5057 1641 ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1642 ifnet_deserialize_all(ifp);
984263bc
MD
1643 break;
1644
f23061d4 1645 case SIOCSIFNAME:
895c1f85 1646 error = priv_check_cred(cred, PRIV_ROOT, 0);
9683f229
MD
1647 if (error)
1648 break;
f23061d4 1649 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
9683f229
MD
1650 if (error)
1651 break;
1652 if (new_name[0] == '\0') {
1653 error = EINVAL;
1654 break;
1655 }
1656 if (ifunit(new_name) != NULL) {
1657 error = EEXIST;
1658 break;
1659 }
f2bd8b67
JS
1660
1661 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
f23061d4
JH
1662
1663 /* Announce the departure of the interface. */
1664 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1665
1666 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
b2632176 1667 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
f23061d4
JH
1668 /* XXX IFA_LOCK(ifa); */
1669 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1670 namelen = strlen(new_name);
1671 onamelen = sdl->sdl_nlen;
1672 /*
1673 * Move the address if needed. This is safe because we
1674 * allocate space for a name of length IFNAMSIZ when we
1675 * create this in if_attach().
1676 */
1677 if (namelen != onamelen) {
1678 bcopy(sdl->sdl_data + onamelen,
1679 sdl->sdl_data + namelen, sdl->sdl_alen);
1680 }
1681 bcopy(new_name, sdl->sdl_data, namelen);
1682 sdl->sdl_nlen = namelen;
1683 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1684 bzero(sdl->sdl_data, onamelen);
1685 while (namelen != 0)
1686 sdl->sdl_data[--namelen] = 0xff;
1687 /* XXX IFA_UNLOCK(ifa) */
f2bd8b67
JS
1688
1689 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
f23061d4
JH
1690
1691 /* Announce the return of the interface. */
1692 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1693 break;
1fdf0954 1694
984263bc 1695 case SIOCSIFMETRIC:
895c1f85 1696 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1697 if (error)
9683f229 1698 break;
984263bc
MD
1699 ifp->if_metric = ifr->ifr_metric;
1700 getmicrotime(&ifp->if_lastchange);
1701 break;
1702
1703 case SIOCSIFPHYS:
895c1f85 1704 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1705 if (error)
9683f229
MD
1706 break;
1707 if (ifp->if_ioctl == NULL) {
1708 error = EOPNOTSUPP;
1709 break;
1710 }
a3dd34d2 1711 ifnet_serialize_all(ifp);
87de5057 1712 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1713 ifnet_deserialize_all(ifp);
984263bc
MD
1714 if (error == 0)
1715 getmicrotime(&ifp->if_lastchange);
9683f229 1716 break;
984263bc
MD
1717
1718 case SIOCSIFMTU:
1719 {
1720 u_long oldmtu = ifp->if_mtu;
1721
895c1f85 1722 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1723 if (error)
9683f229
MD
1724 break;
1725 if (ifp->if_ioctl == NULL) {
1726 error = EOPNOTSUPP;
1727 break;
1728 }
1729 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
1730 error = EINVAL;
1731 break;
1732 }
a3dd34d2 1733 ifnet_serialize_all(ifp);
87de5057 1734 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1735 ifnet_deserialize_all(ifp);
984263bc
MD
1736 if (error == 0) {
1737 getmicrotime(&ifp->if_lastchange);
1738 rt_ifmsg(ifp);
1739 }
1740 /*
1741 * If the link MTU changed, do network layer specific procedure.
1742 */
1743 if (ifp->if_mtu != oldmtu) {
1744#ifdef INET6
1745 nd6_setmtu(ifp);
1746#endif
1747 }
9683f229 1748 break;
984263bc
MD
1749 }
1750
1751 case SIOCADDMULTI:
1752 case SIOCDELMULTI:
895c1f85 1753 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1754 if (error)
9683f229 1755 break;
984263bc
MD
1756
1757 /* Don't allow group membership on non-multicast interfaces. */
9683f229
MD
1758 if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1759 error = EOPNOTSUPP;
1760 break;
1761 }
984263bc
MD
1762
1763 /* Don't let users screw up protocols' entries. */
9683f229
MD
1764 if (ifr->ifr_addr.sa_family != AF_LINK) {
1765 error = EINVAL;
1766 break;
1767 }
984263bc
MD
1768
1769 if (cmd == SIOCADDMULTI) {
1770 struct ifmultiaddr *ifma;
1771 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1772 } else {
1773 error = if_delmulti(ifp, &ifr->ifr_addr);
1774 }
1775 if (error == 0)
1776 getmicrotime(&ifp->if_lastchange);
9683f229 1777 break;
984263bc
MD
1778
1779 case SIOCSIFPHYADDR:
1780 case SIOCDIFPHYADDR:
1781#ifdef INET6
1782 case SIOCSIFPHYADDR_IN6:
1783#endif
1784 case SIOCSLIFPHYADDR:
1785 case SIOCSIFMEDIA:
1786 case SIOCSIFGENERIC:
895c1f85 1787 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1788 if (error)
9683f229
MD
1789 break;
1790 if (ifp->if_ioctl == 0) {
1791 error = EOPNOTSUPP;
1792 break;
1793 }
a3dd34d2 1794 ifnet_serialize_all(ifp);
87de5057 1795 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1796 ifnet_deserialize_all(ifp);
984263bc
MD
1797 if (error == 0)
1798 getmicrotime(&ifp->if_lastchange);
9683f229 1799 break;
984263bc
MD
1800
1801 case SIOCGIFSTATUS:
1802 ifs = (struct ifstat *)data;
1803 ifs->ascii[0] = '\0';
9683f229 1804 /* fall through */
984263bc
MD
1805 case SIOCGIFPSRCADDR:
1806 case SIOCGIFPDSTADDR:
1807 case SIOCGLIFPHYADDR:
1808 case SIOCGIFMEDIA:
1809 case SIOCGIFGENERIC:
9683f229
MD
1810 if (ifp->if_ioctl == NULL) {
1811 error = EOPNOTSUPP;
1812 break;
1813 }
a3dd34d2 1814 ifnet_serialize_all(ifp);
87de5057 1815 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1816 ifnet_deserialize_all(ifp);
9683f229 1817 break;
984263bc
MD
1818
1819 case SIOCSIFLLADDR:
895c1f85 1820 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1821 if (error)
9683f229
MD
1822 break;
1823 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data,
1824 ifr->ifr_addr.sa_len);
19f10c78 1825 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
9683f229 1826 break;
984263bc
MD
1827
1828 default:
1829 oif_flags = ifp->if_flags;
9683f229
MD
1830 if (so->so_proto == 0) {
1831 error = EOPNOTSUPP;
1832 break;
1833 }
984263bc 1834#ifndef COMPAT_43
04951810 1835 error = so_pru_control_direct(so, cmd, data, ifp);
984263bc 1836#else
9683f229 1837 ocmd = cmd;
984263bc
MD
1838
1839 switch (cmd) {
984263bc
MD
1840 case SIOCSIFDSTADDR:
1841 case SIOCSIFADDR:
1842 case SIOCSIFBRDADDR:
1843 case SIOCSIFNETMASK:
1844#if BYTE_ORDER != BIG_ENDIAN
1845 if (ifr->ifr_addr.sa_family == 0 &&
1846 ifr->ifr_addr.sa_len < 16) {
1847 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1848 ifr->ifr_addr.sa_len = 16;
1849 }
1850#else
1851 if (ifr->ifr_addr.sa_len == 0)
1852 ifr->ifr_addr.sa_len = 16;
1853#endif
1854 break;
984263bc
MD
1855 case OSIOCGIFADDR:
1856 cmd = SIOCGIFADDR;
1857 break;
984263bc
MD
1858 case OSIOCGIFDSTADDR:
1859 cmd = SIOCGIFDSTADDR;
1860 break;
984263bc
MD
1861 case OSIOCGIFBRDADDR:
1862 cmd = SIOCGIFBRDADDR;
1863 break;
984263bc
MD
1864 case OSIOCGIFNETMASK:
1865 cmd = SIOCGIFNETMASK;
9683f229
MD
1866 break;
1867 default:
1868 break;
984263bc 1869 }
984263bc 1870
002c1265
MD
1871 error = so_pru_control_direct(so, cmd, data, ifp);
1872
1873 switch (ocmd) {
984263bc
MD
1874 case OSIOCGIFADDR:
1875 case OSIOCGIFDSTADDR:
1876 case OSIOCGIFBRDADDR:
1877 case OSIOCGIFNETMASK:
1878 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
002c1265 1879 break;
984263bc 1880 }
984263bc
MD
1881#endif /* COMPAT_43 */
1882
1883 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1884#ifdef INET6
1885 DELAY(100);/* XXX: temporary workaround for fxp issue*/
1886 if (ifp->if_flags & IFF_UP) {
4986965b 1887 crit_enter();
984263bc 1888 in6_if_up(ifp);
4986965b 1889 crit_exit();
984263bc
MD
1890 }
1891#endif
1892 }
9683f229 1893 break;
984263bc 1894 }
9683f229
MD
1895
1896 mtx_unlock(&ifp->if_ioctl_mtx);
1897 return (error);
984263bc
MD
1898}
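/*
 * Illustrative userland sketch (not part of if.c): driving the SIOCSIFMTU
 * path handled above.  example_set_mtu() is a hypothetical helper; it
 * assumes <sys/ioctl.h>, <sys/sockio.h>, <net/if.h> and an open AF_INET
 * datagram socket s.
 */
static int
example_set_mtu(int s, const char *ifname, int mtu)
{
	struct ifreq ifr;

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ifr.ifr_mtu = mtu;
	/* the kernel rejects values outside IF_MINMTU..IF_MAXMTU with EINVAL */
	return ioctl(s, SIOCSIFMTU, &ifr);
}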
1899
1900/*
1901 * Set/clear promiscuous mode on interface ifp based on the truth value
1902 * of pswitch. The calls are reference counted so that only the first
1903 * "on" request actually has an effect, as does the final "off" request.
1904 * Results are undefined if the "off" and "on" requests are not matched.
1905 */
1906int
f23061d4 1907ifpromisc(struct ifnet *ifp, int pswitch)
984263bc
MD
1908{
1909 struct ifreq ifr;
1910 int error;
1911 int oldflags;
1912
1913 oldflags = ifp->if_flags;
46f25451 1914 if (ifp->if_flags & IFF_PPROMISC) {
984263bc
MD
1915 /* Do nothing if device is in permanently promiscuous mode */
1916 ifp->if_pcount += pswitch ? 1 : -1;
1917 return (0);
1918 }
1919 if (pswitch) {
1920 /*
1921 * If the device is not configured up, we cannot put it in
1922 * promiscuous mode.
1923 */
1924 if ((ifp->if_flags & IFF_UP) == 0)
1925 return (ENETDOWN);
1926 if (ifp->if_pcount++ != 0)
1927 return (0);
1928 ifp->if_flags |= IFF_PROMISC;
3e4a09e7
MD
1929 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1930 ifp->if_xname);
984263bc
MD
1931 } else {
1932 if (--ifp->if_pcount > 0)
1933 return (0);
1934 ifp->if_flags &= ~IFF_PROMISC;
3e4a09e7
MD
1935 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1936 ifp->if_xname);
984263bc
MD
1937 }
1938 ifr.ifr_flags = ifp->if_flags;
46f25451 1939 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2
SZ
1940 ifnet_serialize_all(ifp);
1941 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1942 ifnet_deserialize_all(ifp);
984263bc
MD
1943 if (error == 0)
1944 rt_ifmsg(ifp);
1945 else
1946 ifp->if_flags = oldflags;
1947 return error;
1948}
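/*
 * Illustrative sketch (not part of if.c): because ifpromisc() is reference
 * counted, a hypothetical packet-tap consumer simply brackets its lifetime
 * with matched on/off calls; only the first "on" and the last matched "off"
 * actually toggle IFF_PROMISC on the hardware.
 */
static int
example_tap_attach(struct ifnet *ifp)
{
	/* first caller flips IFF_PROMISC, later callers only bump if_pcount */
	return ifpromisc(ifp, 1);
}

static void
example_tap_detach(struct ifnet *ifp)
{
	/* must pair with a successful example_tap_attach() */
	ifpromisc(ifp, 0);
}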
1949
1950/*
 1951 * Return the interface configuration
 1952 * of the system.  The list may be used
 1953 * in later ioctls (above) to get
 1954 * other information.
1955 */
984263bc 1956static int
87de5057 1957ifconf(u_long cmd, caddr_t data, struct ucred *cred)
984263bc 1958{
41c20dac
MD
1959 struct ifconf *ifc = (struct ifconf *)data;
1960 struct ifnet *ifp;
984263bc
MD
1961 struct sockaddr *sa;
1962 struct ifreq ifr, *ifrp;
1963 int space = ifc->ifc_len, error = 0;
1964
1965 ifrp = ifc->ifc_req;
1966 TAILQ_FOREACH(ifp, &ifnet, if_link) {
b2632176 1967 struct ifaddr_container *ifac;
3e4a09e7 1968 int addrs;
984263bc 1969
f23061d4 1970 if (space <= sizeof ifr)
984263bc 1971 break;
623c059e
JS
1972
1973 /*
95f018e8
MD
1974 * Zero the stack declared structure first to prevent
1975 * memory disclosure.
623c059e 1976 */
95f018e8 1977 bzero(&ifr, sizeof(ifr));
3e4a09e7
MD
1978 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1979 >= sizeof(ifr.ifr_name)) {
984263bc
MD
1980 error = ENAMETOOLONG;
1981 break;
984263bc
MD
1982 }
1983
1984 addrs = 0;
b2632176
SZ
1985 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1986 struct ifaddr *ifa = ifac->ifa;
1987
f23061d4 1988 if (space <= sizeof ifr)
984263bc
MD
1989 break;
1990 sa = ifa->ifa_addr;
87de5057
MD
1991 if (cred->cr_prison &&
1992 prison_if(cred, sa))
984263bc
MD
1993 continue;
1994 addrs++;
1995#ifdef COMPAT_43
1996 if (cmd == OSIOCGIFCONF) {
1997 struct osockaddr *osa =
1998 (struct osockaddr *)&ifr.ifr_addr;
1999 ifr.ifr_addr = *sa;
2000 osa->sa_family = sa->sa_family;
f23061d4 2001 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2002 ifrp++;
2003 } else
2004#endif
2005 if (sa->sa_len <= sizeof(*sa)) {
2006 ifr.ifr_addr = *sa;
f23061d4 2007 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2008 ifrp++;
2009 } else {
f23061d4 2010 if (space < (sizeof ifr) + sa->sa_len -
984263bc
MD
2011 sizeof(*sa))
2012 break;
2013 space -= sa->sa_len - sizeof(*sa);
f23061d4
JH
2014 error = copyout(&ifr, ifrp,
2015 sizeof ifr.ifr_name);
984263bc 2016 if (error == 0)
f23061d4
JH
2017 error = copyout(sa, &ifrp->ifr_addr,
2018 sa->sa_len);
984263bc
MD
2019 ifrp = (struct ifreq *)
2020 (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
2021 }
2022 if (error)
2023 break;
f23061d4 2024 space -= sizeof ifr;
984263bc
MD
2025 }
2026 if (error)
2027 break;
2028 if (!addrs) {
f23061d4
JH
2029 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
2030 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2031 if (error)
2032 break;
f23061d4 2033 space -= sizeof ifr;
984263bc
MD
2034 ifrp++;
2035 }
2036 }
2037 ifc->ifc_len -= space;
2038 return (error);
2039}
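/*
 * Illustrative userland sketch (not part of if.c): the consumer side of the
 * SIOCGIFCONF ioctl served by ifconf() above.  example_dump_ifconf() is a
 * hypothetical helper; it assumes <sys/ioctl.h>, <sys/socket.h>, <net/if.h>
 * and an open AF_INET socket s.
 */
static int
example_dump_ifconf(int s)
{
	struct ifconf ifc;
	char buf[4096];

	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	if (ioctl(s, SIOCGIFCONF, &ifc) < 0)
		return (-1);
	/* ifc_len was trimmed by the kernel to the bytes actually copied out */
	return (ifc.ifc_len);
}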
2040
2041/*
 2042 * Just like ifpromisc(), but for all-multicast-reception mode.
2043 */
2044int
f23061d4 2045if_allmulti(struct ifnet *ifp, int onswitch)
984263bc
MD
2046{
2047 int error = 0;
984263bc
MD
2048 struct ifreq ifr;
2049
4986965b
JS
2050 crit_enter();
2051
984263bc
MD
2052 if (onswitch) {
2053 if (ifp->if_amcount++ == 0) {
2054 ifp->if_flags |= IFF_ALLMULTI;
2055 ifr.ifr_flags = ifp->if_flags;
46f25451 2056 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2 2057 ifnet_serialize_all(ifp);
bd4539cc 2058 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2059 NULL);
a3dd34d2 2060 ifnet_deserialize_all(ifp);
984263bc
MD
2061 }
2062 } else {
2063 if (ifp->if_amcount > 1) {
2064 ifp->if_amcount--;
2065 } else {
2066 ifp->if_amcount = 0;
2067 ifp->if_flags &= ~IFF_ALLMULTI;
2068 ifr.ifr_flags = ifp->if_flags;
46f25451 2069 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2 2070 ifnet_serialize_all(ifp);
bd4539cc 2071 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2072 NULL);
a3dd34d2 2073 ifnet_deserialize_all(ifp);
984263bc
MD
2074 }
2075 }
4986965b
JS
2076
2077 crit_exit();
984263bc
MD
2078
2079 if (error == 0)
2080 rt_ifmsg(ifp);
2081 return error;
2082}
2083
2084/*
2085 * Add a multicast listenership to the interface in question.
 2086 * The link layer's if_resolvemulti routine converts the network-level
 2086 * address into the matching link-level address, if necessary.
2087 */
2088int
f23061d4
JH
2089if_addmulti(
2090 struct ifnet *ifp, /* interface to manipulate */
2091 struct sockaddr *sa, /* address to add */
2092 struct ifmultiaddr **retifma)
984263bc
MD
2093{
2094 struct sockaddr *llsa, *dupsa;
4986965b 2095 int error;
984263bc
MD
2096 struct ifmultiaddr *ifma;
2097
2098 /*
2099 * If the matching multicast address already exists
2100 * then don't add a new one, just add a reference
2101 */
441d34b2 2102 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
0c3c561c 2103 if (sa_equal(sa, ifma->ifma_addr)) {
984263bc
MD
2104 ifma->ifma_refcount++;
2105 if (retifma)
2106 *retifma = ifma;
2107 return 0;
2108 }
2109 }
2110
2111 /*
2112 * Give the link layer a chance to accept/reject it, and also
2113 * find out which AF_LINK address this maps to, if it isn't one
2114 * already.
2115 */
2116 if (ifp->if_resolvemulti) {
a3dd34d2 2117 ifnet_serialize_all(ifp);
984263bc 2118 error = ifp->if_resolvemulti(ifp, &llsa, sa);
a3dd34d2 2119 ifnet_deserialize_all(ifp);
78195a76
MD
2120 if (error)
2121 return error;
984263bc 2122 } else {
4090d6ff 2123 llsa = NULL;
984263bc
MD
2124 }
2125
884717e1
SW
2126 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2127 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_WAITOK);
984263bc
MD
2128 bcopy(sa, dupsa, sa->sa_len);
2129
2130 ifma->ifma_addr = dupsa;
2131 ifma->ifma_lladdr = llsa;
2132 ifma->ifma_ifp = ifp;
2133 ifma->ifma_refcount = 1;
2134 ifma->ifma_protospec = 0;
2135 rt_newmaddrmsg(RTM_NEWMADDR, ifma);
2136
2137 /*
2138 * Some network interfaces can scan the address list at
2139 * interrupt time; lock them out.
2140 */
4986965b 2141 crit_enter();
441d34b2 2142 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
4986965b 2143 crit_exit();
6cd0715f
RP
2144 if (retifma)
2145 *retifma = ifma;
984263bc 2146
4090d6ff 2147 if (llsa != NULL) {
441d34b2 2148 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
0c3c561c 2149 if (sa_equal(ifma->ifma_addr, llsa))
984263bc
MD
2150 break;
2151 }
2152 if (ifma) {
2153 ifma->ifma_refcount++;
2154 } else {
884717e1
SW
2155 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2156 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_WAITOK);
984263bc
MD
2157 bcopy(llsa, dupsa, llsa->sa_len);
2158 ifma->ifma_addr = dupsa;
2159 ifma->ifma_ifp = ifp;
2160 ifma->ifma_refcount = 1;
4986965b 2161 crit_enter();
441d34b2 2162 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
4986965b 2163 crit_exit();
984263bc
MD
2164 }
2165 }
2166 /*
2167 * We are certain we have added something, so call down to the
2168 * interface to let them know about it.
2169 */
4986965b 2170 crit_enter();
a3dd34d2 2171 ifnet_serialize_all(ifp);
6cd0715f
RP
2172 if (ifp->if_ioctl)
2173 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
a3dd34d2 2174 ifnet_deserialize_all(ifp);
4986965b 2175 crit_exit();
984263bc
MD
2176
2177 return 0;
2178}
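/*
 * Illustrative sketch (not part of if.c): a protocol joining the 224.0.0.1
 * all-hosts group on an interface.  if_addmulti() uses if_resolvemulti() to
 * derive the matching AF_LINK address and reference counts both entries.
 * example_join_allhosts() is a hypothetical helper assuming <netinet/in.h>.
 */
static int
example_join_allhosts(struct ifnet *ifp, struct ifmultiaddr **ifmap)
{
	struct sockaddr_in sin;

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);

	return if_addmulti(ifp, (struct sockaddr *)&sin, ifmap);
}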
2179
2180/*
2181 * Remove a reference to a multicast address on this interface. Yell
2182 * if the request does not match an existing membership.
2183 */
2184int
f23061d4 2185if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
984263bc
MD
2186{
2187 struct ifmultiaddr *ifma;
984263bc 2188
441d34b2 2189 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2190 if (sa_equal(sa, ifma->ifma_addr))
984263bc 2191 break;
4090d6ff 2192 if (ifma == NULL)
984263bc
MD
2193 return ENOENT;
2194
2195 if (ifma->ifma_refcount > 1) {
2196 ifma->ifma_refcount--;
2197 return 0;
2198 }
2199
2200 rt_newmaddrmsg(RTM_DELMADDR, ifma);
2201 sa = ifma->ifma_lladdr;
4986965b 2202 crit_enter();
441d34b2 2203 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
984263bc
MD
2204 /*
2205 * Make sure the interface driver is notified
2206 * in the case of a link layer mcast group being left.
2207 */
4090d6ff 2208 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) {
a3dd34d2 2209 ifnet_serialize_all(ifp);
2038fb68 2210 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
a3dd34d2 2211 ifnet_deserialize_all(ifp);
78195a76 2212 }
4986965b 2213 crit_exit();
efda3bd0
MD
2214 kfree(ifma->ifma_addr, M_IFMADDR);
2215 kfree(ifma, M_IFMADDR);
4090d6ff 2216 if (sa == NULL)
984263bc
MD
2217 return 0;
2218
2219 /*
2220 * Now look for the link-layer address which corresponds to
2221 * this network address. It had been squirreled away in
2222 * ifma->ifma_lladdr for this purpose (so we don't have
2223 * to call ifp->if_resolvemulti() again), and we saved that
2224 * value in sa above. If some nasty deleted the
2225 * link-layer address out from underneath us, we can deal because
 2226 * the address we stored is not the same as the one which was
2227 * in the record for the link-layer address. (So we don't complain
2228 * in that case.)
2229 */
441d34b2 2230 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2231 if (sa_equal(sa, ifma->ifma_addr))
984263bc 2232 break;
4090d6ff 2233 if (ifma == NULL)
984263bc
MD
2234 return 0;
2235
2236 if (ifma->ifma_refcount > 1) {
2237 ifma->ifma_refcount--;
2238 return 0;
2239 }
2240
4986965b 2241 crit_enter();
a3dd34d2 2242 ifnet_serialize_all(ifp);
441d34b2 2243 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
2038fb68 2244 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
a3dd34d2 2245 ifnet_deserialize_all(ifp);
4986965b 2246 crit_exit();
efda3bd0
MD
2247 kfree(ifma->ifma_addr, M_IFMADDR);
2248 kfree(sa, M_IFMADDR);
2249 kfree(ifma, M_IFMADDR);
984263bc
MD
2250
2251 return 0;
2252}
2253
2254/*
3976c93a
RP
 2255 * Delete all multicast group memberships for an interface.
2256 * Should be used to quickly flush all multicast filters.
2257 */
2258void
2259if_delallmulti(struct ifnet *ifp)
2260{
2261 struct ifmultiaddr *ifma;
2262 struct ifmultiaddr *next;
2263
441d34b2 2264 TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
3976c93a
RP
2265 if_delmulti(ifp, ifma->ifma_addr);
2266}
2267
2268
2269/*
984263bc
MD
2270 * Set the link layer address on an interface.
2271 *
2272 * At this time we only support certain types of interfaces,
2273 * and we don't allow the length of the address to change.
2274 */
2275int
2276if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2277{
2278 struct sockaddr_dl *sdl;
984263bc
MD
2279 struct ifreq ifr;
2280
f2682cb9 2281 sdl = IF_LLSOCKADDR(ifp);
984263bc
MD
2282 if (sdl == NULL)
2283 return (EINVAL);
2284 if (len != sdl->sdl_alen) /* don't allow length to change */
2285 return (EINVAL);
2286 switch (ifp->if_type) {
2287 case IFT_ETHER: /* these types use struct arpcom */
984263bc 2288 case IFT_XETHER:
984263bc
MD
2289 case IFT_L2VLAN:
2290 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
984263bc
MD
2291 bcopy(lladdr, LLADDR(sdl), len);
2292 break;
2293 default:
2294 return (ENODEV);
2295 }
2296 /*
2297 * If the interface is already up, we need
2298 * to re-init it in order to reprogram its
2299 * address filter.
2300 */
a3dd34d2 2301 ifnet_serialize_all(ifp);
984263bc 2302 if ((ifp->if_flags & IFF_UP) != 0) {
c97d9b76 2303#ifdef INET
b2632176 2304 struct ifaddr_container *ifac;
c97d9b76 2305#endif
b2632176 2306
984263bc
MD
2307 ifp->if_flags &= ~IFF_UP;
2308 ifr.ifr_flags = ifp->if_flags;
46f25451 2309 ifr.ifr_flagshigh = ifp->if_flags >> 16;
78195a76 2310 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2311 NULL);
984263bc
MD
2312 ifp->if_flags |= IFF_UP;
2313 ifr.ifr_flags = ifp->if_flags;
46f25451 2314 ifr.ifr_flagshigh = ifp->if_flags >> 16;
78195a76 2315 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2316 NULL);
984263bc
MD
2317#ifdef INET
2318 /*
2319 * Also send gratuitous ARPs to notify other nodes about
2320 * the address change.
2321 */
b2632176
SZ
2322 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2323 struct ifaddr *ifa = ifac->ifa;
2324
984263bc
MD
2325 if (ifa->ifa_addr != NULL &&
2326 ifa->ifa_addr->sa_family == AF_INET)
69b66ae8 2327 arp_gratuitous(ifp, ifa);
984263bc
MD
2328 }
2329#endif
2330 }
a3dd34d2 2331 ifnet_deserialize_all(ifp);
984263bc
MD
2332 return (0);
2333}
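/*
 * Illustrative sketch (not part of if.c): programming a locally administered
 * MAC address on an Ethernet interface.  The length must equal the existing
 * sdl_alen (ETHER_ADDR_LEN for IFT_ETHER) or if_setlladdr() returns EINVAL.
 * example_set_mac() is a hypothetical helper assuming <net/ethernet.h>.
 */
static int
example_set_mac(struct ifnet *ifp)
{
	static const u_char mac[ETHER_ADDR_LEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01	/* locally administered */
	};

	return if_setlladdr(ifp, mac, ETHER_ADDR_LEN);
}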
2334
2335struct ifmultiaddr *
f23061d4 2336ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
984263bc
MD
2337{
2338 struct ifmultiaddr *ifma;
2339
441d34b2 2340 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2341 if (sa_equal(ifma->ifma_addr, sa))
984263bc
MD
2342 break;
2343
2344 return ifma;
2345}
2346
1550dfd9 2347/*
e9bd1548
MD
2348 * This function locates the first real ethernet MAC from a network
2349 * card and loads it into node, returning 0 on success or ENOENT if
2350 * no suitable interfaces were found. It is used by the uuid code to
2351 * generate a unique 6-byte number.
2352 */
2353int
2354if_getanyethermac(uint16_t *node, int minlen)
2355{
2356 struct ifnet *ifp;
2357 struct sockaddr_dl *sdl;
2358
2359 TAILQ_FOREACH(ifp, &ifnet, if_link) {
2360 if (ifp->if_type != IFT_ETHER)
2361 continue;
2362 sdl = IF_LLSOCKADDR(ifp);
2363 if (sdl->sdl_alen < minlen)
2364 continue;
2365 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2366 minlen);
2367 return(0);
2368 }
2369 return (ENOENT);
2370}
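/*
 * Illustrative sketch (not part of if.c): how a caller such as the uuid
 * code might seed a 6-byte node identifier.  example_seed_node() is a
 * hypothetical helper; the zero fallback stands in for whatever random
 * fallback the real caller uses.
 */
static void
example_seed_node(uint16_t node[3])
{
	if (if_getanyethermac(node, 6) != 0) {
		/* no Ethernet interface with a long enough address was found */
		bzero(node, 6);
	}
}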
2371
2372/*
1550dfd9
MD
2373 * The name argument must be a pointer to storage which will last as
2374 * long as the interface does. For physical devices, the result of
2375 * device_get_name(dev) is a good choice and for pseudo-devices a
2376 * static string works well.
2377 */
2378void
2379if_initname(struct ifnet *ifp, const char *name, int unit)
2380{
3e4a09e7
MD
2381 ifp->if_dname = name;
2382 ifp->if_dunit = unit;
1550dfd9 2383 if (unit != IF_DUNIT_NONE)
f8c7a42d 2384 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
1550dfd9
MD
2385 else
2386 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2387}
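/*
 * Illustrative sketch (not part of if.c): a typical driver attach path names
 * its ifnet from the device, exactly as the comment above recommends.
 * example_attach_name() is a hypothetical helper.
 */
static void
example_attach_name(struct ifnet *ifp, device_t dev)
{
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
}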
2388
984263bc
MD
2389int
2390if_printf(struct ifnet *ifp, const char *fmt, ...)
2391{
e2565a42 2392 __va_list ap;
984263bc
MD
2393 int retval;
2394
4b1cf444 2395 retval = kprintf("%s: ", ifp->if_xname);
e2565a42 2396 __va_start(ap, fmt);
379210cb 2397 retval += kvprintf(fmt, ap);
e2565a42 2398 __va_end(ap);
984263bc
MD
2399 return (retval);
2400}
2401
cb80735c
RP
2402struct ifnet *
2403if_alloc(uint8_t type)
2404{
2405 struct ifnet *ifp;
7e395935 2406 size_t size;
cb80735c 2407
7e395935
MD
2408 /*
2409 * XXX temporary hack until arpcom is setup in if_l2com
2410 */
2411 if (type == IFT_ETHER)
2412 size = sizeof(struct arpcom);
2413 else
2414 size = sizeof(struct ifnet);
2415
2416 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);
cb80735c
RP
2417
2418 ifp->if_type = type;
2419
aeb3c11e
RP
2420 if (if_com_alloc[type] != NULL) {
2421 ifp->if_l2com = if_com_alloc[type](type, ifp);
2422 if (ifp->if_l2com == NULL) {
2423 kfree(ifp, M_IFNET);
2424 return (NULL);
2425 }
2426 }
cb80735c
RP
2427 return (ifp);
2428}
2429
2430void
2431if_free(struct ifnet *ifp)
2432{
2433 kfree(ifp, M_IFNET);
2434}
2435
b2f93efe
JS
2436void
2437ifq_set_classic(struct ifaltq *ifq)
2438{
2cc2f639
SZ
2439 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq,
2440 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request);
f0a26983
SZ
2441}
2442
2443void
2cc2f639
SZ
2444ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq,
2445 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request)
f0a26983
SZ
2446{
2447 int q;
2448
2cc2f639
SZ
2449 KASSERT(mapsubq != NULL, ("mapsubq is not specified"));
2450 KASSERT(enqueue != NULL, ("enqueue is not specified"));
2451 KASSERT(dequeue != NULL, ("dequeue is not specified"));
2452 KASSERT(request != NULL, ("request is not specified"));
2453
2454 ifq->altq_mapsubq = mapsubq;
f0a26983
SZ
2455 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
2456 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
2457
2458 ifsq->ifsq_enqueue = enqueue;
2459 ifsq->ifsq_dequeue = dequeue;
2460 ifsq->ifsq_request = request;
2461 }
b2f93efe
JS
2462}
2463
9db4b353 2464int
f0a26983
SZ
2465ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
2466 struct altq_pktattr *pa __unused)
e3e4574a 2467{
f0a26983 2468 if (IF_QFULL(ifsq)) {
e3e4574a
JS
2469 m_freem(m);
2470 return(ENOBUFS);
2471 } else {
f0a26983 2472 IF_ENQUEUE(ifsq, m);
e3e4574a
JS
2473 return(0);
2474 }
2475}
2476
9db4b353 2477struct mbuf *
f0a26983 2478ifsq_classic_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
e3e4574a
JS
2479{
2480 struct mbuf *m;
2481
2482 switch (op) {
2483 case ALTDQ_POLL:
f0a26983 2484 IF_POLL(ifsq, m);
e3e4574a
JS
2485 break;
2486 case ALTDQ_REMOVE:
f0a26983 2487 IF_DEQUEUE(ifsq, m);
e3e4574a
JS
2488 break;
2489 default:
2490 panic("unsupported ALTQ dequeue op: %d", op);
2491 }
d2c71fa0 2492 KKASSERT(mpolled == NULL || mpolled == m);
e3e4574a
JS
2493 return(m);
2494}
2495
9db4b353 2496int
f0a26983 2497ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
e3e4574a
JS
2498{
2499 switch (req) {
2500 case ALTRQ_PURGE:
f0a26983 2501 IF_DRAIN(ifsq);
e3e4574a
JS
2502 break;
2503 default:
3f625015 2504 panic("unsupported ALTQ request: %d", req);
e3e4574a 2505 }
e3e4574a
JS
2506 return(0);
2507}
b2632176 2508
28cc0c29 2509static void
f0a26983 2510ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
28cc0c29 2511{
f0a26983 2512 struct ifnet *ifp = ifsq_get_ifp(ifsq);
28cc0c29
SZ
2513 int running = 0, need_sched;
2514
2515 /*
 2516	 * Try a direct ifnet.if_start call first; if there is
2517 * contention on ifnet's serializer, ifnet.if_start will
2518 * be scheduled on ifnet's CPU.
2519 */
3c4cd924 2520 if (!ifnet_tryserialize_tx(ifp, ifsq)) {
28cc0c29
SZ
2521 /*
2522 * ifnet serializer contention happened,
2523 * ifnet.if_start is scheduled on ifnet's
2524 * CPU, and we keep going.
2525 */
f0a26983 2526 ifsq_ifstart_schedule(ifsq, 1);
28cc0c29
SZ
2527 return;
2528 }
2529
f0a26983
SZ
2530 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
2531 ifp->if_start(ifp, ifsq);
2532 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
28cc0c29
SZ
2533 running = 1;
2534 }
f0a26983 2535 need_sched = ifsq_ifstart_need_schedule(ifsq, running);
28cc0c29 2536
3c4cd924 2537 ifnet_deserialize_tx(ifp, ifsq);
28cc0c29
SZ
2538
2539 if (need_sched) {
2540 /*
 2541		 * More data needs to be transmitted; ifnet.if_start is
2542 * scheduled on ifnet's CPU, and we keep going.
2543 * NOTE: ifnet.if_start interlock is not released.
2544 */
f0a26983 2545 ifsq_ifstart_schedule(ifsq, force_sched);
28cc0c29
SZ
2546 }
2547}
2548
2aa7f7f8 2549/*
f0a26983 2550 * IFSUBQ packets staging mechanism:
2aa7f7f8 2551 *
f0a26983 2552 * The packets enqueued into IFSUBQ are staged to a certain amount before the
2aa7f7f8
SZ
2553 * ifnet's if_start is called. In this way, the driver could avoid writing
 2554 * to hardware registers upon every packet; instead, hardware registers
 2555 * can be written once a certain amount of packets has been put onto the
 2556 * hardware TX ring.  Measurement on several modern NICs (emx(4), igb(4), bnx(4),
 2557 * bge(4), jme(4)) shows that this hardware register write aggregation
 2558 * can save ~20% CPU time when 18-byte UDP datagrams are transmitted at
 2559 * 1.48Mpps.  The performance improvement from hardware register write
 2560 * aggregation is also mentioned in Luigi Rizzo's netmap paper
2561 * (http://info.iet.unipi.it/~luigi/netmap/).
2562 *
f0a26983 2563 * IFSUBQ packets staging is performed for two entry points into a driver's
2aa7f7f8 2564 * transmission function:
f0a26983
SZ
2565 * - Direct ifnet's if_start calling, i.e. ifsq_ifstart_try()
2566 * - ifnet's if_start scheduling, i.e. ifsq_ifstart_schedule()
2aa7f7f8 2567 *
f0a26983 2568 * IFSUBQ packets staging will be stopped upon any of the following conditions:
2aa7f7f8 2569 * - If the count of packets enqueued on the current CPU is greater than or
f0a26983 2570 * equal to ifsq_stage_cntmax. (XXX this should be per-interface)
2aa7f7f8
SZ
 2571 * - If the total length of packets enqueued on the current CPU is greater
 2572 *   than or equal to the hardware's MTU - max_protohdr.  max_protohdr is
 2573 *   subtracted from the hardware's MTU mainly because a full TCP segment's
 2574 *   size is usually less than the hardware's MTU.
f0a26983 2575 * - ifsq_ifstart_schedule() is not pending on the current CPU and if_start
2aa7f7f8
SZ
2576 * interlock (if_snd.altq_started) is not released.
2577 * - The if_start_rollup(), which is registered as low priority netisr
2578 * rollup function, is called; probably because no more work is pending
2579 * for netisr.
2580 *
2581 * NOTE:
f0a26983 2582 * Currently IFSUBQ packet staging is only performed in netisr threads.
2aa7f7f8 2583 */
9db4b353
SZ
2584int
2585ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2586{
2587 struct ifaltq *ifq = &ifp->if_snd;
f0a26983 2588 struct ifaltq_subque *ifsq;
28cc0c29 2589 int error, start = 0, len, mcast = 0, avoid_start = 0;
f0a26983
SZ
2590 struct ifsubq_stage_head *head = NULL;
2591 struct ifsubq_stage *stage = NULL;
57dff79c 2592
2cc2f639 2593 ifsq = ifq_map_subq(ifq, mycpuid);
3c4cd924 2594 ASSERT_IFNET_NOT_SERIALIZED_TX(ifp, ifsq);
9db4b353 2595
fe53d127
SZ
2596 len = m->m_pkthdr.len;
2597 if (m->m_flags & M_MCAST)
2598 mcast = 1;
2599
28cc0c29 2600 if (curthread->td_type == TD_TYPE_NETISR) {
f0a26983
SZ
2601 head = &ifsubq_stage_heads[mycpuid];
2602 stage = ifsq_get_stage(ifsq, mycpuid);
28cc0c29 2603
f0a26983
SZ
2604 stage->stg_cnt++;
2605 stage->stg_len += len;
2606 if (stage->stg_cnt < ifsq_stage_cntmax &&
2607 stage->stg_len < (ifp->if_mtu - max_protohdr))
28cc0c29
SZ
2608 avoid_start = 1;
2609 }
2610
f0a26983
SZ
2611 ALTQ_SQ_LOCK(ifsq);
2612 error = ifsq_enqueue_locked(ifsq, m, pa);
9db4b353 2613 if (error) {
f0a26983
SZ
2614 if (!ifsq_data_ready(ifsq)) {
2615 ALTQ_SQ_UNLOCK(ifsq);
087561ef
SZ
2616 return error;
2617 }
28cc0c29 2618 avoid_start = 0;
9db4b353 2619 }
f0a26983 2620 if (!ifsq_is_started(ifsq)) {
28cc0c29 2621 if (avoid_start) {
f0a26983 2622 ALTQ_SQ_UNLOCK(ifsq);
28cc0c29
SZ
2623
2624 KKASSERT(!error);
f0a26983
SZ
2625 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
2626 ifsq_stage_insert(head, stage);
28cc0c29
SZ
2627
2628 ifp->if_obytes += len;
2629 if (mcast)
2630 ifp->if_omcasts++;
28cc0c29
SZ
2631 return error;
2632 }
2633
9db4b353
SZ
2634 /*
2635 * Hold the interlock of ifnet.if_start
2636 */
f0a26983 2637 ifsq_set_started(ifsq);
9db4b353
SZ
2638 start = 1;
2639 }
f0a26983 2640 ALTQ_SQ_UNLOCK(ifsq);
9db4b353 2641
fe53d127
SZ
2642 if (!error) {
2643 ifp->if_obytes += len;
2644 if (mcast)
2645 ifp->if_omcasts++;
2646 }
9db4b353 2647
28cc0c29 2648 if (stage != NULL) {
f0a26983
SZ
2649 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
2650 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
3cab6b0d 2651 if (!avoid_start) {
f0a26983
SZ
2652 ifsq_stage_remove(head, stage);
2653 ifsq_ifstart_schedule(ifsq, 1);
3cab6b0d
SZ
2654 }
2655 return error;
2656 }
2657
f0a26983
SZ
2658 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
2659 ifsq_stage_remove(head, stage);
28cc0c29 2660 } else {
f0a26983
SZ
2661 stage->stg_cnt = 0;
2662 stage->stg_len = 0;
28cc0c29 2663 }
9db4b353
SZ
2664 }
2665
f0a26983 2666 if (!start)
087561ef 2667 return error;
9db4b353 2668
f0a26983 2669 ifsq_ifstart_try(ifsq, 0);
087561ef 2670 return error;
9db4b353
SZ
2671}
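/*
 * Illustrative sketch (not part of if.c): the link layer output path hands a
 * fully formed mbuf to ifq_dispatch(), which either stages it (when called
 * from a netisr thread) or starts transmission right away.  example_output()
 * is a hypothetical helper; no ALTQ packet attribute is supplied.
 */
static int
example_output(struct ifnet *ifp, struct mbuf *m)
{
	return ifq_dispatch(ifp, m, NULL);
}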
2672
b2632176
SZ
2673void *
2674ifa_create(int size, int flags)
2675{
2676 struct ifaddr *ifa;
2677 int i;
2678
ed20d0e3 2679 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));
b2632176
SZ
2680
2681 ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2682 if (ifa == NULL)
2683 return NULL;
2684
2685 ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2686 M_IFADDR, M_WAITOK | M_ZERO);
d5a2b87c 2687 ifa->ifa_ncnt = ncpus;
b2632176
SZ
2688 for (i = 0; i < ncpus; ++i) {
2689 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2690
2691 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2692 ifac->ifa = ifa;
2693 ifac->ifa_refcnt = 1;
2694 }
2695#ifdef IFADDR_DEBUG
2696 kprintf("alloc ifa %p %d\n", ifa, size);
2697#endif
2698 return ifa;
2699}
2700
b2632176
SZ
2701void
2702ifac_free(struct ifaddr_container *ifac, int cpu_id)
2703{
d5a2b87c 2704 struct ifaddr *ifa = ifac->ifa;
b2632176
SZ
2705
2706 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2707 KKASSERT(ifac->ifa_refcnt == 0);
40f667f2 2708 KASSERT(ifac->ifa_listmask == 0,
ed20d0e3 2709 ("ifa is still on %#x lists", ifac->ifa_listmask));
b2632176
SZ
2710
2711 ifac->ifa_magic = IFA_CONTAINER_DEAD;
2712
b2632176 2713#ifdef IFADDR_DEBUG_VERBOSE
8967ddc7 2714 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
b2632176
SZ
2715#endif
2716
d5a2b87c 2717 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
ed20d0e3 2718 ("invalid # of ifac, %d", ifa->ifa_ncnt));
d5a2b87c
SZ
2719 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2720#ifdef IFADDR_DEBUG
2721 kprintf("free ifa %p\n", ifa);
2722#endif
2723 kfree(ifa->ifa_containers, M_IFADDR);
2724 kfree(ifa, M_IFADDR);
2725 }
b2632176
SZ
2726}
2727
2728static void
002c1265 2729ifa_iflink_dispatch(netmsg_t nmsg)
b2632176
SZ
2730{
2731 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2732 struct ifaddr *ifa = msg->ifa;
2733 struct ifnet *ifp = msg->ifp;
2734 int cpu = mycpuid;
40f667f2 2735 struct ifaddr_container *ifac;
b2632176
SZ
2736
2737 crit_enter();
23027d35 2738
40f667f2 2739 ifac = &ifa->ifa_containers[cpu];
2adb7bc2 2740 ASSERT_IFAC_VALID(ifac);
40f667f2 2741 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
ed20d0e3 2742 ("ifaddr is on if_addrheads"));
23027d35 2743
40f667f2
SZ
2744 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2745 if (msg->tail)
2746 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2747 else
2748 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
23027d35 2749
b2632176
SZ
2750 crit_exit();
2751
002c1265 2752 ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
b2632176
SZ
2753}
2754
2755void
2756ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2757{
2758 struct netmsg_ifaddr msg;
2759
002c1265 2760 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2761 0, ifa_iflink_dispatch);
b2632176
SZ
2762 msg.ifa = ifa;
2763 msg.ifp = ifp;
2764 msg.tail = tail;
2765
002c1265 2766 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2767}
2768
2769static void
002c1265 2770ifa_ifunlink_dispatch(netmsg_t nmsg)
b2632176
SZ
2771{
2772 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2773 struct ifaddr *ifa = msg->ifa;
2774 struct ifnet *ifp = msg->ifp;
2775 int cpu = mycpuid;
40f667f2 2776 struct ifaddr_container *ifac;
b2632176
SZ
2777
2778 crit_enter();
23027d35 2779
40f667f2 2780 ifac = &ifa->ifa_containers[cpu];
2adb7bc2 2781 ASSERT_IFAC_VALID(ifac);
40f667f2 2782 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
ed20d0e3 2783 ("ifaddr is not on if_addrhead"));
23027d35 2784
40f667f2
SZ
2785 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2786 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
23027d35 2787
b2632176
SZ
2788 crit_exit();
2789
002c1265 2790 ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
b2632176
SZ
2791}
2792
2793void
2794ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2795{
2796 struct netmsg_ifaddr msg;
2797
002c1265 2798 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2799 0, ifa_ifunlink_dispatch);
b2632176
SZ
2800 msg.ifa = ifa;
2801 msg.ifp = ifp;
2802
002c1265 2803 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2804}
2805
2806static void
002c1265 2807ifa_destroy_dispatch(netmsg_t nmsg)
b2632176
SZ
2808{
2809 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2810
2811 IFAFREE(msg->ifa);
002c1265 2812 ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1);
b2632176
SZ
2813}
2814
2815void
2816ifa_destroy(struct ifaddr *ifa)
2817{
2818 struct netmsg_ifaddr msg;
2819
002c1265 2820 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2821 0, ifa_destroy_dispatch);
b2632176
SZ
2822 msg.ifa = ifa;
2823
002c1265 2824 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2825}
2826
2827struct lwkt_port *
d7944f0b 2828ifnet_portfn(int cpu)
b2632176 2829{
90af4fd3 2830 return &ifnet_threads[cpu].td_msgport;
b2632176
SZ
2831}
2832
c4882b7e
SZ
2833void
2834ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2835{
ea2e6532
SZ
2836 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2837
c4882b7e
SZ
2838 if (next_cpu < ncpus)
2839 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2840 else
2841 lwkt_replymsg(lmsg, 0);
2842}
2843
2a3e1dbd 2844int
c4882b7e
SZ
2845ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2846{
2847 KKASSERT(cpu < ncpus);
2a3e1dbd 2848 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
c4882b7e
SZ
2849}
2850
8967ddc7
SZ
2851void
2852ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2853{
2854 KKASSERT(cpu < ncpus);
2855 lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2856}
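/*
 * Illustrative sketch (not part of if.c): the usual pattern for a message
 * that must visit every ifnet thread in CPU order -- do the per-CPU work,
 * then forward to the next CPU; the last CPU's forward turns into a reply.
 * example_dispatch()/example_run() are hypothetical names.
 */
static void
example_dispatch(netmsg_t nmsg)
{
	/* ... per-CPU work for mycpuid would go here ... */
	ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
}

static void
example_run(void)
{
	struct netmsg_base msg;

	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, example_dispatch);
	ifnet_domsg(&msg.lmsg, 0);	/* start on CPU0 and wait for the reply */
}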
2857
c3c96e44
MD
2858/*
2859 * Generic netmsg service loop. Some protocols may roll their own but all
2860 * must do the basic command dispatch function call done here.
2861 */
2862static void
2863ifnet_service_loop(void *arg __unused)
2864{
002c1265 2865 netmsg_t msg;
c3c96e44
MD
2866
2867 while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
002c1265
MD
2868 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg"));
2869 msg->base.nm_dispatch(msg);
c3c96e44
MD
2870 }
2871}
2872
239bdb58
SZ
2873static void
2874if_start_rollup(void)
2875{
f0a26983
SZ
2876 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
2877 struct ifsubq_stage *stage;
28cc0c29 2878
f0a26983
SZ
2879 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
2880 struct ifaltq_subque *ifsq = stage->stg_subq;
3cab6b0d 2881 int is_sched = 0;
28cc0c29 2882
f0a26983 2883 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)
3cab6b0d 2884 is_sched = 1;
f0a26983 2885 ifsq_stage_remove(head, stage);
28cc0c29 2886
3cab6b0d 2887 if (is_sched) {
f0a26983 2888 ifsq_ifstart_schedule(ifsq, 1);
3cab6b0d
SZ
2889 } else {
2890 int start = 0;
28cc0c29 2891
f0a26983
SZ
2892 ALTQ_SQ_LOCK(ifsq);
2893 if (!ifsq_is_started(ifsq)) {
3cab6b0d
SZ
2894 /*
2895 * Hold the interlock of ifnet.if_start
2896 */
f0a26983 2897 ifsq_set_started(ifsq);
3cab6b0d
SZ
2898 start = 1;
2899 }
f0a26983 2900 ALTQ_SQ_UNLOCK(ifsq);
3cab6b0d
SZ
2901
2902 if (start)
f0a26983 2903 ifsq_ifstart_try(ifsq, 1);
3cab6b0d 2904 }
f0a26983
SZ
2905 KKASSERT((stage->stg_flags &
2906 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
28cc0c29 2907 }
239bdb58 2908}
239bdb58 2909
b2632176 2910static void
90af4fd3 2911ifnetinit(void *dummy __unused)
b2632176
SZ
2912{
2913 int i;
2914
2915 for (i = 0; i < ncpus; ++i) {
90af4fd3 2916 struct thread *thr = &ifnet_threads[i];
b2632176 2917
c3c96e44 2918 lwkt_create(ifnet_service_loop, NULL, NULL,
4643740a 2919 thr, TDF_NOSTART|TDF_FORCE_SPINPORT,
392cd266 2920 i, "ifnet %d", i);
b2632176 2921 netmsg_service_port_init(&thr->td_msgport);
c3c96e44 2922 lwkt_schedule(thr);
b2632176 2923 }
28cc0c29
SZ
2924
2925 for (i = 0; i < ncpus; ++i)
f0a26983 2926 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);
239bdb58 2927 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
b2632176 2928}
bd08b792
RP
2929
2930struct ifnet *
2931ifnet_byindex(unsigned short idx)
2932{
2933 if (idx > if_index)
2934 return NULL;
2935 return ifindex2ifnet[idx];
2936}
2937
2938struct ifaddr *
2939ifaddr_byindex(unsigned short idx)
2940{
2941 struct ifnet *ifp;
2942
2943 ifp = ifnet_byindex(idx);
ec27babc
RP
2944 if (!ifp)
2945 return NULL;
d79c4535 2946 return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
bd08b792 2947}
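/*
 * Illustrative sketch (not part of if.c): resolving an interface name from a
 * numeric index, e.g. one carried in a routing message.
 * example_name_byindex() is a hypothetical helper.
 */
static const char *
example_name_byindex(unsigned short idx)
{
	struct ifnet *ifp = ifnet_byindex(idx);

	return (ifp != NULL ? ifp->if_xname : NULL);
}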
aeb3c11e
RP
2948
2949void
2950if_register_com_alloc(u_char type,
2951 if_com_alloc_t *a, if_com_free_t *f)
2952{
2953
2954 KASSERT(if_com_alloc[type] == NULL,
2955 ("if_register_com_alloc: %d already registered", type));
2956 KASSERT(if_com_free[type] == NULL,
2957 ("if_register_com_alloc: %d free already registered", type));
2958
2959 if_com_alloc[type] = a;
2960 if_com_free[type] = f;
2961}
2962
2963void
2964if_deregister_com_alloc(u_char type)
2965{
2966
2967 KASSERT(if_com_alloc[type] != NULL,
2968 ("if_deregister_com_alloc: %d not registered", type));
2969 KASSERT(if_com_free[type] != NULL,
2970 ("if_deregister_com_alloc: %d free not registered", type));
2971 if_com_alloc[type] = NULL;
2972 if_com_free[type] = NULL;
2973}
a317449e
SZ
2974
2975int
2976if_ring_count2(int cnt, int cnt_max)
2977{
2978 int shift = 0;
2979
2980 KASSERT(cnt_max >= 1 && powerof2(cnt_max),
ed20d0e3 2981 ("invalid ring count max %d", cnt_max));
a317449e
SZ
2982
2983 if (cnt <= 0)
2984 cnt = cnt_max;
2985 if (cnt > ncpus2)
2986 cnt = ncpus2;
2987 if (cnt > cnt_max)
2988 cnt = cnt_max;
2989
2990 while ((1 << (shift + 1)) <= cnt)
2991 ++shift;
2992 cnt = 1 << shift;
2993
2994 KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max,
ed20d0e3 2995 ("calculate cnt %d, ncpus2 %d, cnt max %d",
a317449e
SZ
2996 cnt, ncpus2, cnt_max));
2997 return cnt;
2998}
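/*
 * Worked example for if_ring_count2() above (illustrative): with ncpus2 == 4
 * and cnt_max == 8, a requested count of 6 is clamped to ncpus2 (4) and then
 * rounded down to a power of 2, giving 4; a requested count of 0 starts from
 * cnt_max (8) and is clamped and rounded the same way, also yielding 4.
 */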
b7a0c958
SZ
2999
3000void
3001ifq_set_maxlen(struct ifaltq *ifq, int len)
3002{
f0a26983 3003 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
b7a0c958 3004}
2cc2f639
SZ
3005
3006int
3007ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused)
3008{
3009 return ALTQ_SUBQ_INDEX_DEFAULT;
3010}