tcp/tso: Add per-device TSO aggregation size limit
[dragonfly.git] / sys / net / if.c
1/*
2 * Copyright (c) 1980, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)if.c 8.3 (Berkeley) 1/4/94
34 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
35 */
36
37#include "opt_compat.h"
38#include "opt_inet6.h"
39#include "opt_inet.h"
40#include "opt_ifpoll.h"
41
42#include <sys/param.h>
43#include <sys/malloc.h>
44#include <sys/mbuf.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/priv.h>
48#include <sys/protosw.h>
49#include <sys/socket.h>
50#include <sys/socketvar.h>
51#include <sys/socketops.h>
52#include <sys/protosw.h>
53#include <sys/kernel.h>
54#include <sys/ktr.h>
55#include <sys/mutex.h>
56#include <sys/sockio.h>
57#include <sys/syslog.h>
58#include <sys/sysctl.h>
59#include <sys/domain.h>
60#include <sys/thread.h>
61#include <sys/serialize.h>
62#include <sys/bus.h>
63
64#include <sys/thread2.h>
65#include <sys/msgport2.h>
66#include <sys/mutex2.h>
67
68#include <net/if.h>
69#include <net/if_arp.h>
70#include <net/if_dl.h>
71#include <net/if_types.h>
72#include <net/if_var.h>
73#include <net/ifq_var.h>
74#include <net/radix.h>
75#include <net/route.h>
76#include <net/if_clone.h>
77#include <net/netisr.h>
78#include <net/netmsg2.h>
79
80#include <machine/atomic.h>
81#include <machine/stdarg.h>
82#include <machine/smp.h>
83
84#if defined(INET) || defined(INET6)
85/*XXX*/
86#include <netinet/in.h>
87#include <netinet/in_var.h>
88#include <netinet/if_ether.h>
89#ifdef INET6
90#include <netinet6/in6_var.h>
91#include <netinet6/in6_ifattach.h>
92#endif
93#endif
94
95#if defined(COMPAT_43)
96#include <emulation/43bsd/43bsd_socket.h>
97#endif /* COMPAT_43 */
98
99struct netmsg_ifaddr {
100 struct netmsg_base base;
101 struct ifaddr *ifa;
102 struct ifnet *ifp;
103 int tail;
104};
105
106struct ifsubq_stage_head {
107 TAILQ_HEAD(, ifsubq_stage) stg_head;
108} __cachealign;
109
110/*
111 * System initialization
112 */
113static void if_attachdomain(void *);
114static void if_attachdomain1(struct ifnet *);
115static int ifconf(u_long, caddr_t, struct ucred *);
116static void ifinit(void *);
117static void ifnetinit(void *);
118static void if_slowtimo(void *);
119static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
120static int if_rtdel(struct radix_node *, void *);
121
122#ifdef INET6
123/*
124 * XXX: declare here to avoid to include many inet6 related files..
125 * should be more generalized?
126 */
127extern void nd6_setmtu(struct ifnet *);
128#endif
129
130SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
131SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
132
133static int ifsq_stage_cntmax = 4;
134TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
135SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
136 &ifsq_stage_cntmax, 0, "ifq staging packet count max");
137
138SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL)
139/* Must be after netisr_init */
140SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL)
141
142static if_com_alloc_t *if_com_alloc[256];
143static if_com_free_t *if_com_free[256];
144
145MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
146MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
147MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");
148
149int ifqmaxlen = IFQ_MAXLEN;
150struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
151
152struct callout if_slowtimo_timer;
153
154int if_index = 0;
155struct ifnet **ifindex2ifnet = NULL;
156static struct thread ifnet_threads[MAXCPU];
157
158static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU];
159
160#ifdef notyet
161#define IFQ_KTR_STRING "ifq=%p"
162#define IFQ_KTR_ARGS struct ifaltq *ifq
163#ifndef KTR_IFQ
164#define KTR_IFQ KTR_ALL
165#endif
166KTR_INFO_MASTER(ifq);
167KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
168KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
169#define logifq(name, arg) KTR_LOG(ifq_ ## name, arg)
170
171#define IF_START_KTR_STRING "ifp=%p"
172#define IF_START_KTR_ARGS struct ifnet *ifp
173#ifndef KTR_IF_START
174#define KTR_IF_START KTR_ALL
175#endif
176KTR_INFO_MASTER(if_start);
177KTR_INFO(KTR_IF_START, if_start, run, 0,
178 IF_START_KTR_STRING, IF_START_KTR_ARGS);
179KTR_INFO(KTR_IF_START, if_start, sched, 1,
180 IF_START_KTR_STRING, IF_START_KTR_ARGS);
181KTR_INFO(KTR_IF_START, if_start, avoid, 2,
182 IF_START_KTR_STRING, IF_START_KTR_ARGS);
183KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
184 IF_START_KTR_STRING, IF_START_KTR_ARGS);
185KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
186 IF_START_KTR_STRING, IF_START_KTR_ARGS);
187#define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg)
188#endif
189
190TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);
191
192/*
193 * Network interface utility routines.
194 *
195 * Routines with ifa_ifwith* names take sockaddr *'s as
196 * parameters.
197 */
198/* ARGSUSED*/
199void
f23061d4 200ifinit(void *dummy)
201{
202 struct ifnet *ifp;
984263bc 203
204 callout_init(&if_slowtimo_timer);
205
4986965b 206 crit_enter();
984263bc 207 TAILQ_FOREACH(ifp, &ifnet, if_link) {
f0a26983 208 if (ifp->if_snd.altq_maxlen == 0) {
3e4a09e7 209 if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
ef9870ec 210 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
211 }
212 }
4986965b 213 crit_exit();
abbb44bb 214
215 if_slowtimo(0);
216}
217
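/*
 * Illustrative sketch (not part of the original file; "sc" and its
 * num_tx_desc field are hypothetical): rather than relying on the
 * fixup in ifinit() above, a driver normally sizes its own send queue
 * at attach time so the ifq roughly matches its TX ring, e.g.
 *
 *	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
 */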
218static void
219ifsq_ifstart_ipifunc(void *arg)
220{
221 struct ifaltq_subque *ifsq = arg;
222 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);
223
224 crit_enter();
225 if (lmsg->ms_flags & MSGF_DONE)
ff5fbdd8 226 lwkt_sendmsg(netisr_portfn(mycpuid), lmsg);
227 crit_exit();
228}
229
3cab6b0d 230static __inline void
f0a26983 231ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
3cab6b0d 232{
233 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
234 TAILQ_REMOVE(&head->stg_head, stage, stg_link);
235 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
236 stage->stg_cnt = 0;
237 stage->stg_len = 0;
238}
239
240static __inline void
f0a26983 241ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
3cab6b0d 242{
243 KKASSERT((stage->stg_flags &
244 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
245 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
246 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
247}
248
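/*
 * Illustrative summary (not part of the original file): the two helpers
 * above only link/unlink an ifsubq_stage onto the per-cpu
 * ifsubq_stage_heads[] list.  The staging itself is driven from
 * ifsq_ifstart_schedule() below: when called from a netisr thread and
 * net.link.stage_cntmax > 0, scheduling of ifnet.if_start is deferred
 * by queueing the stage instead of sending a message immediately,
 * which batches start requests under load.
 */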
249/*
250 * Schedule ifnet.if_start on ifnet's CPU
251 */
252static void
f0a26983 253ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
9db4b353 254{
255 int cpu;
256
3cab6b0d 257 if (!force && curthread->td_type == TD_TYPE_NETISR &&
258 ifsq_stage_cntmax > 0) {
259 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
260
261 stage->stg_cnt = 0;
262 stage->stg_len = 0;
263 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
264 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
265 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
266 return;
267 }
268
f0a26983 269 cpu = ifsq_get_cpuid(ifsq);
9db4b353 270 if (cpu != mycpuid)
f0a26983 271 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
9db4b353 272 else
f0a26983 273 ifsq_ifstart_ipifunc(ifsq);
274}
275
276/*
277 * NOTE:
278 * This function will release ifnet.if_start interlock,
279 * if ifnet.if_start does not need to be scheduled
280 */
281static __inline int
f0a26983 282ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
9db4b353 283{
f0a26983 284 if (!running || ifsq_is_empty(ifsq)
9db4b353 285#ifdef ALTQ
f0a26983 286 || ifsq->ifsq_altq->altq_tbr != NULL
287#endif
288 ) {
f0a26983 289 ALTQ_SQ_LOCK(ifsq);
290 /*
291 * ifnet.if_start interlock is released, if:
292 * 1) Hardware cannot take any packets, due to
293 * o interface is marked down
294 * o hardware queue is full (ifq_is_oactive)
295 * Under the second situation, hardware interrupt
296 * or polling(4) will call/schedule ifnet.if_start
297 * when hardware queue is ready
298 * 2) There are no packets in ifnet.if_snd.
299 * Further ifq_dispatch or ifq_handoff will call/
300 * schedule ifnet.if_start
301 * 3) TBR is used and it does not allow further
302 * dequeueing.
303 * TBR callout will call ifnet.if_start
304 */
305 if (!running || !ifsq_data_ready(ifsq)) {
306 ifsq_clr_started(ifsq);
307 ALTQ_SQ_UNLOCK(ifsq);
308 return 0;
309 }
f0a26983 310 ALTQ_SQ_UNLOCK(ifsq);
311 }
312 return 1;
313}
314
315static void
f0a26983 316ifsq_ifstart_dispatch(netmsg_t msg)
9db4b353 317{
002c1265 318 struct lwkt_msg *lmsg = &msg->base.lmsg;
319 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
320 struct ifnet *ifp = ifsq_get_ifp(ifsq);
404c9fd9 321 int running = 0, need_sched;
322
323 crit_enter();
324 lwkt_replymsg(lmsg, 0); /* reply ASAP */
325 crit_exit();
326
f0a26983 327 if (mycpuid != ifsq_get_cpuid(ifsq)) {
9db4b353 328 /*
404c9fd9 329 * We need to chase the ifnet CPU change.
9db4b353 330 */
f0a26983 331 ifsq_ifstart_schedule(ifsq, 1);
404c9fd9 332 return;
9db4b353 333 }
9db4b353 334
3c4cd924 335 ifnet_serialize_tx(ifp, ifsq);
336 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
337 ifp->if_start(ifp, ifsq);
338 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
404c9fd9 339 running = 1;
9db4b353 340 }
f0a26983 341 need_sched = ifsq_ifstart_need_schedule(ifsq, running);
3c4cd924 342 ifnet_deserialize_tx(ifp, ifsq);
343
344 if (need_sched) {
345 /*
346 * More data needs to be transmitted; ifnet.if_start is
347 * scheduled on ifnet's CPU, and we keep going.
348 * NOTE: ifnet.if_start interlock is not released.
349 */
f0a26983 350 ifsq_ifstart_schedule(ifsq, 0);
351 }
352}
353
354/* Device driver ifnet.if_start helper function */
355void
f0a26983 356ifsq_devstart(struct ifaltq_subque *ifsq)
9db4b353 357{
f0a26983 358 struct ifnet *ifp = ifsq_get_ifp(ifsq);
359 int running = 0;
360
3c4cd924 361 ASSERT_IFNET_SERIALIZED_TX(ifp, ifsq);
9db4b353 362
363 ALTQ_SQ_LOCK(ifsq);
364 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
365 ALTQ_SQ_UNLOCK(ifsq);
366 return;
367 }
368 ifsq_set_started(ifsq);
369 ALTQ_SQ_UNLOCK(ifsq);
9db4b353 370
f0a26983 371 ifp->if_start(ifp, ifsq);
9db4b353 372
f0a26983 373 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
374 running = 1;
375
f0a26983 376 if (ifsq_ifstart_need_schedule(ifsq, running)) {
377 /*
378 * More data needs to be transmitted; ifnet.if_start is
379 * scheduled on ifnet's CPU, and we keep going.
380 * NOTE: ifnet.if_start interlock is not released.
381 */
f0a26983 382 ifsq_ifstart_schedule(ifsq, 0);
383 }
384}
385
386void
387if_devstart(struct ifnet *ifp)
388{
389 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
390}
391
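/*
 * Illustrative sketch (hypothetical driver, not part of the original
 * file): a typical consumer of if_devstart()/ifsq_devstart() is a
 * TX-completion path that has just freed descriptors while holding the
 * TX serializer:
 *
 *	static void
 *	foo_txeof(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *		struct ifaltq_subque *ifsq =
 *		    ifq_get_subq_default(&ifp->if_snd);
 *
 *		... reclaim completed TX descriptors, clear oactive ...
 *		if (!ifsq_is_empty(ifsq))
 *			ifsq_devstart(ifsq);
 *	}
 */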
392/* Device driver ifnet.if_start schedule helper function */
393void
394ifsq_devstart_sched(struct ifaltq_subque *ifsq)
395{
396 ifsq_ifstart_schedule(ifsq, 1);
397}
398
399void
400if_devstart_sched(struct ifnet *ifp)
401{
f0a26983 402 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
403}
404
405static void
406if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
407{
408 lwkt_serialize_enter(ifp->if_serializer);
409}
410
411static void
412if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
413{
414 lwkt_serialize_exit(ifp->if_serializer);
415}
416
417static int
418if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
419{
420 return lwkt_serialize_try(ifp->if_serializer);
421}
422
423#ifdef INVARIANTS
424static void
425if_default_serialize_assert(struct ifnet *ifp,
426 enum ifnet_serialize slz __unused,
427 boolean_t serialized)
428{
429 if (serialized)
430 ASSERT_SERIALIZED(ifp->if_serializer);
431 else
432 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
433}
434#endif
435
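/*
 * Illustrative note (not part of the original file): most drivers do not
 * install the if_serialize/if_deserialize/if_tryserialize methods above
 * themselves.  They either pass their own serializer to if_attach(), e.g.
 * (hypothetical softc field):
 *
 *	lwkt_serialize_init(&sc->foo_serialize);
 *	if_attach(ifp, &sc->foo_serialize);
 *
 * or pass NULL and let if_attach() fall back to the embedded
 * if_default_serializer, as implemented below.
 */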
436/*
437 * Attach an interface to the list of "active" interfaces.
438 *
439 * The serializer is optional. If non-NULL access to the interface
440 * may be MPSAFE.
441 */
442void
78195a76 443if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
444{
445 unsigned socksize, ifasize;
446 int namelen, masklen;
447 struct sockaddr_dl *sdl;
448 struct ifaddr *ifa;
e3e4574a 449 struct ifaltq *ifq;
f0a26983 450 int i, q;
590b8cd4 451
984263bc 452 static int if_indexlim = 8;
984263bc 453
454 if (ifp->if_serialize != NULL) {
455 KASSERT(ifp->if_deserialize != NULL &&
456 ifp->if_tryserialize != NULL &&
457 ifp->if_serialize_assert != NULL,
ed20d0e3 458 ("serialize functions are partially setup"));
459
460 /*
461 * If the device supplies serialize functions,
462 * then clear if_serializer to catch any invalid
463 * usage of this field.
464 */
465 KASSERT(serializer == NULL,
466 ("both serialize functions and default serializer "
ed20d0e3 467 "are supplied"));
ae474cfa 468 ifp->if_serializer = NULL;
469 } else {
470 KASSERT(ifp->if_deserialize == NULL &&
471 ifp->if_tryserialize == NULL &&
472 ifp->if_serialize_assert == NULL,
ed20d0e3 473 ("serialize functions are partially setup"));
474 ifp->if_serialize = if_default_serialize;
475 ifp->if_deserialize = if_default_deserialize;
476 ifp->if_tryserialize = if_default_tryserialize;
477#ifdef INVARIANTS
478 ifp->if_serialize_assert = if_default_serialize_assert;
479#endif
480
481 /*
482 * The serializer can be passed in from the device,
483 * allowing the same serializer to be used for both
484 * the interrupt interlock and the device queue.
485 * If not specified, the netif structure will use an
486 * embedded serializer.
487 */
488 if (serializer == NULL) {
489 serializer = &ifp->if_default_serializer;
490 lwkt_serialize_init(serializer);
491 }
492 ifp->if_serializer = serializer;
493 }
494
495 mtx_init(&ifp->if_ioctl_mtx);
496 mtx_lock(&ifp->if_ioctl_mtx);
497
498 TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
499 ifp->if_index = ++if_index;
b2632176 500
501 /*
502 * XXX -
503 * The old code would work if the interface passed a pre-existing
504 * chain of ifaddrs to this code. We don't trust our callers to
505 * properly initialize the tailq, however, so we no longer allow
506 * this unlikely case.
507 */
508 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
509 M_IFADDR, M_WAITOK | M_ZERO);
510 for (i = 0; i < ncpus; ++i)
511 TAILQ_INIT(&ifp->if_addrheads[i]);
512
984263bc 513 TAILQ_INIT(&ifp->if_prefixhead);
441d34b2 514 TAILQ_INIT(&ifp->if_multiaddrs);
2097a299 515 TAILQ_INIT(&ifp->if_groups);
984263bc 516 getmicrotime(&ifp->if_lastchange);
141697b6 517 if (ifindex2ifnet == NULL || if_index >= if_indexlim) {
590b8cd4 518 unsigned int n;
141697b6 519 struct ifnet **q;
520
521 if_indexlim <<= 1;
522
523 /* grow ifindex2ifnet */
141697b6 524 n = if_indexlim * sizeof(*q);
efda3bd0 525 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
984263bc 526 if (ifindex2ifnet) {
f23061d4 527 bcopy(ifindex2ifnet, q, n/2);
efda3bd0 528 kfree(ifindex2ifnet, M_IFADDR);
984263bc 529 }
141697b6 530 ifindex2ifnet = q;
531 }
532
533 ifindex2ifnet[if_index] = ifp;
534
535 /*
536 * create a Link Level name for this device
537 */
3e4a09e7 538 namelen = strlen(ifp->if_xname);
60615e94 539 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
540 socksize = masklen + ifp->if_addrlen;
541#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
542 if (socksize < sizeof(*sdl))
543 socksize = sizeof(*sdl);
544 socksize = ROUNDUP(socksize);
60615e94 545#undef ROUNDUP
590b8cd4 546 ifasize = sizeof(struct ifaddr) + 2 * socksize;
b2632176 547 ifa = ifa_create(ifasize, M_WAITOK);
548 sdl = (struct sockaddr_dl *)(ifa + 1);
549 sdl->sdl_len = socksize;
550 sdl->sdl_family = AF_LINK;
551 bcopy(ifp->if_xname, sdl->sdl_data, namelen);
552 sdl->sdl_nlen = namelen;
553 sdl->sdl_index = ifp->if_index;
554 sdl->sdl_type = ifp->if_type;
141697b6 555 ifp->if_lladdr = ifa;
556 ifa->ifa_ifp = ifp;
557 ifa->ifa_rtrequest = link_rtrequest;
558 ifa->ifa_addr = (struct sockaddr *)sdl;
559 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
560 ifa->ifa_netmask = (struct sockaddr *)sdl;
561 sdl->sdl_len = masklen;
562 while (namelen != 0)
563 sdl->sdl_data[--namelen] = 0xff;
b2632176 564 ifa_iflink(ifa, ifp, 0 /* Insert head */);
984263bc 565
f2bd8b67 566 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
71fc104f 567 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
f2bd8b67 568
569 if (ifp->if_mapsubq == NULL)
570 ifp->if_mapsubq = ifq_mapsubq_default;
571
572 ifq = &ifp->if_snd;
573 ifq->altq_type = 0;
574 ifq->altq_disc = NULL;
575 ifq->altq_flags &= ALTQF_CANTCHANGE;
576 ifq->altq_tbr = NULL;
577 ifq->altq_ifp = ifp;
4d723e5a 578
579 if (ifq->altq_subq_cnt <= 0)
580 ifq->altq_subq_cnt = 1;
581 ifq->altq_subq = kmalloc_cachealign(
582 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
28cc0c29 583 M_DEVBUF, M_WAITOK | M_ZERO);
28cc0c29 584
585 if (ifq->altq_maxlen == 0) {
586 if_printf(ifp, "driver didn't set ifq_maxlen\n");
587 ifq_set_maxlen(ifq, ifqmaxlen);
42fdf81e
SZ
588 }
589
590 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
591 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
592
593 ALTQ_SQ_LOCK_INIT(ifsq);
594 ifsq->ifsq_index = q;
595
596 ifsq->ifsq_altq = ifq;
597 ifsq->ifsq_ifp = ifp;
598
599 ifsq->ifq_maxlen = ifq->altq_maxlen;
600 ifsq->ifsq_prepended = NULL;
601 ifsq->ifsq_started = 0;
602 ifsq->ifsq_hw_oactive = 0;
603 ifsq_set_cpuid(ifsq, 0);
604
605 ifsq->ifsq_stage =
606 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
607 M_DEVBUF, M_WAITOK | M_ZERO);
608 for (i = 0; i < ncpus; ++i)
609 ifsq->ifsq_stage[i].stg_subq = ifsq;
610
611 ifsq->ifsq_ifstart_nmsg =
612 kmalloc(ncpus * sizeof(struct netmsg_base),
613 M_LWKTMSG, M_WAITOK);
614 for (i = 0; i < ncpus; ++i) {
615 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
616 &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
617 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
618 }
619 }
620 ifq_set_classic(ifq);
621
9c70fe43 622 if (!SLIST_EMPTY(&domains))
698ac46c
HS
623 if_attachdomain1(ifp);
624
984263bc
MD
625 /* Announce the interface. */
626 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
9683f229
MD
627
628 mtx_unlock(&ifp->if_ioctl_mtx);
984263bc
MD
629}
630
698ac46c
HS
631static void
632if_attachdomain(void *dummy)
633{
634 struct ifnet *ifp;
698ac46c 635
4986965b
JS
636 crit_enter();
637 TAILQ_FOREACH(ifp, &ifnet, if_list)
698ac46c 638 if_attachdomain1(ifp);
4986965b 639 crit_exit();
698ac46c
HS
640}
641SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
642 if_attachdomain, NULL);
643
644static void
645if_attachdomain1(struct ifnet *ifp)
646{
647 struct domain *dp;
698ac46c 648
4986965b 649 crit_enter();
698ac46c
HS
650
651 /* address family dependent data region */
652 bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
9c70fe43 653 SLIST_FOREACH(dp, &domains, dom_next)
698ac46c
HS
654 if (dp->dom_ifattach)
655 ifp->if_afdata[dp->dom_family] =
656 (*dp->dom_ifattach)(ifp);
4986965b 657 crit_exit();
698ac46c
HS
658}
659
984263bc 660/*
c727e142
SZ
661 * Purge all addresses whose type is _not_ AF_LINK
662 */
663void
664if_purgeaddrs_nolink(struct ifnet *ifp)
665{
b2632176
SZ
666 struct ifaddr_container *ifac, *next;
667
668 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
669 ifa_link, next) {
670 struct ifaddr *ifa = ifac->ifa;
c727e142 671
c727e142
SZ
672 /* Leave link ifaddr as it is */
673 if (ifa->ifa_addr->sa_family == AF_LINK)
674 continue;
675#ifdef INET
676 /* XXX: Ugly!! ad hoc just for INET */
677 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
678 struct ifaliasreq ifr;
b2632176
SZ
679#ifdef IFADDR_DEBUG_VERBOSE
680 int i;
681
682 kprintf("purge in4 addr %p: ", ifa);
683 for (i = 0; i < ncpus; ++i)
684 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
685 kprintf("\n");
686#endif
c727e142
SZ
687
688 bzero(&ifr, sizeof ifr);
689 ifr.ifra_addr = *ifa->ifa_addr;
690 if (ifa->ifa_dstaddr)
691 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
692 if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
693 NULL) == 0)
694 continue;
695 }
696#endif /* INET */
697#ifdef INET6
698 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
b2632176
SZ
699#ifdef IFADDR_DEBUG_VERBOSE
700 int i;
701
702 kprintf("purge in6 addr %p: ", ifa);
703 for (i = 0; i < ncpus; ++i)
704 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
705 kprintf("\n");
706#endif
707
c727e142
SZ
708 in6_purgeaddr(ifa);
709 /* ifp_addrhead is already updated */
710 continue;
711 }
712#endif /* INET6 */
b2632176
SZ
713 ifa_ifunlink(ifa, ifp);
714 ifa_destroy(ifa);
c727e142
SZ
715 }
716}
717
5804f3d1
SZ
718static void
719ifq_stage_detach_handler(netmsg_t nmsg)
720{
721 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
f0a26983 722 int q;
5804f3d1 723
f0a26983
SZ
724 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
725 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
726 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);
727
728 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
729 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
730 }
5804f3d1
SZ
731 lwkt_replymsg(&nmsg->lmsg, 0);
732}
733
734static void
735ifq_stage_detach(struct ifaltq *ifq)
736{
737 struct netmsg_base base;
738 int cpu;
739
740 netmsg_init(&base, NULL, &curthread->td_msgport, 0,
741 ifq_stage_detach_handler);
742 base.lmsg.u.ms_resultp = ifq;
743
744 for (cpu = 0; cpu < ncpus; ++cpu)
745 lwkt_domsg(netisr_portfn(cpu), &base.lmsg, 0);
746}
747
c727e142 748/*
984263bc
MD
749 * Detach an interface, removing it from the
750 * list of "active" interfaces.
751 */
752void
f23061d4 753if_detach(struct ifnet *ifp)
984263bc 754{
984263bc 755 struct radix_node_head *rnh;
f0a26983 756 int i, q;
ecdefdda 757 int cpu, origcpu;
698ac46c 758 struct domain *dp;
984263bc 759
f2bd8b67
JS
760 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
761
984263bc
MD
762 /*
763 * Remove routes and flush queues.
764 */
4986965b 765 crit_enter();
b3a7093f
SZ
766#ifdef IFPOLL_ENABLE
767 if (ifp->if_flags & IFF_NPOLLING)
768 ifpoll_deregister(ifp);
769#endif
984263bc
MD
770 if_down(ifp);
771
5b1156d4 772#ifdef ALTQ
4d723e5a
JS
773 if (ifq_is_enabled(&ifp->if_snd))
774 altq_disable(&ifp->if_snd);
775 if (ifq_is_attached(&ifp->if_snd))
776 altq_detach(&ifp->if_snd);
5b1156d4 777#endif
4d723e5a 778
984263bc 779 /*
984263bc
MD
780 * Clean up all addresses.
781 */
141697b6 782 ifp->if_lladdr = NULL;
984263bc 783
c727e142 784 if_purgeaddrs_nolink(ifp);
b2632176 785 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
c727e142
SZ
786 struct ifaddr *ifa;
787
b2632176 788 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
c727e142 789 KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
27eaa4f1 790 ("non-link ifaddr is left on if_addrheads"));
984263bc 791
b2632176
SZ
792 ifa_ifunlink(ifa, ifp);
793 ifa_destroy(ifa);
794 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
27eaa4f1 795 ("there are still ifaddrs left on if_addrheads"));
984263bc
MD
796 }
797
a98eb818
JS
798#ifdef INET
799 /*
800 * Remove all IPv4 kernel structures related to ifp.
801 */
802 in_ifdetach(ifp);
803#endif
804
984263bc
MD
805#ifdef INET6
806 /*
807 * Remove all IPv6 kernel structs related to ifp. This should be done
808 * before removing routing entries below, since IPv6 interface direct
809 * routes are expected to be removed by the IPv6-specific kernel API.
810 * Otherwise, the kernel will detect the inconsistency and complain about it.
811 */
812 in6_ifdetach(ifp);
813#endif
814
815 /*
816 * Delete all remaining routes using this interface
817 * Unfortunately the only way to do this is to slog through
818 * the entire routing table looking for routes which point
819 * to this interface...oh well...
820 */
ecdefdda 821 origcpu = mycpuid;
271d38c4 822 for (cpu = 0; cpu < ncpus; cpu++) {
ecdefdda
MD
823 lwkt_migratecpu(cpu);
824 for (i = 1; i <= AF_MAX; i++) {
b2632176 825 if ((rnh = rt_tables[cpu][i]) == NULL)
ecdefdda
MD
826 continue;
827 rnh->rnh_walktree(rnh, if_rtdel, ifp);
828 }
984263bc 829 }
ecdefdda 830 lwkt_migratecpu(origcpu);
984263bc
MD
831
832 /* Announce that the interface is gone. */
833 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
71fc104f 834 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
984263bc 835
9c70fe43 836 SLIST_FOREACH(dp, &domains, dom_next)
698ac46c
HS
837 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
838 (*dp->dom_ifdetach)(ifp,
839 ifp->if_afdata[dp->dom_family]);
698ac46c 840
141697b6
JS
841 /*
842 * Remove interface from ifindex2ifnet[] and maybe decrement if_index.
843 */
75857e7c 844 ifindex2ifnet[ifp->if_index] = NULL;
141697b6
JS
845 while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
846 if_index--;
75857e7c 847
984263bc 848 TAILQ_REMOVE(&ifnet, ifp, if_link);
b2632176 849 kfree(ifp->if_addrheads, M_IFADDR);
5804f3d1
SZ
850
851 lwkt_synchronize_ipiqs("if_detach");
852 ifq_stage_detach(&ifp->if_snd);
853
f0a26983
SZ
854 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
855 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];
856
857 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
858 kfree(ifsq->ifsq_stage, M_DEVBUF);
859 }
407cde39
SZ
860 kfree(ifp->if_snd.altq_subq, M_DEVBUF);
861
4986965b 862 crit_exit();
984263bc
MD
863}
864
865/*
315a7da3
JL
866 * Create interface group without members
867 */
868struct ifg_group *
869if_creategroup(const char *groupname)
870{
871 struct ifg_group *ifg = NULL;
872
873 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
874 M_TEMP, M_NOWAIT)) == NULL)
875 return (NULL);
876
877 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
878 ifg->ifg_refcnt = 0;
879 ifg->ifg_carp_demoted = 0;
880 TAILQ_INIT(&ifg->ifg_members);
881#if NPF > 0
882 pfi_attach_ifgroup(ifg);
883#endif
884 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);
885
886 return (ifg);
887}
888
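/*
 * Illustrative sketch (not part of the original file; the group name is
 * arbitrary): pseudo-interfaces usually join a group named after their
 * driver so that pf(4) rules and ifconfig can address them collectively:
 *
 *	if_addgroup(ifp, "tap");
 *	...
 *	if_delgroup(ifp, "tap");
 *
 * if_creategroup() is called implicitly by if_addgroup() below when the
 * group does not exist yet.
 */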
889/*
890 * Add a group to an interface
891 */
892int
893if_addgroup(struct ifnet *ifp, const char *groupname)
894{
895 struct ifg_list *ifgl;
896 struct ifg_group *ifg = NULL;
897 struct ifg_member *ifgm;
898
899 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
900 groupname[strlen(groupname) - 1] <= '9')
901 return (EINVAL);
902
903 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
904 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
905 return (EEXIST);
906
907 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
908 return (ENOMEM);
909
910 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
911 kfree(ifgl, M_TEMP);
912 return (ENOMEM);
913 }
914
915 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
916 if (!strcmp(ifg->ifg_group, groupname))
917 break;
918
919 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
920 kfree(ifgl, M_TEMP);
921 kfree(ifgm, M_TEMP);
922 return (ENOMEM);
923 }
924
925 ifg->ifg_refcnt++;
926 ifgl->ifgl_group = ifg;
927 ifgm->ifgm_ifp = ifp;
928
929 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
930 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
931
932#if NPF > 0
933 pfi_group_change(groupname);
934#endif
935
936 return (0);
937}
938
939/*
940 * Remove a group from an interface
941 */
942int
943if_delgroup(struct ifnet *ifp, const char *groupname)
944{
945 struct ifg_list *ifgl;
946 struct ifg_member *ifgm;
947
948 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
949 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
950 break;
951 if (ifgl == NULL)
952 return (ENOENT);
953
954 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
955
956 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
957 if (ifgm->ifgm_ifp == ifp)
958 break;
959
960 if (ifgm != NULL) {
961 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
962 kfree(ifgm, M_TEMP);
963 }
964
965 if (--ifgl->ifgl_group->ifg_refcnt == 0) {
966 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
967#if NPF > 0
968 pfi_detach_ifgroup(ifgl->ifgl_group);
969#endif
970 kfree(ifgl->ifgl_group, M_TEMP);
971 }
972
973 kfree(ifgl, M_TEMP);
974
975#if NPF > 0
976 pfi_group_change(groupname);
977#endif
978
979 return (0);
980}
981
982/*
983 * Stores all groups from an interface in memory pointed
984 * to by data
985 */
986int
987if_getgroup(caddr_t data, struct ifnet *ifp)
988{
989 int len, error;
990 struct ifg_list *ifgl;
991 struct ifg_req ifgrq, *ifgp;
992 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
993
994 if (ifgr->ifgr_len == 0) {
995 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
996 ifgr->ifgr_len += sizeof(struct ifg_req);
997 return (0);
998 }
999
1000 len = ifgr->ifgr_len;
1001 ifgp = ifgr->ifgr_groups;
1002 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
1003 if (len < sizeof(ifgrq))
1004 return (EINVAL);
1005 bzero(&ifgrq, sizeof ifgrq);
1006 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
1007 sizeof(ifgrq.ifgrq_group));
1008 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1009 sizeof(struct ifg_req))))
1010 return (error);
1011 len -= sizeof(ifgrq);
1012 ifgp++;
1013 }
1014
1015 return (0);
1016}
1017
1018/*
1019 * Stores all members of a group in memory pointed to by data
1020 */
1021int
1022if_getgroupmembers(caddr_t data)
1023{
1024 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
1025 struct ifg_group *ifg;
1026 struct ifg_member *ifgm;
1027 struct ifg_req ifgrq, *ifgp;
1028 int len, error;
1029
1030 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
1031 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
1032 break;
1033 if (ifg == NULL)
1034 return (ENOENT);
1035
1036 if (ifgr->ifgr_len == 0) {
1037 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
1038 ifgr->ifgr_len += sizeof(ifgrq);
1039 return (0);
1040 }
1041
1042 len = ifgr->ifgr_len;
1043 ifgp = ifgr->ifgr_groups;
1044 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
1045 if (len < sizeof(ifgrq))
1046 return (EINVAL);
1047 bzero(&ifgrq, sizeof ifgrq);
1048 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
1049 sizeof(ifgrq.ifgrq_member));
1050 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
1051 sizeof(struct ifg_req))))
1052 return (error);
1053 len -= sizeof(ifgrq);
1054 ifgp++;
1055 }
1056
1057 return (0);
1058}
1059
1060/*
984263bc 1061 * Delete Routes for a Network Interface
f23061d4 1062 *
1063 * Called for each routing entry via the rnh->rnh_walktree() call above
1064 * to delete all route entries referencing a detaching network interface.
1065 *
1066 * Arguments:
1067 * rn pointer to node in the routing table
1068 * arg argument passed to rnh->rnh_walktree() - detaching interface
1069 *
1070 * Returns:
1071 * 0 successful
1072 * errno failed - reason indicated
1073 *
1074 */
1075static int
f23061d4 1076if_rtdel(struct radix_node *rn, void *arg)
984263bc
MD
1077{
1078 struct rtentry *rt = (struct rtentry *)rn;
1079 struct ifnet *ifp = arg;
1080 int err;
1081
1082 if (rt->rt_ifp == ifp) {
1083
1084 /*
1085 * Protect (sorta) against walktree recursion problems
1086 * with cloned routes
1087 */
f23061d4 1088 if (!(rt->rt_flags & RTF_UP))
984263bc
MD
1089 return (0);
1090
1091 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1092 rt_mask(rt), rt->rt_flags,
2038fb68 1093 NULL);
984263bc
MD
1094 if (err) {
1095 log(LOG_WARNING, "if_rtdel: error %d\n", err);
1096 }
1097 }
1098
1099 return (0);
1100}
1101
1102/*
1103 * Locate an interface based on a complete address.
1104 */
984263bc 1105struct ifaddr *
f23061d4 1106ifa_ifwithaddr(struct sockaddr *addr)
984263bc 1107{
82ed7fc2 1108 struct ifnet *ifp;
984263bc 1109
b2632176
SZ
1110 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1111 struct ifaddr_container *ifac;
1112
1113 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1114 struct ifaddr *ifa = ifac->ifa;
1115
1116 if (ifa->ifa_addr->sa_family != addr->sa_family)
1117 continue;
1118 if (sa_equal(addr, ifa->ifa_addr))
1119 return (ifa);
1120 if ((ifp->if_flags & IFF_BROADCAST) &&
1121 ifa->ifa_broadaddr &&
1122 /* IPv6 doesn't have broadcast */
1123 ifa->ifa_broadaddr->sa_len != 0 &&
1124 sa_equal(ifa->ifa_broadaddr, addr))
1125 return (ifa);
1126 }
984263bc 1127 }
b2632176 1128 return (NULL);
984263bc
MD
1129}
1130/*
1131 * Locate the point-to-point interface with a given destination address.
1132 */
984263bc 1133struct ifaddr *
f23061d4 1134ifa_ifwithdstaddr(struct sockaddr *addr)
984263bc 1135{
82ed7fc2 1136 struct ifnet *ifp;
984263bc 1137
b2632176
SZ
1138 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1139 struct ifaddr_container *ifac;
1140
1141 if (!(ifp->if_flags & IFF_POINTOPOINT))
1142 continue;
1143
1144 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1145 struct ifaddr *ifa = ifac->ifa;
1146
984263bc
MD
1147 if (ifa->ifa_addr->sa_family != addr->sa_family)
1148 continue;
0c3c561c
JH
1149 if (ifa->ifa_dstaddr &&
1150 sa_equal(addr, ifa->ifa_dstaddr))
984263bc 1151 return (ifa);
b2632176 1152 }
984263bc 1153 }
b2632176 1154 return (NULL);
984263bc
MD
1155}
1156
1157/*
1158 * Find an interface on a specific network. If many match, the
1159 * most specific one is chosen.
1160 */
1161struct ifaddr *
f23061d4 1162ifa_ifwithnet(struct sockaddr *addr)
984263bc 1163{
82ed7fc2 1164 struct ifnet *ifp;
b2632176 1165 struct ifaddr *ifa_maybe = NULL;
984263bc
MD
1166 u_int af = addr->sa_family;
1167 char *addr_data = addr->sa_data, *cplim;
1168
1169 /*
1170 * AF_LINK addresses can be looked up directly by their index number,
1171 * so do that if we can.
1172 */
1173 if (af == AF_LINK) {
b2632176 1174 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
590b8cd4 1175
b2632176
SZ
1176 if (sdl->sdl_index && sdl->sdl_index <= if_index)
1177 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
984263bc
MD
1178 }
1179
1180 /*
1181 * Scan through each interface, looking for ones that have
1182 * addresses in this address family.
1183 */
1184 TAILQ_FOREACH(ifp, &ifnet, if_link) {
b2632176
SZ
1185 struct ifaddr_container *ifac;
1186
1187 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1188 struct ifaddr *ifa = ifac->ifa;
82ed7fc2 1189 char *cp, *cp2, *cp3;
984263bc
MD
1190
1191 if (ifa->ifa_addr->sa_family != af)
1192next: continue;
1193 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
1194 /*
1195 * This is a bit broken as it doesn't
1196 * take into account that the remote end may
1197 * be a single node in the network we are
1198 * looking for.
1199 * The trouble is that we don't know the
1200 * netmask for the remote end.
1201 */
0c3c561c
JH
1202 if (ifa->ifa_dstaddr != NULL &&
1203 sa_equal(addr, ifa->ifa_dstaddr))
f23061d4 1204 return (ifa);
984263bc
MD
1205 } else {
1206 /*
1207 * if we have a special address handler,
1208 * then use it instead of the generic one.
1209 */
f23061d4 1210 if (ifa->ifa_claim_addr) {
984263bc
MD
1211 if ((*ifa->ifa_claim_addr)(ifa, addr)) {
1212 return (ifa);
1213 } else {
1214 continue;
1215 }
1216 }
1217
1218 /*
1219 * Scan all the bits in the ifa's address.
1220 * If a bit disagrees with what we are
1221 * looking for, mask it with the netmask
1222 * to see if it really matters.
1223 * (A byte at a time)
1224 */
1225 if (ifa->ifa_netmask == 0)
1226 continue;
1227 cp = addr_data;
1228 cp2 = ifa->ifa_addr->sa_data;
1229 cp3 = ifa->ifa_netmask->sa_data;
590b8cd4
JH
1230 cplim = ifa->ifa_netmask->sa_len +
1231 (char *)ifa->ifa_netmask;
984263bc
MD
1232 while (cp3 < cplim)
1233 if ((*cp++ ^ *cp2++) & *cp3++)
1234 goto next; /* next address! */
1235 /*
1236 * If the netmask of what we just found
1237 * is more specific than what we had before
1238 * (if we had one) then remember the new one
1239 * before continuing to search
1240 * for an even better one.
1241 */
4090d6ff 1242 if (ifa_maybe == NULL ||
f23061d4
JH
1243 rn_refines((char *)ifa->ifa_netmask,
1244 (char *)ifa_maybe->ifa_netmask))
984263bc
MD
1245 ifa_maybe = ifa;
1246 }
1247 }
1248 }
1249 return (ifa_maybe);
1250}
1251
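/*
 * Illustrative sketch (not part of the original file; "ip" is a
 * hypothetical in_addr_t in network order): callers hand these lookup
 * routines a fully formed sockaddr, for example to find the interface
 * owning a configured IPv4 network:
 *
 *	struct sockaddr_in sin;
 *
 *	bzero(&sin, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = ip;
 *	ifa = ifa_ifwithnet((struct sockaddr *)&sin);
 *	if (ifa != NULL)
 *		ifp = ifa->ifa_ifp;
 */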
1252/*
1253 * Find an interface address specific to an interface best matching
1254 * a given address.
1255 */
1256struct ifaddr *
f23061d4 1257ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
984263bc 1258{
b2632176 1259 struct ifaddr_container *ifac;
82ed7fc2
RG
1260 char *cp, *cp2, *cp3;
1261 char *cplim;
4090d6ff 1262 struct ifaddr *ifa_maybe = NULL;
984263bc
MD
1263 u_int af = addr->sa_family;
1264
1265 if (af >= AF_MAX)
1266 return (0);
b2632176
SZ
1267 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1268 struct ifaddr *ifa = ifac->ifa;
1269
984263bc
MD
1270 if (ifa->ifa_addr->sa_family != af)
1271 continue;
4090d6ff 1272 if (ifa_maybe == NULL)
984263bc 1273 ifa_maybe = ifa;
0c3c561c
JH
1274 if (ifa->ifa_netmask == NULL) {
1275 if (sa_equal(addr, ifa->ifa_addr) ||
1276 (ifa->ifa_dstaddr != NULL &&
1277 sa_equal(addr, ifa->ifa_dstaddr)))
984263bc
MD
1278 return (ifa);
1279 continue;
1280 }
1281 if (ifp->if_flags & IFF_POINTOPOINT) {
0c3c561c 1282 if (sa_equal(addr, ifa->ifa_dstaddr))
984263bc
MD
1283 return (ifa);
1284 } else {
1285 cp = addr->sa_data;
1286 cp2 = ifa->ifa_addr->sa_data;
1287 cp3 = ifa->ifa_netmask->sa_data;
1288 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1289 for (; cp3 < cplim; cp3++)
1290 if ((*cp++ ^ *cp2++) & *cp3)
1291 break;
1292 if (cp3 == cplim)
1293 return (ifa);
1294 }
1295 }
1296 return (ifa_maybe);
1297}
1298
984263bc
MD
1299/*
1300 * Default action when installing a route with a Link Level gateway.
1301 * Lookup an appropriate real ifa to point to.
1302 * This should be moved to /sys/net/link.c eventually.
1303 */
1304static void
f23061d4 1305link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
984263bc 1306{
82ed7fc2 1307 struct ifaddr *ifa;
984263bc
MD
1308 struct sockaddr *dst;
1309 struct ifnet *ifp;
1310
f23061d4
JH
1311 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
1312 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
984263bc
MD
1313 return;
1314 ifa = ifaof_ifpforaddr(dst, ifp);
f23061d4 1315 if (ifa != NULL) {
984263bc 1316 IFAFREE(rt->rt_ifa);
f23061d4 1317 IFAREF(ifa);
984263bc 1318 rt->rt_ifa = ifa;
984263bc
MD
1319 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1320 ifa->ifa_rtrequest(cmd, rt, info);
1321 }
1322}
1323
1324/*
1325 * Mark an interface down and notify protocols of
1326 * the transition.
1327 * NOTE: must be called at splnet or equivalent.
1328 */
1329void
f23061d4 1330if_unroute(struct ifnet *ifp, int flag, int fam)
984263bc 1331{
b2632176 1332 struct ifaddr_container *ifac;
984263bc
MD
1333
1334 ifp->if_flags &= ~flag;
1335 getmicrotime(&ifp->if_lastchange);
b2632176
SZ
1336 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1337 struct ifaddr *ifa = ifac->ifa;
1338
984263bc 1339 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
91be174d 1340 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
b2632176 1341 }
9275f515 1342 ifq_purge_all(&ifp->if_snd);
984263bc
MD
1343 rt_ifmsg(ifp);
1344}
1345
1346/*
1347 * Mark an interface up and notify protocols of
1348 * the transition.
1349 * NOTE: must be called at splnet or equivalent.
1350 */
1351void
f23061d4 1352if_route(struct ifnet *ifp, int flag, int fam)
984263bc 1353{
b2632176 1354 struct ifaddr_container *ifac;
984263bc 1355
9275f515 1356 ifq_purge_all(&ifp->if_snd);
984263bc
MD
1357 ifp->if_flags |= flag;
1358 getmicrotime(&ifp->if_lastchange);
b2632176
SZ
1359 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1360 struct ifaddr *ifa = ifac->ifa;
1361
984263bc 1362 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
91be174d 1363 kpfctlinput(PRC_IFUP, ifa->ifa_addr);
b2632176 1364 }
984263bc
MD
1365 rt_ifmsg(ifp);
1366#ifdef INET6
1367 in6_if_up(ifp);
1368#endif
1369}
1370
1371/*
1372 * Mark an interface down and notify protocols of the transition. An
1373 * interface going down is also considered to be a synchronizing event.
1374 * We must ensure that all packet processing related to the interface
1375 * has completed before we return so e.g. the caller can free the ifnet
1376 * structure that the mbufs may be referencing.
1377 *
984263bc
MD
1378 * NOTE: must be called at splnet or equivalent.
1379 */
1380void
f23061d4 1381if_down(struct ifnet *ifp)
984263bc 1382{
984263bc 1383 if_unroute(ifp, IFF_UP, AF_UNSPEC);
5c703385 1384 netmsg_service_sync();
984263bc
MD
1385}
1386
1387/*
1388 * Mark an interface up and notify protocols of
1389 * the transition.
1391 * NOTE: must be called at splnet or equivalent.
1391 */
1392void
f23061d4 1393if_up(struct ifnet *ifp)
984263bc 1394{
984263bc
MD
1395 if_route(ifp, IFF_UP, AF_UNSPEC);
1396}
1397
1398/*
1399 * Process a link state change.
1400 * NOTE: must be called at splsoftnet or equivalent.
1401 */
1402void
1403if_link_state_change(struct ifnet *ifp)
1404{
71fc104f
HT
1405 int link_state = ifp->if_link_state;
1406
6de83abe 1407 rt_ifmsg(ifp);
1408 devctl_notify("IFNET", ifp->if_xname,
1409 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1410}
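/*
 * Illustrative sketch (hypothetical driver, not part of the original
 * file): a driver reports media changes by updating if_link_state and
 * then notifying routing-socket and devctl listeners:
 *
 *	ifp->if_link_state = LINK_STATE_UP;	... or LINK_STATE_DOWN
 *	if_link_state_change(ifp);
 */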
1411
1412/*
984263bc
MD
1413 * Handle interface watchdog timer routines. Called
1414 * from softclock, we decrement timers (if set) and
1415 * call the appropriate interface routine on expiration.
1416 */
1417static void
f23061d4 1418if_slowtimo(void *arg)
984263bc 1419{
82ed7fc2 1420 struct ifnet *ifp;
4986965b
JS
1421
1422 crit_enter();
984263bc
MD
1423
1424 TAILQ_FOREACH(ifp, &ifnet, if_link) {
1425 if (ifp->if_timer == 0 || --ifp->if_timer)
1426 continue;
78195a76 1427 if (ifp->if_watchdog) {
a3dd34d2 1428 if (ifnet_tryserialize_all(ifp)) {
78195a76 1429 (*ifp->if_watchdog)(ifp);
a3dd34d2 1430 ifnet_deserialize_all(ifp);
78195a76
MD
1431 } else {
1432 /* try again next timeout */
1433 ++ifp->if_timer;
1434 }
1435 }
984263bc 1436 }
4986965b
JS
1437
1438 crit_exit();
1439
abbb44bb 1440 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
984263bc
MD
1441}
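/*
 * Illustrative sketch (hypothetical driver, not part of the original
 * file): the watchdog contract is that the driver arms if_timer (in
 * seconds) when it queues work and clears it when the hardware makes
 * progress; if_slowtimo() above then invokes if_watchdog on expiry:
 *
 *	ifp->if_timer = 5;	... armed in foo_start()
 *	ifp->if_timer = 0;	... cleared in foo_txeof()
 */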
1442
1443/*
1444 * Map interface name to
1445 * interface structure pointer.
1446 */
1447struct ifnet *
1448ifunit(const char *name)
1449{
984263bc 1450 struct ifnet *ifp;
984263bc 1451
984263bc 1452 /*
3e4a09e7 1453 * Search all the interfaces for this name/number
984263bc 1454 */
3e4a09e7 1455
984263bc 1456 TAILQ_FOREACH(ifp, &ifnet, if_link) {
3e4a09e7 1457 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
984263bc
MD
1458 break;
1459 }
1460 return (ifp);
1461}
1462
1463
1464/*
1465 * Map interface name in a sockaddr_dl to
1466 * interface structure pointer.
1467 */
1468struct ifnet *
f23061d4 1469if_withname(struct sockaddr *sa)
984263bc
MD
1470{
1471 char ifname[IFNAMSIZ+1];
1472 struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa;
1473
1474 if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) ||
1475 (sdl->sdl_nlen > IFNAMSIZ) )
1476 return NULL;
1477
1478 /*
1479 * ifunit wants a null-terminated name. It may not be null-terminated
1480 * in the sockaddr. We don't want to change the caller's sockaddr,
1481 * and there might not be room to put the trailing null anyway, so we
1482 * make a local copy that we know we can null terminate safely.
1483 */
1484
1485 bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen);
1486 ifname[sdl->sdl_nlen] = '\0';
1487 return ifunit(ifname);
1488}
1489
1490
1491/*
1492 * Interface ioctls.
1493 */
1494int
87de5057 1495ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
984263bc 1496{
41c20dac
MD
1497 struct ifnet *ifp;
1498 struct ifreq *ifr;
984263bc
MD
1499 struct ifstat *ifs;
1500 int error;
1501 short oif_flags;
1502 int new_flags;
9683f229
MD
1503#ifdef COMPAT_43
1504 int ocmd;
1505#endif
1506 size_t namelen, onamelen;
1507 char new_name[IFNAMSIZ];
1508 struct ifaddr *ifa;
1509 struct sockaddr_dl *sdl;
984263bc
MD
1510
1511 switch (cmd) {
984263bc
MD
1512 case SIOCGIFCONF:
1513 case OSIOCGIFCONF:
87de5057 1514 return (ifconf(cmd, data, cred));
9683f229
MD
1515 default:
1516 break;
984263bc 1517 }
9683f229 1518
984263bc
MD
1519 ifr = (struct ifreq *)data;
1520
1521 switch (cmd) {
1522 case SIOCIFCREATE:
1523 case SIOCIFCREATE2:
1524 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
1525 return (error);
1526 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1527 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
984263bc 1528 case SIOCIFDESTROY:
895c1f85 1529 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
984263bc 1530 return (error);
c5e14c14 1531 return (if_clone_destroy(ifr->ifr_name));
984263bc
MD
1532 case SIOCIFGCLONERS:
1533 return (if_clone_list((struct if_clonereq *)data));
9683f229
MD
1534 default:
1535 break;
984263bc
MD
1536 }
1537
9683f229
MD
1538 /*
1539 * Nominal ioctl through interface, lookup the ifp and obtain a
1540 * lock to serialize the ifconfig ioctl operation.
1541 */
984263bc 1542 ifp = ifunit(ifr->ifr_name);
9683f229 1543 if (ifp == NULL)
984263bc 1544 return (ENXIO);
9683f229
MD
1545 error = 0;
1546 mtx_lock(&ifp->if_ioctl_mtx);
984263bc 1547
9683f229 1548 switch (cmd) {
12b71966
PA
1549 case SIOCGIFINDEX:
1550 ifr->ifr_index = ifp->if_index;
1551 break;
1552
984263bc
MD
1553 case SIOCGIFFLAGS:
1554 ifr->ifr_flags = ifp->if_flags;
46f25451 1555 ifr->ifr_flagshigh = ifp->if_flags >> 16;
984263bc
MD
1556 break;
1557
1558 case SIOCGIFCAP:
1559 ifr->ifr_reqcap = ifp->if_capabilities;
1560 ifr->ifr_curcap = ifp->if_capenable;
1561 break;
1562
1563 case SIOCGIFMETRIC:
1564 ifr->ifr_metric = ifp->if_metric;
1565 break;
1566
1567 case SIOCGIFMTU:
1568 ifr->ifr_mtu = ifp->if_mtu;
1569 break;
1570
1571 case SIOCGIFTSOLEN:
1572 ifr->ifr_tsolen = ifp->if_tsolen;
1573 break;
1574
315a7da3
JL
1575 case SIOCGIFDATA:
1576 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
9683f229 1577 sizeof(ifp->if_data));
315a7da3
JL
1578 break;
1579
984263bc
MD
1580 case SIOCGIFPHYS:
1581 ifr->ifr_phys = ifp->if_physical;
1582 break;
1583
1630efc5 1584 case SIOCGIFPOLLCPU:
1630efc5 1585 ifr->ifr_pollcpu = -1;
1630efc5
SZ
1586 break;
1587
1588 case SIOCSIFPOLLCPU:
1630efc5
SZ
1589 break;
1590
984263bc 1591 case SIOCSIFFLAGS:
895c1f85 1592 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1593 if (error)
9683f229 1594 break;
984263bc
MD
1595 new_flags = (ifr->ifr_flags & 0xffff) |
1596 (ifr->ifr_flagshigh << 16);
1597 if (ifp->if_flags & IFF_SMART) {
1598 /* Smart drivers twiddle their own routes */
1599 } else if (ifp->if_flags & IFF_UP &&
1600 (new_flags & IFF_UP) == 0) {
4986965b 1601 crit_enter();
984263bc 1602 if_down(ifp);
4986965b 1603 crit_exit();
984263bc
MD
1604 } else if (new_flags & IFF_UP &&
1605 (ifp->if_flags & IFF_UP) == 0) {
4986965b 1606 crit_enter();
984263bc 1607 if_up(ifp);
4986965b 1608 crit_exit();
984263bc 1609 }
9c095379 1610
1611#ifdef IFPOLL_ENABLE
1612 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
1613 if (new_flags & IFF_NPOLLING)
1614 ifpoll_register(ifp);
1615 else
1616 ifpoll_deregister(ifp);
1617 }
1618#endif
9c095379 1619
984263bc
MD
1620 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1621 (new_flags &~ IFF_CANTCHANGE);
984263bc
MD
1622 if (new_flags & IFF_PPROMISC) {
1623 /* Permanently promiscuous mode requested */
1624 ifp->if_flags |= IFF_PROMISC;
1625 } else if (ifp->if_pcount == 0) {
1626 ifp->if_flags &= ~IFF_PROMISC;
1627 }
78195a76 1628 if (ifp->if_ioctl) {
a3dd34d2 1629 ifnet_serialize_all(ifp);
87de5057 1630 ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1631 ifnet_deserialize_all(ifp);
78195a76 1632 }
984263bc
MD
1633 getmicrotime(&ifp->if_lastchange);
1634 break;
1635
1636 case SIOCSIFCAP:
895c1f85 1637 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1638 if (error)
9683f229
MD
1639 break;
1640 if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
1641 error = EINVAL;
1642 break;
1643 }
a3dd34d2 1644 ifnet_serialize_all(ifp);
87de5057 1645 ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1646 ifnet_deserialize_all(ifp);
984263bc
MD
1647 break;
1648
f23061d4 1649 case SIOCSIFNAME:
895c1f85 1650 error = priv_check_cred(cred, PRIV_ROOT, 0);
9683f229
MD
1651 if (error)
1652 break;
f23061d4 1653 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
9683f229
MD
1654 if (error)
1655 break;
1656 if (new_name[0] == '\0') {
1657 error = EINVAL;
1658 break;
1659 }
1660 if (ifunit(new_name) != NULL) {
1661 error = EEXIST;
1662 break;
1663 }
1664
1665 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
f23061d4
JH
1666
1667 /* Announce the departure of the interface. */
1668 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1669
1670 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
b2632176 1671 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
f23061d4
JH
1672 /* XXX IFA_LOCK(ifa); */
1673 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1674 namelen = strlen(new_name);
1675 onamelen = sdl->sdl_nlen;
1676 /*
1677 * Move the address if needed. This is safe because we
1678 * allocate space for a name of length IFNAMSIZ when we
1679 * create this in if_attach().
1680 */
1681 if (namelen != onamelen) {
1682 bcopy(sdl->sdl_data + onamelen,
1683 sdl->sdl_data + namelen, sdl->sdl_alen);
1684 }
1685 bcopy(new_name, sdl->sdl_data, namelen);
1686 sdl->sdl_nlen = namelen;
1687 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1688 bzero(sdl->sdl_data, onamelen);
1689 while (namelen != 0)
1690 sdl->sdl_data[--namelen] = 0xff;
1691 /* XXX IFA_UNLOCK(ifa) */
f2bd8b67
JS
1692
1693 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
f23061d4
JH
1694
1695 /* Announce the return of the interface. */
1696 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1697 break;
1fdf0954 1698
984263bc 1699 case SIOCSIFMETRIC:
895c1f85 1700 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1701 if (error)
9683f229 1702 break;
984263bc
MD
1703 ifp->if_metric = ifr->ifr_metric;
1704 getmicrotime(&ifp->if_lastchange);
1705 break;
1706
1707 case SIOCSIFPHYS:
895c1f85 1708 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1709 if (error)
9683f229
MD
1710 break;
1711 if (ifp->if_ioctl == NULL) {
1712 error = EOPNOTSUPP;
1713 break;
1714 }
a3dd34d2 1715 ifnet_serialize_all(ifp);
87de5057 1716 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1717 ifnet_deserialize_all(ifp);
984263bc
MD
1718 if (error == 0)
1719 getmicrotime(&ifp->if_lastchange);
9683f229 1720 break;
984263bc
MD
1721
1722 case SIOCSIFMTU:
1723 {
1724 u_long oldmtu = ifp->if_mtu;
1725
895c1f85 1726 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1727 if (error)
9683f229
MD
1728 break;
1729 if (ifp->if_ioctl == NULL) {
1730 error = EOPNOTSUPP;
1731 break;
1732 }
1733 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
1734 error = EINVAL;
1735 break;
1736 }
a3dd34d2 1737 ifnet_serialize_all(ifp);
87de5057 1738 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1739 ifnet_deserialize_all(ifp);
984263bc
MD
1740 if (error == 0) {
1741 getmicrotime(&ifp->if_lastchange);
1742 rt_ifmsg(ifp);
1743 }
1744 /*
1745 * If the link MTU changed, do network layer specific procedure.
1746 */
1747 if (ifp->if_mtu != oldmtu) {
1748#ifdef INET6
1749 nd6_setmtu(ifp);
1750#endif
1751 }
9683f229 1752 break;
984263bc
MD
1753 }
1754
1755 case SIOCSIFTSOLEN:
1756 error = priv_check_cred(cred, PRIV_ROOT, 0);
1757 if (error)
1758 break;
1759
1760 /* XXX need driver supplied upper limit */
1761 if (ifr->ifr_tsolen <= 0) {
1762 error = EINVAL;
1763 break;
1764 }
1765 ifp->if_tsolen = ifr->ifr_tsolen;
1766 break;
1767
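/*
 * Illustrative sketch (not part of the original file; the interface name
 * and length are arbitrary): the per-device TSO aggregation limit
 * referenced in the commit title (if_tsolen) is tuned from userland
 * through SIOCSIFTSOLEN, e.g.
 *
 *	struct ifreq ifr;
 *
 *	bzero(&ifr, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ifr.ifr_tsolen = 32 * 1024;
 *	ioctl(s, SIOCSIFTSOLEN, &ifr);
 *
 * Values <= 0 are rejected above; an upper bound is still left to the
 * driver (see the XXX note).
 */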
984263bc
MD
1768 case SIOCADDMULTI:
1769 case SIOCDELMULTI:
895c1f85 1770 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1771 if (error)
9683f229 1772 break;
984263bc
MD
1773
1774 /* Don't allow group membership on non-multicast interfaces. */
9683f229
MD
1775 if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1776 error = EOPNOTSUPP;
1777 break;
1778 }
984263bc
MD
1779
1780 /* Don't let users screw up protocols' entries. */
9683f229
MD
1781 if (ifr->ifr_addr.sa_family != AF_LINK) {
1782 error = EINVAL;
1783 break;
1784 }
984263bc
MD
1785
1786 if (cmd == SIOCADDMULTI) {
1787 struct ifmultiaddr *ifma;
1788 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1789 } else {
1790 error = if_delmulti(ifp, &ifr->ifr_addr);
1791 }
1792 if (error == 0)
1793 getmicrotime(&ifp->if_lastchange);
9683f229 1794 break;
984263bc
MD
1795
1796 case SIOCSIFPHYADDR:
1797 case SIOCDIFPHYADDR:
1798#ifdef INET6
1799 case SIOCSIFPHYADDR_IN6:
1800#endif
1801 case SIOCSLIFPHYADDR:
1802 case SIOCSIFMEDIA:
1803 case SIOCSIFGENERIC:
895c1f85 1804 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1805 if (error)
9683f229
MD
1806 break;
1807 if (ifp->if_ioctl == 0) {
1808 error = EOPNOTSUPP;
1809 break;
1810 }
a3dd34d2 1811 ifnet_serialize_all(ifp);
87de5057 1812 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1813 ifnet_deserialize_all(ifp);
984263bc
MD
1814 if (error == 0)
1815 getmicrotime(&ifp->if_lastchange);
9683f229 1816 break;
984263bc
MD
1817
1818 case SIOCGIFSTATUS:
1819 ifs = (struct ifstat *)data;
1820 ifs->ascii[0] = '\0';
9683f229 1821 /* fall through */
984263bc
MD
1822 case SIOCGIFPSRCADDR:
1823 case SIOCGIFPDSTADDR:
1824 case SIOCGLIFPHYADDR:
1825 case SIOCGIFMEDIA:
1826 case SIOCGIFGENERIC:
9683f229
MD
1827 if (ifp->if_ioctl == NULL) {
1828 error = EOPNOTSUPP;
1829 break;
1830 }
a3dd34d2 1831 ifnet_serialize_all(ifp);
87de5057 1832 error = ifp->if_ioctl(ifp, cmd, data, cred);
a3dd34d2 1833 ifnet_deserialize_all(ifp);
9683f229 1834 break;
984263bc
MD
1835
1836 case SIOCSIFLLADDR:
895c1f85 1837 error = priv_check_cred(cred, PRIV_ROOT, 0);
984263bc 1838 if (error)
9683f229
MD
1839 break;
1840 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data,
1841 ifr->ifr_addr.sa_len);
19f10c78 1842 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
9683f229 1843 break;
984263bc
MD
1844
1845 default:
1846 oif_flags = ifp->if_flags;
9683f229
MD
1847 if (so->so_proto == 0) {
1848 error = EOPNOTSUPP;
1849 break;
1850 }
984263bc 1851#ifndef COMPAT_43
04951810 1852 error = so_pru_control_direct(so, cmd, data, ifp);
984263bc 1853#else
9683f229 1854 ocmd = cmd;
984263bc
MD
1855
1856 switch (cmd) {
984263bc
MD
1857 case SIOCSIFDSTADDR:
1858 case SIOCSIFADDR:
1859 case SIOCSIFBRDADDR:
1860 case SIOCSIFNETMASK:
1861#if BYTE_ORDER != BIG_ENDIAN
1862 if (ifr->ifr_addr.sa_family == 0 &&
1863 ifr->ifr_addr.sa_len < 16) {
1864 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
1865 ifr->ifr_addr.sa_len = 16;
1866 }
1867#else
1868 if (ifr->ifr_addr.sa_len == 0)
1869 ifr->ifr_addr.sa_len = 16;
1870#endif
1871 break;
984263bc
MD
1872 case OSIOCGIFADDR:
1873 cmd = SIOCGIFADDR;
1874 break;
984263bc
MD
1875 case OSIOCGIFDSTADDR:
1876 cmd = SIOCGIFDSTADDR;
1877 break;
984263bc
MD
1878 case OSIOCGIFBRDADDR:
1879 cmd = SIOCGIFBRDADDR;
1880 break;
984263bc
MD
1881 case OSIOCGIFNETMASK:
1882 cmd = SIOCGIFNETMASK;
9683f229
MD
1883 break;
1884 default:
1885 break;
984263bc 1886 }
984263bc 1887
002c1265
MD
1888 error = so_pru_control_direct(so, cmd, data, ifp);
1889
1890 switch (ocmd) {
984263bc
MD
1891 case OSIOCGIFADDR:
1892 case OSIOCGIFDSTADDR:
1893 case OSIOCGIFBRDADDR:
1894 case OSIOCGIFNETMASK:
1895 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
002c1265 1896 break;
984263bc 1897 }
984263bc
MD
1898#endif /* COMPAT_43 */
1899
1900 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
1901#ifdef INET6
 1902			DELAY(100);	/* XXX: temporary workaround for fxp issue */
1903 if (ifp->if_flags & IFF_UP) {
4986965b 1904 crit_enter();
984263bc 1905 in6_if_up(ifp);
4986965b 1906 crit_exit();
984263bc
MD
1907 }
1908#endif
1909 }
9683f229 1910 break;
984263bc 1911 }
9683f229
MD
1912
1913 mtx_unlock(&ifp->if_ioctl_mtx);
1914 return (error);
984263bc
MD
1915}
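/*
 * A minimal userland sketch (not part of this file, never compiled here)
 * showing how the SIOCSIFTSOLEN handler above could be exercised to cap an
 * interface's TSO aggregation size.  The interface name "em0" and the 8192
 * byte limit are hypothetical; ifr_tsolen is the field consumed by the
 * handler above, and PRIV_ROOT is required just as the handler enforces.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifreq ifr;
        int s;

        s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0)
                err(1, "socket");

        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
        ifr.ifr_tsolen = 8192;          /* hypothetical per-device limit */

        if (ioctl(s, SIOCSIFTSOLEN, &ifr) < 0)
                err(1, "SIOCSIFTSOLEN");

        close(s);
        return (0);
}
#endif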
1916
1917/*
1918 * Set/clear promiscuous mode on interface ifp based on the truth value
1919 * of pswitch. The calls are reference counted so that only the first
1920 * "on" request actually has an effect, as does the final "off" request.
1921 * Results are undefined if the "off" and "on" requests are not matched.
1922 */
1923int
f23061d4 1924ifpromisc(struct ifnet *ifp, int pswitch)
984263bc
MD
1925{
1926 struct ifreq ifr;
1927 int error;
1928 int oldflags;
1929
1930 oldflags = ifp->if_flags;
46f25451 1931 if (ifp->if_flags & IFF_PPROMISC) {
984263bc
MD
1932 /* Do nothing if device is in permanently promiscuous mode */
1933 ifp->if_pcount += pswitch ? 1 : -1;
1934 return (0);
1935 }
1936 if (pswitch) {
1937 /*
1938 * If the device is not configured up, we cannot put it in
1939 * promiscuous mode.
1940 */
1941 if ((ifp->if_flags & IFF_UP) == 0)
1942 return (ENETDOWN);
1943 if (ifp->if_pcount++ != 0)
1944 return (0);
1945 ifp->if_flags |= IFF_PROMISC;
3e4a09e7
MD
1946 log(LOG_INFO, "%s: promiscuous mode enabled\n",
1947 ifp->if_xname);
984263bc
MD
1948 } else {
1949 if (--ifp->if_pcount > 0)
1950 return (0);
1951 ifp->if_flags &= ~IFF_PROMISC;
3e4a09e7
MD
1952 log(LOG_INFO, "%s: promiscuous mode disabled\n",
1953 ifp->if_xname);
984263bc
MD
1954 }
1955 ifr.ifr_flags = ifp->if_flags;
46f25451 1956 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2
SZ
1957 ifnet_serialize_all(ifp);
1958 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL);
1959 ifnet_deserialize_all(ifp);
984263bc
MD
1960 if (error == 0)
1961 rt_ifmsg(ifp);
1962 else
1963 ifp->if_flags = oldflags;
1964 return error;
1965}
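/*
 * A minimal sketch (hypothetical, not compiled) of the reference-counted
 * contract described above: every successful ifpromisc(ifp, 1) must
 * eventually be paired with an ifpromisc(ifp, 0).  The example_tap names
 * are illustrative only.
 */
#if 0
static int
example_tap_attach(struct ifnet *ifp)
{
        /* First "on" request sets IFF_PROMISC and pushes it to the driver. */
        return (ifpromisc(ifp, 1));
}

static void
example_tap_detach(struct ifnet *ifp)
{
        /* Last matching "off" request clears IFF_PROMISC again. */
        ifpromisc(ifp, 0);
}
#endif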
1966
1967/*
 1968 * Return the interface configuration
 1969 * of the system.  The list may be used
 1970 * in later ioctls (above) to get
 1971 * other information.
1972 */
984263bc 1973static int
87de5057 1974ifconf(u_long cmd, caddr_t data, struct ucred *cred)
984263bc 1975{
41c20dac
MD
1976 struct ifconf *ifc = (struct ifconf *)data;
1977 struct ifnet *ifp;
984263bc
MD
1978 struct sockaddr *sa;
1979 struct ifreq ifr, *ifrp;
1980 int space = ifc->ifc_len, error = 0;
1981
1982 ifrp = ifc->ifc_req;
1983 TAILQ_FOREACH(ifp, &ifnet, if_link) {
b2632176 1984 struct ifaddr_container *ifac;
3e4a09e7 1985 int addrs;
984263bc 1986
f23061d4 1987 if (space <= sizeof ifr)
984263bc 1988 break;
623c059e
JS
1989
1990 /*
95f018e8
MD
1991 * Zero the stack declared structure first to prevent
1992 * memory disclosure.
623c059e 1993 */
95f018e8 1994 bzero(&ifr, sizeof(ifr));
3e4a09e7
MD
1995 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
1996 >= sizeof(ifr.ifr_name)) {
984263bc
MD
1997 error = ENAMETOOLONG;
1998 break;
984263bc
MD
1999 }
2000
2001 addrs = 0;
b2632176
SZ
2002 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2003 struct ifaddr *ifa = ifac->ifa;
2004
f23061d4 2005 if (space <= sizeof ifr)
984263bc
MD
2006 break;
2007 sa = ifa->ifa_addr;
87de5057
MD
2008 if (cred->cr_prison &&
2009 prison_if(cred, sa))
984263bc
MD
2010 continue;
2011 addrs++;
2012#ifdef COMPAT_43
2013 if (cmd == OSIOCGIFCONF) {
2014 struct osockaddr *osa =
2015 (struct osockaddr *)&ifr.ifr_addr;
2016 ifr.ifr_addr = *sa;
2017 osa->sa_family = sa->sa_family;
f23061d4 2018 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2019 ifrp++;
2020 } else
2021#endif
2022 if (sa->sa_len <= sizeof(*sa)) {
2023 ifr.ifr_addr = *sa;
f23061d4 2024 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2025 ifrp++;
2026 } else {
f23061d4 2027 if (space < (sizeof ifr) + sa->sa_len -
984263bc
MD
2028 sizeof(*sa))
2029 break;
2030 space -= sa->sa_len - sizeof(*sa);
f23061d4
JH
2031 error = copyout(&ifr, ifrp,
2032 sizeof ifr.ifr_name);
984263bc 2033 if (error == 0)
f23061d4
JH
2034 error = copyout(sa, &ifrp->ifr_addr,
2035 sa->sa_len);
984263bc
MD
2036 ifrp = (struct ifreq *)
2037 (sa->sa_len + (caddr_t)&ifrp->ifr_addr);
2038 }
2039 if (error)
2040 break;
f23061d4 2041 space -= sizeof ifr;
984263bc
MD
2042 }
2043 if (error)
2044 break;
2045 if (!addrs) {
f23061d4
JH
2046 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr);
2047 error = copyout(&ifr, ifrp, sizeof ifr);
984263bc
MD
2048 if (error)
2049 break;
f23061d4 2050 space -= sizeof ifr;
984263bc
MD
2051 ifrp++;
2052 }
2053 }
2054 ifc->ifc_len -= space;
2055 return (error);
2056}
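/*
 * A userland sketch (hypothetical, not compiled here) of consuming the
 * buffer that ifconf() above fills in for SIOCGIFCONF.  Records are
 * variable length: each starts with ifr_name and is followed by a sockaddr
 * that may be longer than sizeof(struct sockaddr), exactly as packed by the
 * copyout logic above.  The 64KB buffer size is arbitrary.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifconf ifc;
        char *buf, *ptr;
        int s, len = 64 * 1024;

        s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0)
                err(1, "socket");
        buf = malloc(len);
        if (buf == NULL)
                err(1, "malloc");

        ifc.ifc_len = len;
        ifc.ifc_buf = buf;
        if (ioctl(s, SIOCGIFCONF, &ifc) < 0)
                err(1, "SIOCGIFCONF");

        for (ptr = buf; ptr < buf + ifc.ifc_len; ) {
                struct ifreq *ifr = (struct ifreq *)ptr;
                size_t salen = ifr->ifr_addr.sa_len;

                printf("%s family %d\n", ifr->ifr_name,
                    ifr->ifr_addr.sa_family);

                /* Step over name + address, never less than a full sockaddr. */
                if (salen < sizeof(struct sockaddr))
                        salen = sizeof(struct sockaddr);
                ptr += sizeof(ifr->ifr_name) + salen;
        }

        free(buf);
        close(s);
        return (0);
}
#endif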
2057
2058/*
 2059 * Just like ifpromisc(), but for all-multicast-reception mode.
2060 */
2061int
f23061d4 2062if_allmulti(struct ifnet *ifp, int onswitch)
984263bc
MD
2063{
2064 int error = 0;
984263bc
MD
2065 struct ifreq ifr;
2066
4986965b
JS
2067 crit_enter();
2068
984263bc
MD
2069 if (onswitch) {
2070 if (ifp->if_amcount++ == 0) {
2071 ifp->if_flags |= IFF_ALLMULTI;
2072 ifr.ifr_flags = ifp->if_flags;
46f25451 2073 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2 2074 ifnet_serialize_all(ifp);
bd4539cc 2075 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2076 NULL);
a3dd34d2 2077 ifnet_deserialize_all(ifp);
984263bc
MD
2078 }
2079 } else {
2080 if (ifp->if_amcount > 1) {
2081 ifp->if_amcount--;
2082 } else {
2083 ifp->if_amcount = 0;
2084 ifp->if_flags &= ~IFF_ALLMULTI;
2085 ifr.ifr_flags = ifp->if_flags;
46f25451 2086 ifr.ifr_flagshigh = ifp->if_flags >> 16;
a3dd34d2 2087 ifnet_serialize_all(ifp);
bd4539cc 2088 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2089 NULL);
a3dd34d2 2090 ifnet_deserialize_all(ifp);
984263bc
MD
2091 }
2092 }
4986965b
JS
2093
2094 crit_exit();
984263bc
MD
2095
2096 if (error == 0)
2097 rt_ifmsg(ifp);
2098 return error;
2099}
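/*
 * A minimal sketch (hypothetical, not compiled) of the reference-counted
 * if_allmulti() contract, e.g. as a multicast router enables and later
 * disables all-multicast reception.  The example_mrouter names are
 * illustrative only.
 */
#if 0
static int
example_mrouter_start(struct ifnet *ifp)
{
        return (if_allmulti(ifp, 1));   /* first "on" sets IFF_ALLMULTI */
}

static void
example_mrouter_stop(struct ifnet *ifp)
{
        if_allmulti(ifp, 0);            /* last "off" clears IFF_ALLMULTI */
}
#endif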
2100
2101/*
2102 * Add a multicast listenership to the interface in question.
 2103 * The link layer provides a routine (if_resolvemulti) which converts
 * the group address into the corresponding link-layer (AF_LINK) address.
2104 */
2105int
f23061d4
JH
2106if_addmulti(
2107 struct ifnet *ifp, /* interface to manipulate */
2108 struct sockaddr *sa, /* address to add */
2109 struct ifmultiaddr **retifma)
984263bc
MD
2110{
2111 struct sockaddr *llsa, *dupsa;
4986965b 2112 int error;
984263bc
MD
2113 struct ifmultiaddr *ifma;
2114
2115 /*
2116 * If the matching multicast address already exists
2117 * then don't add a new one, just add a reference
2118 */
441d34b2 2119 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
0c3c561c 2120 if (sa_equal(sa, ifma->ifma_addr)) {
984263bc
MD
2121 ifma->ifma_refcount++;
2122 if (retifma)
2123 *retifma = ifma;
2124 return 0;
2125 }
2126 }
2127
2128 /*
2129 * Give the link layer a chance to accept/reject it, and also
2130 * find out which AF_LINK address this maps to, if it isn't one
2131 * already.
2132 */
2133 if (ifp->if_resolvemulti) {
a3dd34d2 2134 ifnet_serialize_all(ifp);
984263bc 2135 error = ifp->if_resolvemulti(ifp, &llsa, sa);
a3dd34d2 2136 ifnet_deserialize_all(ifp);
78195a76
MD
2137 if (error)
2138 return error;
984263bc 2139 } else {
4090d6ff 2140 llsa = NULL;
984263bc
MD
2141 }
2142
884717e1
SW
2143 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2144 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_WAITOK);
984263bc
MD
2145 bcopy(sa, dupsa, sa->sa_len);
2146
2147 ifma->ifma_addr = dupsa;
2148 ifma->ifma_lladdr = llsa;
2149 ifma->ifma_ifp = ifp;
2150 ifma->ifma_refcount = 1;
2151 ifma->ifma_protospec = 0;
2152 rt_newmaddrmsg(RTM_NEWMADDR, ifma);
2153
2154 /*
2155 * Some network interfaces can scan the address list at
2156 * interrupt time; lock them out.
2157 */
4986965b 2158 crit_enter();
441d34b2 2159 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
4986965b 2160 crit_exit();
6cd0715f
RP
2161 if (retifma)
2162 *retifma = ifma;
984263bc 2163
4090d6ff 2164 if (llsa != NULL) {
441d34b2 2165 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
0c3c561c 2166 if (sa_equal(ifma->ifma_addr, llsa))
984263bc
MD
2167 break;
2168 }
2169 if (ifma) {
2170 ifma->ifma_refcount++;
2171 } else {
884717e1
SW
2172 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_WAITOK);
2173 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_WAITOK);
984263bc
MD
2174 bcopy(llsa, dupsa, llsa->sa_len);
2175 ifma->ifma_addr = dupsa;
2176 ifma->ifma_ifp = ifp;
2177 ifma->ifma_refcount = 1;
4986965b 2178 crit_enter();
441d34b2 2179 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
4986965b 2180 crit_exit();
984263bc
MD
2181 }
2182 }
2183 /*
2184 * We are certain we have added something, so call down to the
 2185	 * interface to let it know about it.
2186 */
4986965b 2187 crit_enter();
a3dd34d2 2188 ifnet_serialize_all(ifp);
6cd0715f
RP
2189 if (ifp->if_ioctl)
2190 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL);
a3dd34d2 2191 ifnet_deserialize_all(ifp);
4986965b 2192 crit_exit();
984263bc
MD
2193
2194 return 0;
2195}
2196
2197/*
2198 * Remove a reference to a multicast address on this interface. Yell
2199 * if the request does not match an existing membership.
2200 */
2201int
f23061d4 2202if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
984263bc
MD
2203{
2204 struct ifmultiaddr *ifma;
984263bc 2205
441d34b2 2206 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2207 if (sa_equal(sa, ifma->ifma_addr))
984263bc 2208 break;
4090d6ff 2209 if (ifma == NULL)
984263bc
MD
2210 return ENOENT;
2211
2212 if (ifma->ifma_refcount > 1) {
2213 ifma->ifma_refcount--;
2214 return 0;
2215 }
2216
2217 rt_newmaddrmsg(RTM_DELMADDR, ifma);
2218 sa = ifma->ifma_lladdr;
4986965b 2219 crit_enter();
441d34b2 2220 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
984263bc
MD
2221 /*
2222 * Make sure the interface driver is notified
2223 * in the case of a link layer mcast group being left.
2224 */
4090d6ff 2225 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) {
a3dd34d2 2226 ifnet_serialize_all(ifp);
2038fb68 2227 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
a3dd34d2 2228 ifnet_deserialize_all(ifp);
78195a76 2229 }
4986965b 2230 crit_exit();
efda3bd0
MD
2231 kfree(ifma->ifma_addr, M_IFMADDR);
2232 kfree(ifma, M_IFMADDR);
4090d6ff 2233 if (sa == NULL)
984263bc
MD
2234 return 0;
2235
2236 /*
2237 * Now look for the link-layer address which corresponds to
2238 * this network address. It had been squirreled away in
2239 * ifma->ifma_lladdr for this purpose (so we don't have
2240 * to call ifp->if_resolvemulti() again), and we saved that
2241 * value in sa above. If some nasty deleted the
2242 * link-layer address out from underneath us, we can deal because
 2243	 * the address we stored is not the same as the one which was
2244 * in the record for the link-layer address. (So we don't complain
2245 * in that case.)
2246 */
441d34b2 2247 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2248 if (sa_equal(sa, ifma->ifma_addr))
984263bc 2249 break;
4090d6ff 2250 if (ifma == NULL)
984263bc
MD
2251 return 0;
2252
2253 if (ifma->ifma_refcount > 1) {
2254 ifma->ifma_refcount--;
2255 return 0;
2256 }
2257
4986965b 2258 crit_enter();
a3dd34d2 2259 ifnet_serialize_all(ifp);
441d34b2 2260 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
2038fb68 2261 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL);
a3dd34d2 2262 ifnet_deserialize_all(ifp);
4986965b 2263 crit_exit();
efda3bd0
MD
2264 kfree(ifma->ifma_addr, M_IFMADDR);
2265 kfree(sa, M_IFMADDR);
2266 kfree(ifma, M_IFMADDR);
984263bc
MD
2267
2268 return 0;
2269}
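/*
 * An in-kernel sketch (hypothetical, not compiled) of the if_addmulti()/
 * if_delmulti() pairing defined above.  It joins a link-layer multicast
 * group by building an AF_LINK address holding a 6-byte MAC group address,
 * and later drops the membership again.  The example_group value and the
 * example_* function names are illustrative only.
 */
#if 0
static const u_char example_group[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

static int
example_join(struct ifnet *ifp, struct ifmultiaddr **ifmap)
{
        struct sockaddr_dl sdl;

        bzero(&sdl, sizeof(sdl));
        sdl.sdl_len = sizeof(sdl);
        sdl.sdl_family = AF_LINK;
        sdl.sdl_alen = sizeof(example_group);
        bcopy(example_group, LLADDR(&sdl), sizeof(example_group));

        /* Reference counted: joining the same group twice just bumps it. */
        return (if_addmulti(ifp, (struct sockaddr *)&sdl, ifmap));
}

static int
example_leave(struct ifnet *ifp)
{
        struct sockaddr_dl sdl;

        bzero(&sdl, sizeof(sdl));
        sdl.sdl_len = sizeof(sdl);
        sdl.sdl_family = AF_LINK;
        sdl.sdl_alen = sizeof(example_group);
        bcopy(example_group, LLADDR(&sdl), sizeof(example_group));

        return (if_delmulti(ifp, (struct sockaddr *)&sdl));
}
#endif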
2270
2271/*
3976c93a
RP
 2272 * Delete all multicast group memberships for an interface.
2273 * Should be used to quickly flush all multicast filters.
2274 */
2275void
2276if_delallmulti(struct ifnet *ifp)
2277{
2278 struct ifmultiaddr *ifma;
2279 struct ifmultiaddr *next;
2280
441d34b2 2281 TAILQ_FOREACH_MUTABLE(ifma, &ifp->if_multiaddrs, ifma_link, next)
3976c93a
RP
2282 if_delmulti(ifp, ifma->ifma_addr);
2283}
2284
2285
2286/*
984263bc
MD
2287 * Set the link layer address on an interface.
2288 *
2289 * At this time we only support certain types of interfaces,
2290 * and we don't allow the length of the address to change.
2291 */
2292int
2293if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2294{
2295 struct sockaddr_dl *sdl;
984263bc
MD
2296 struct ifreq ifr;
2297
f2682cb9 2298 sdl = IF_LLSOCKADDR(ifp);
984263bc
MD
2299 if (sdl == NULL)
2300 return (EINVAL);
2301 if (len != sdl->sdl_alen) /* don't allow length to change */
2302 return (EINVAL);
2303 switch (ifp->if_type) {
2304 case IFT_ETHER: /* these types use struct arpcom */
984263bc 2305 case IFT_XETHER:
984263bc
MD
2306 case IFT_L2VLAN:
2307 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len);
984263bc
MD
2308 bcopy(lladdr, LLADDR(sdl), len);
2309 break;
2310 default:
2311 return (ENODEV);
2312 }
2313 /*
2314 * If the interface is already up, we need
2315 * to re-init it in order to reprogram its
2316 * address filter.
2317 */
a3dd34d2 2318 ifnet_serialize_all(ifp);
984263bc 2319 if ((ifp->if_flags & IFF_UP) != 0) {
c97d9b76 2320#ifdef INET
b2632176 2321 struct ifaddr_container *ifac;
c97d9b76 2322#endif
b2632176 2323
984263bc
MD
2324 ifp->if_flags &= ~IFF_UP;
2325 ifr.ifr_flags = ifp->if_flags;
46f25451 2326 ifr.ifr_flagshigh = ifp->if_flags >> 16;
78195a76 2327 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2328 NULL);
984263bc
MD
2329 ifp->if_flags |= IFF_UP;
2330 ifr.ifr_flags = ifp->if_flags;
46f25451 2331 ifr.ifr_flagshigh = ifp->if_flags >> 16;
78195a76 2332 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr,
2038fb68 2333 NULL);
984263bc
MD
2334#ifdef INET
2335 /*
2336 * Also send gratuitous ARPs to notify other nodes about
2337 * the address change.
2338 */
b2632176
SZ
2339 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2340 struct ifaddr *ifa = ifac->ifa;
2341
984263bc
MD
2342 if (ifa->ifa_addr != NULL &&
2343 ifa->ifa_addr->sa_family == AF_INET)
69b66ae8 2344 arp_gratuitous(ifp, ifa);
984263bc
MD
2345 }
2346#endif
2347 }
a3dd34d2 2348 ifnet_deserialize_all(ifp);
984263bc
MD
2349 return (0);
2350}
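/*
 * A minimal in-kernel sketch (hypothetical, not compiled) of changing an
 * Ethernet interface's MAC with if_setlladdr().  The new_mac value is
 * illustrative; the length must match the current sdl_alen, as enforced
 * above.
 */
#if 0
static int
example_set_mac(struct ifnet *ifp)
{
        static const u_char new_mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

        /* Re-inits the interface and sends gratuitous ARPs if it is up. */
        return (if_setlladdr(ifp, new_mac, sizeof(new_mac)));
}
#endif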
2351
2352struct ifmultiaddr *
f23061d4 2353ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp)
984263bc
MD
2354{
2355 struct ifmultiaddr *ifma;
2356
441d34b2 2357 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
0c3c561c 2358 if (sa_equal(ifma->ifma_addr, sa))
984263bc
MD
2359 break;
2360
2361 return ifma;
2362}
2363
1550dfd9 2364/*
e9bd1548
MD
2365 * This function locates the first real ethernet MAC from a network
2366 * card and loads it into node, returning 0 on success or ENOENT if
2367 * no suitable interfaces were found. It is used by the uuid code to
2368 * generate a unique 6-byte number.
2369 */
2370int
2371if_getanyethermac(uint16_t *node, int minlen)
2372{
2373 struct ifnet *ifp;
2374 struct sockaddr_dl *sdl;
2375
2376 TAILQ_FOREACH(ifp, &ifnet, if_link) {
2377 if (ifp->if_type != IFT_ETHER)
2378 continue;
2379 sdl = IF_LLSOCKADDR(ifp);
2380 if (sdl->sdl_alen < minlen)
2381 continue;
2382 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node,
2383 minlen);
2384 return(0);
2385 }
2386 return (ENOENT);
2387}
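/*
 * A sketch (hypothetical, not compiled) of the uuid-style use described
 * above: pull 6 bytes of a real Ethernet MAC into the node[] words, or
 * fall back so the caller can pick a random node if no suitable interface
 * exists.
 */
#if 0
static void
example_uuid_node(uint16_t node[3])
{
        if (if_getanyethermac(node, 6) != 0) {
                /* No Ethernet interface found; caller picks a random node. */
                node[0] = node[1] = node[2] = 0;
        }
}
#endif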
2388
2389/*
1550dfd9
MD
2390 * The name argument must be a pointer to storage which will last as
2391 * long as the interface does. For physical devices, the result of
2392 * device_get_name(dev) is a good choice and for pseudo-devices a
2393 * static string works well.
2394 */
2395void
2396if_initname(struct ifnet *ifp, const char *name, int unit)
2397{
3e4a09e7
MD
2398 ifp->if_dname = name;
2399 ifp->if_dunit = unit;
1550dfd9 2400 if (unit != IF_DUNIT_NONE)
f8c7a42d 2401 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
1550dfd9
MD
2402 else
2403 strlcpy(ifp->if_xname, name, IFNAMSIZ);
2404}
2405
984263bc
MD
2406int
2407if_printf(struct ifnet *ifp, const char *fmt, ...)
2408{
e2565a42 2409 __va_list ap;
984263bc
MD
2410 int retval;
2411
4b1cf444 2412 retval = kprintf("%s: ", ifp->if_xname);
e2565a42 2413 __va_start(ap, fmt);
379210cb 2414 retval += kvprintf(fmt, ap);
e2565a42 2415 __va_end(ap);
984263bc
MD
2416 return (retval);
2417}
2418
cb80735c
RP
2419struct ifnet *
2420if_alloc(uint8_t type)
2421{
2422 struct ifnet *ifp;
7e395935 2423 size_t size;
cb80735c 2424
7e395935
MD
2425 /*
2426 * XXX temporary hack until arpcom is setup in if_l2com
2427 */
2428 if (type == IFT_ETHER)
2429 size = sizeof(struct arpcom);
2430 else
2431 size = sizeof(struct ifnet);
2432
2433 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO);
cb80735c
RP
2434
2435 ifp->if_type = type;
2436
aeb3c11e
RP
2437 if (if_com_alloc[type] != NULL) {
2438 ifp->if_l2com = if_com_alloc[type](type, ifp);
2439 if (ifp->if_l2com == NULL) {
2440 kfree(ifp, M_IFNET);
2441 return (NULL);
2442 }
2443 }
cb80735c
RP
2444 return (ifp);
2445}
2446
2447void
2448if_free(struct ifnet *ifp)
2449{
2450 kfree(ifp, M_IFNET);
2451}
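/*
 * A compressed driver-attach sketch (hypothetical, not compiled) tying
 * together if_alloc(), if_initname() and if_printf().  Real drivers fill
 * in many more ifnet fields and call ether_ifattach(); the "example" name
 * and unit number are illustrative only.
 */
#if 0
static struct ifnet *
example_attach(int unit)
{
        struct ifnet *ifp;

        ifp = if_alloc(IFT_ETHER);      /* also allocates the arpcom glue */
        if (ifp == NULL)
                return (NULL);

        if_initname(ifp, "example", unit);      /* yields "example0", ... */
        if_printf(ifp, "attached\n");
        return (ifp);
}
#endif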
2452
b2f93efe
JS
2453void
2454ifq_set_classic(struct ifaltq *ifq)
2455{
2cc2f639
SZ
2456 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq,
2457 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request);
f0a26983
SZ
2458}
2459
2460void
2cc2f639
SZ
2461ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq,
2462 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request)
f0a26983
SZ
2463{
2464 int q;
2465
2cc2f639
SZ
2466 KASSERT(mapsubq != NULL, ("mapsubq is not specified"));
2467 KASSERT(enqueue != NULL, ("enqueue is not specified"));
2468 KASSERT(dequeue != NULL, ("dequeue is not specified"));
2469 KASSERT(request != NULL, ("request is not specified"));
2470
2471 ifq->altq_mapsubq = mapsubq;
f0a26983
SZ
2472 for (q = 0; q < ifq->altq_subq_cnt; ++q) {
2473 struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
2474
2475 ifsq->ifsq_enqueue = enqueue;
2476 ifsq->ifsq_dequeue = dequeue;
2477 ifsq->ifsq_request = request;
2478 }
b2f93efe
JS
2479}
2480
9db4b353 2481int
f0a26983
SZ
2482ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
2483 struct altq_pktattr *pa __unused)
e3e4574a 2484{
f0a26983 2485 if (IF_QFULL(ifsq)) {
e3e4574a
JS
2486 m_freem(m);
2487 return(ENOBUFS);
2488 } else {
f0a26983 2489 IF_ENQUEUE(ifsq, m);
e3e4574a
JS
2490 return(0);
2491 }
2492}
2493
9db4b353 2494struct mbuf *
f0a26983 2495ifsq_classic_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
e3e4574a
JS
2496{
2497 struct mbuf *m;
2498
2499 switch (op) {
2500 case ALTDQ_POLL:
f0a26983 2501 IF_POLL(ifsq, m);
e3e4574a
JS
2502 break;
2503 case ALTDQ_REMOVE:
f0a26983 2504 IF_DEQUEUE(ifsq, m);
e3e4574a
JS
2505 break;
2506 default:
2507 panic("unsupported ALTQ dequeue op: %d", op);
2508 }
d2c71fa0 2509 KKASSERT(mpolled == NULL || mpolled == m);
e3e4574a
JS
2510 return(m);
2511}
2512
9db4b353 2513int
f0a26983 2514ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg)
e3e4574a
JS
2515{
2516 switch (req) {
2517 case ALTRQ_PURGE:
f0a26983 2518 IF_DRAIN(ifsq);
e3e4574a
JS
2519 break;
2520 default:
3f625015 2521 panic("unsupported ALTQ request: %d", req);
e3e4574a 2522 }
e3e4574a
JS
2523 return(0);
2524}
b2632176 2525
28cc0c29 2526static void
f0a26983 2527ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched)
28cc0c29 2528{
f0a26983 2529 struct ifnet *ifp = ifsq_get_ifp(ifsq);
28cc0c29
SZ
2530 int running = 0, need_sched;
2531
2532 /*
 2533	 * Try a direct ifnet.if_start call first; if there is
 2534	 * contention on the ifnet's serializer, ifnet.if_start will
 2535	 * be scheduled on the ifnet's CPU instead.
2536 */
3c4cd924 2537 if (!ifnet_tryserialize_tx(ifp, ifsq)) {
28cc0c29
SZ
2538 /*
2539 * ifnet serializer contention happened,
2540 * ifnet.if_start is scheduled on ifnet's
2541 * CPU, and we keep going.
2542 */
f0a26983 2543 ifsq_ifstart_schedule(ifsq, 1);
28cc0c29
SZ
2544 return;
2545 }
2546
f0a26983
SZ
2547 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
2548 ifp->if_start(ifp, ifsq);
2549 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
28cc0c29
SZ
2550 running = 1;
2551 }
f0a26983 2552 need_sched = ifsq_ifstart_need_schedule(ifsq, running);
28cc0c29 2553
3c4cd924 2554 ifnet_deserialize_tx(ifp, ifsq);
28cc0c29
SZ
2555
2556 if (need_sched) {
2557 /*
 2558		 * More data needs to be transmitted; ifnet.if_start is
2559 * scheduled on ifnet's CPU, and we keep going.
2560 * NOTE: ifnet.if_start interlock is not released.
2561 */
f0a26983 2562 ifsq_ifstart_schedule(ifsq, force_sched);
28cc0c29
SZ
2563 }
2564}
2565
2aa7f7f8 2566/*
f0a26983 2567 * IFSUBQ packet staging mechanism:
2aa7f7f8 2568 *
f0a26983 2569 * The packets enqueued into an IFSUBQ are staged up to a certain amount before the
2aa7f7f8
SZ
 2570 * ifnet's if_start is called. In this way, the driver can avoid writing
 2571 * to hardware registers for every single packet; instead, the hardware
 2572 * registers are only written once a certain number of packets has been
 2573 * put onto the hardware TX ring. Measurements on several modern NICs
 2574 * (emx(4), igb(4), bnx(4), bge(4), jme(4)) show that this hardware
 2575 * register write aggregation can save ~20% CPU time when 18-byte UDP
 2576 * datagrams are transmitted at 1.48Mpps. The performance improvement
 2577 * from hardware register write aggregation is also mentioned in Luigi
 2578 * Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
2579 *
f0a26983 2580 * IFSUBQ packet staging is performed at the two entry points into a driver's
2aa7f7f8 2581 * transmission function:
f0a26983
SZ
2582 * - Direct ifnet's if_start calling, i.e. ifsq_ifstart_try()
2583 * - ifnet's if_start scheduling, i.e. ifsq_ifstart_schedule()
2aa7f7f8 2584 *
f0a26983 2585 * IFSUBQ packet staging will be stopped upon any of the following conditions:
2aa7f7f8 2586 * - If the count of packets enqueued on the current CPU is greater than or
f0a26983 2587 * equal to ifsq_stage_cntmax. (XXX this should be per-interface)
2aa7f7f8
SZ
 2588 * - If the total length of packets enqueued on the current CPU is greater
 2589 *   than or equal to the hardware's MTU - max_protohdr. max_protohdr is
 2590 *   subtracted from the hardware's MTU mainly because a full TCP segment's
 2591 *   size is usually less than the hardware's MTU.
f0a26983 2592 * - ifsq_ifstart_schedule() is not pending on the current CPU and the if_start
2aa7f7f8
SZ
2593 * interlock (if_snd.altq_started) is not released.
 2594 * - if_start_rollup(), which is registered as a low-priority netisr
 2595 *   rollup function, is called; probably because no more work is pending
 2596 *   for the netisr.
2597 *
2598 * NOTE:
f0a26983 2599 * Currently IFSUBQ packet staging is only performed in netisr threads.
2aa7f7f8 2600 */
9db4b353
SZ
2601int
2602ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
2603{
2604 struct ifaltq *ifq = &ifp->if_snd;
f0a26983 2605 struct ifaltq_subque *ifsq;
28cc0c29 2606 int error, start = 0, len, mcast = 0, avoid_start = 0;
f0a26983
SZ
2607 struct ifsubq_stage_head *head = NULL;
2608 struct ifsubq_stage *stage = NULL;
57dff79c 2609
2cc2f639 2610 ifsq = ifq_map_subq(ifq, mycpuid);
3c4cd924 2611 ASSERT_IFNET_NOT_SERIALIZED_TX(ifp, ifsq);
9db4b353 2612
fe53d127
SZ
2613 len = m->m_pkthdr.len;
2614 if (m->m_flags & M_MCAST)
2615 mcast = 1;
2616
28cc0c29 2617 if (curthread->td_type == TD_TYPE_NETISR) {
f0a26983
SZ
2618 head = &ifsubq_stage_heads[mycpuid];
2619 stage = ifsq_get_stage(ifsq, mycpuid);
28cc0c29 2620
f0a26983
SZ
2621 stage->stg_cnt++;
2622 stage->stg_len += len;
2623 if (stage->stg_cnt < ifsq_stage_cntmax &&
2624 stage->stg_len < (ifp->if_mtu - max_protohdr))
28cc0c29
SZ
2625 avoid_start = 1;
2626 }
2627
f0a26983
SZ
2628 ALTQ_SQ_LOCK(ifsq);
2629 error = ifsq_enqueue_locked(ifsq, m, pa);
9db4b353 2630 if (error) {
f0a26983
SZ
2631 if (!ifsq_data_ready(ifsq)) {
2632 ALTQ_SQ_UNLOCK(ifsq);
087561ef
SZ
2633 return error;
2634 }
28cc0c29 2635 avoid_start = 0;
9db4b353 2636 }
f0a26983 2637 if (!ifsq_is_started(ifsq)) {
28cc0c29 2638 if (avoid_start) {
f0a26983 2639 ALTQ_SQ_UNLOCK(ifsq);
28cc0c29
SZ
2640
2641 KKASSERT(!error);
f0a26983
SZ
2642 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
2643 ifsq_stage_insert(head, stage);
28cc0c29
SZ
2644
2645 ifp->if_obytes += len;
2646 if (mcast)
2647 ifp->if_omcasts++;
28cc0c29
SZ
2648 return error;
2649 }
2650
9db4b353
SZ
2651 /*
2652 * Hold the interlock of ifnet.if_start
2653 */
f0a26983 2654 ifsq_set_started(ifsq);
9db4b353
SZ
2655 start = 1;
2656 }
f0a26983 2657 ALTQ_SQ_UNLOCK(ifsq);
9db4b353 2658
fe53d127
SZ
2659 if (!error) {
2660 ifp->if_obytes += len;
2661 if (mcast)
2662 ifp->if_omcasts++;
2663 }
9db4b353 2664
28cc0c29 2665 if (stage != NULL) {
f0a26983
SZ
2666 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
2667 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
3cab6b0d 2668 if (!avoid_start) {
f0a26983
SZ
2669 ifsq_stage_remove(head, stage);
2670 ifsq_ifstart_schedule(ifsq, 1);
3cab6b0d
SZ
2671 }
2672 return error;
2673 }
2674
f0a26983
SZ
2675 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) {
2676 ifsq_stage_remove(head, stage);
28cc0c29 2677 } else {
f0a26983
SZ
2678 stage->stg_cnt = 0;
2679 stage->stg_len = 0;
28cc0c29 2680 }
9db4b353
SZ
2681 }
2682
f0a26983 2683 if (!start)
087561ef 2684 return error;
9db4b353 2685
f0a26983 2686 ifsq_ifstart_try(ifsq, 0);
087561ef 2687 return error;
9db4b353
SZ
2688}
2689
b2632176
SZ
2690void *
2691ifa_create(int size, int flags)
2692{
2693 struct ifaddr *ifa;
2694 int i;
2695
ed20d0e3 2696 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small"));
b2632176
SZ
2697
2698 ifa = kmalloc(size, M_IFADDR, flags | M_ZERO);
2699 if (ifa == NULL)
2700 return NULL;
2701
2702 ifa->ifa_containers = kmalloc(ncpus * sizeof(struct ifaddr_container),
2703 M_IFADDR, M_WAITOK | M_ZERO);
d5a2b87c 2704 ifa->ifa_ncnt = ncpus;
b2632176
SZ
2705 for (i = 0; i < ncpus; ++i) {
2706 struct ifaddr_container *ifac = &ifa->ifa_containers[i];
2707
2708 ifac->ifa_magic = IFA_CONTAINER_MAGIC;
2709 ifac->ifa = ifa;
2710 ifac->ifa_refcnt = 1;
2711 }
2712#ifdef IFADDR_DEBUG
2713 kprintf("alloc ifa %p %d\n", ifa, size);
2714#endif
2715 return ifa;
2716}
2717
b2632176
SZ
2718void
2719ifac_free(struct ifaddr_container *ifac, int cpu_id)
2720{
d5a2b87c 2721 struct ifaddr *ifa = ifac->ifa;
b2632176
SZ
2722
2723 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC);
2724 KKASSERT(ifac->ifa_refcnt == 0);
40f667f2 2725 KASSERT(ifac->ifa_listmask == 0,
ed20d0e3 2726 ("ifa is still on %#x lists", ifac->ifa_listmask));
b2632176
SZ
2727
2728 ifac->ifa_magic = IFA_CONTAINER_DEAD;
2729
b2632176 2730#ifdef IFADDR_DEBUG_VERBOSE
8967ddc7 2731 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id);
b2632176
SZ
2732#endif
2733
d5a2b87c 2734 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus,
ed20d0e3 2735 ("invalid # of ifac, %d", ifa->ifa_ncnt));
d5a2b87c
SZ
2736 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) {
2737#ifdef IFADDR_DEBUG
2738 kprintf("free ifa %p\n", ifa);
2739#endif
2740 kfree(ifa->ifa_containers, M_IFADDR);
2741 kfree(ifa, M_IFADDR);
2742 }
b2632176
SZ
2743}
2744
2745static void
002c1265 2746ifa_iflink_dispatch(netmsg_t nmsg)
b2632176
SZ
2747{
2748 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2749 struct ifaddr *ifa = msg->ifa;
2750 struct ifnet *ifp = msg->ifp;
2751 int cpu = mycpuid;
40f667f2 2752 struct ifaddr_container *ifac;
b2632176
SZ
2753
2754 crit_enter();
23027d35 2755
40f667f2 2756 ifac = &ifa->ifa_containers[cpu];
2adb7bc2 2757 ASSERT_IFAC_VALID(ifac);
40f667f2 2758 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0,
ed20d0e3 2759 ("ifaddr is on if_addrheads"));
23027d35 2760
40f667f2
SZ
2761 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD;
2762 if (msg->tail)
2763 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link);
2764 else
2765 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link);
23027d35 2766
b2632176
SZ
2767 crit_exit();
2768
002c1265 2769 ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
b2632176
SZ
2770}
2771
2772void
2773ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail)
2774{
2775 struct netmsg_ifaddr msg;
2776
002c1265 2777 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2778 0, ifa_iflink_dispatch);
b2632176
SZ
2779 msg.ifa = ifa;
2780 msg.ifp = ifp;
2781 msg.tail = tail;
2782
002c1265 2783 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2784}
2785
2786static void
002c1265 2787ifa_ifunlink_dispatch(netmsg_t nmsg)
b2632176
SZ
2788{
2789 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2790 struct ifaddr *ifa = msg->ifa;
2791 struct ifnet *ifp = msg->ifp;
2792 int cpu = mycpuid;
40f667f2 2793 struct ifaddr_container *ifac;
b2632176
SZ
2794
2795 crit_enter();
23027d35 2796
40f667f2 2797 ifac = &ifa->ifa_containers[cpu];
2adb7bc2 2798 ASSERT_IFAC_VALID(ifac);
40f667f2 2799 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD,
ed20d0e3 2800 ("ifaddr is not on if_addrhead"));
23027d35 2801
40f667f2
SZ
2802 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link);
2803 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD;
23027d35 2804
b2632176
SZ
2805 crit_exit();
2806
002c1265 2807 ifa_forwardmsg(&nmsg->lmsg, cpu + 1);
b2632176
SZ
2808}
2809
2810void
2811ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp)
2812{
2813 struct netmsg_ifaddr msg;
2814
002c1265 2815 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2816 0, ifa_ifunlink_dispatch);
b2632176
SZ
2817 msg.ifa = ifa;
2818 msg.ifp = ifp;
2819
002c1265 2820 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2821}
2822
2823static void
002c1265 2824ifa_destroy_dispatch(netmsg_t nmsg)
b2632176
SZ
2825{
2826 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg;
2827
2828 IFAFREE(msg->ifa);
002c1265 2829 ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1);
b2632176
SZ
2830}
2831
2832void
2833ifa_destroy(struct ifaddr *ifa)
2834{
2835 struct netmsg_ifaddr msg;
2836
002c1265 2837 netmsg_init(&msg.base, NULL, &curthread->td_msgport,
48e7b118 2838 0, ifa_destroy_dispatch);
b2632176
SZ
2839 msg.ifa = ifa;
2840
002c1265 2841 ifa_domsg(&msg.base.lmsg, 0);
b2632176
SZ
2842}
2843
2844struct lwkt_port *
d7944f0b 2845ifnet_portfn(int cpu)
b2632176 2846{
90af4fd3 2847 return &ifnet_threads[cpu].td_msgport;
b2632176
SZ
2848}
2849
c4882b7e
SZ
2850void
2851ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu)
2852{
ea2e6532
SZ
2853 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus);
2854
c4882b7e
SZ
2855 if (next_cpu < ncpus)
2856 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg);
2857 else
2858 lwkt_replymsg(lmsg, 0);
2859}
2860
2a3e1dbd 2861int
c4882b7e
SZ
2862ifnet_domsg(struct lwkt_msg *lmsg, int cpu)
2863{
2864 KKASSERT(cpu < ncpus);
2a3e1dbd 2865 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0);
c4882b7e
SZ
2866}
2867
8967ddc7
SZ
2868void
2869ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu)
2870{
2871 KKASSERT(cpu < ncpus);
2872 lwkt_sendmsg(ifnet_portfn(cpu), lmsg);
2873}
2874
c3c96e44
MD
2875/*
2876 * Generic netmsg service loop. Some protocols may roll their own but all
2877 * must do the basic command dispatch function call done here.
2878 */
2879static void
2880ifnet_service_loop(void *arg __unused)
2881{
002c1265 2882 netmsg_t msg;
c3c96e44
MD
2883
2884 while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
002c1265
MD
2885 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg"));
2886 msg->base.nm_dispatch(msg);
c3c96e44
MD
2887 }
2888}
2889
239bdb58
SZ
2890static void
2891if_start_rollup(void)
2892{
f0a26983
SZ
2893 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid];
2894 struct ifsubq_stage *stage;
28cc0c29 2895
f0a26983
SZ
2896 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) {
2897 struct ifaltq_subque *ifsq = stage->stg_subq;
3cab6b0d 2898 int is_sched = 0;
28cc0c29 2899
f0a26983 2900 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)
3cab6b0d 2901 is_sched = 1;
f0a26983 2902 ifsq_stage_remove(head, stage);
28cc0c29 2903
3cab6b0d 2904 if (is_sched) {
f0a26983 2905 ifsq_ifstart_schedule(ifsq, 1);
3cab6b0d
SZ
2906 } else {
2907 int start = 0;
28cc0c29 2908
f0a26983
SZ
2909 ALTQ_SQ_LOCK(ifsq);
2910 if (!ifsq_is_started(ifsq)) {
3cab6b0d
SZ
2911 /*
2912 * Hold the interlock of ifnet.if_start
2913 */
f0a26983 2914 ifsq_set_started(ifsq);
3cab6b0d
SZ
2915 start = 1;
2916 }
f0a26983 2917 ALTQ_SQ_UNLOCK(ifsq);
3cab6b0d
SZ
2918
2919 if (start)
f0a26983 2920 ifsq_ifstart_try(ifsq, 1);
3cab6b0d 2921 }
f0a26983
SZ
2922 KKASSERT((stage->stg_flags &
2923 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
28cc0c29 2924 }
239bdb58 2925}
239bdb58 2926
b2632176 2927static void
90af4fd3 2928ifnetinit(void *dummy __unused)
b2632176
SZ
2929{
2930 int i;
2931
2932 for (i = 0; i < ncpus; ++i) {
90af4fd3 2933 struct thread *thr = &ifnet_threads[i];
b2632176 2934
c3c96e44 2935 lwkt_create(ifnet_service_loop, NULL, NULL,
4643740a 2936 thr, TDF_NOSTART|TDF_FORCE_SPINPORT,
392cd266 2937 i, "ifnet %d", i);
b2632176 2938 netmsg_service_port_init(&thr->td_msgport);
c3c96e44 2939 lwkt_schedule(thr);
b2632176 2940 }
28cc0c29
SZ
2941
2942 for (i = 0; i < ncpus; ++i)
f0a26983 2943 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head);
239bdb58 2944 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART);
b2632176 2945}
bd08b792
RP
2946
2947struct ifnet *
2948ifnet_byindex(unsigned short idx)
2949{
2950 if (idx > if_index)
2951 return NULL;
2952 return ifindex2ifnet[idx];
2953}
2954
2955struct ifaddr *
2956ifaddr_byindex(unsigned short idx)
2957{
2958 struct ifnet *ifp;
2959
2960 ifp = ifnet_byindex(idx);
ec27babc
RP
2961 if (!ifp)
2962 return NULL;
d79c4535 2963 return TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
bd08b792 2964}
aeb3c11e
RP
2965
2966void
2967if_register_com_alloc(u_char type,
2968 if_com_alloc_t *a, if_com_free_t *f)
2969{
2970
2971 KASSERT(if_com_alloc[type] == NULL,
2972 ("if_register_com_alloc: %d already registered", type));
2973 KASSERT(if_com_free[type] == NULL,
2974 ("if_register_com_alloc: %d free already registered", type));
2975
2976 if_com_alloc[type] = a;
2977 if_com_free[type] = f;
2978}
2979
2980void
2981if_deregister_com_alloc(u_char type)
2982{
2983
2984 KASSERT(if_com_alloc[type] != NULL,
2985 ("if_deregister_com_alloc: %d not registered", type));
2986 KASSERT(if_com_free[type] != NULL,
2987 ("if_deregister_com_alloc: %d free not registered", type));
2988 if_com_alloc[type] = NULL;
2989 if_com_free[type] = NULL;
2990}
a317449e
SZ
2991
2992int
2993if_ring_count2(int cnt, int cnt_max)
2994{
2995 int shift = 0;
2996
2997 KASSERT(cnt_max >= 1 && powerof2(cnt_max),
ed20d0e3 2998 ("invalid ring count max %d", cnt_max));
a317449e
SZ
2999
3000 if (cnt <= 0)
3001 cnt = cnt_max;
3002 if (cnt > ncpus2)
3003 cnt = ncpus2;
3004 if (cnt > cnt_max)
3005 cnt = cnt_max;
3006
3007 while ((1 << (shift + 1)) <= cnt)
3008 ++shift;
3009 cnt = 1 << shift;
3010
3011 KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max,
ed20d0e3 3012 ("calculate cnt %d, ncpus2 %d, cnt max %d",
a317449e
SZ
3013 cnt, ncpus2, cnt_max));
3014 return cnt;
3015}
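/*
 * A sketch (hypothetical, not compiled) of how a multi-ring NIC driver
 * could use if_ring_count2(): passing 0 means "no tunable given, use the
 * maximum allowed", and the result is clamped to a power of 2 no larger
 * than ncpus2 and the hardware maximum (4 here, illustrative).
 */
#if 0
static int
example_rx_ring_count(int tunable)
{
        return (if_ring_count2(tunable, 4));
}
#endif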
b7a0c958
SZ
3016
3017void
3018ifq_set_maxlen(struct ifaltq *ifq, int len)
3019{
f0a26983 3020 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax);
b7a0c958 3021}
2cc2f639
SZ
3022
3023int
3024ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused)
3025{
3026 return ALTQ_SUBQ_INDEX_DEFAULT;
3027}