2 * Copyright 2001 Wasabi Systems, Inc.
5 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Wasabi Systems, Inc.
19 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
23 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Jason L. Wright
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE.
66 * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67 * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68 * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69 * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.54 2008/11/21 12:43:42 sephe Exp $
73 * Network interface bridge support.
77 * - Currently only supports Ethernet-like interfaces (Ethernet,
78 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
79 * to bridge other types of interfaces (FDDI-FDDI, and maybe
80 * consider heterogeneous bridges).
83 * Bridge's route information is duplicated to each CPUs:
86 * +-----------+ +-----------+ +-----------+ +-----------+
87 * | rtnode | | rtnode | | rtnode | | rtnode |
89 * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90 * +-----------+ +-----------+ +-----------+ +-----------+
93 * | | +----------+ | |
97 * +-------------->| timeout |<-------------+
101 * We choose to put timeout and dst_ifp into shared part, so updating
102 * them will be cheaper than using message forwarding. Also there is
103 * no need to use a spinlock to protect the updating: timeout and dst_ifp
104 * are not related, and the updating order of a specific field has no importance.
105 * The cache pollution by the shared part should not be heavy: in a stable
106 * setup, dst_ifp probably will not be changed in rtnode's life time,
107 * while timeout is refreshed once per second; most of the time, timeout
108 * and dst_ifp are read-only accessed.
111 * Bridge route information installation on bridge_input path:
113 * CPU0 CPU1 CPU2 CPU3
120 * ifnet0<-----------------------+
123 * rtnode exists?(Y)free nmsg :
154 * The netmsgs forwarded between protocol threads and ifnet threads are
155 * allocated with (M_WAITOK|M_NULLOK), so it will not fail under most
156 * cases (route information is too precious not to be installed :).
157 * Since multiple threads may try to install route information for the
158 * same dst eaddr, we look up route information in ifnet0. However, this
159 * lookup only needs to be performed on ifnet0, which is the start
160 * point of the route information installation process.
163 * Bridge route information deleting/flushing:
165 * CPU0 CPU1 CPU2 CPU3
169 * find suitable rtnodes,
170 * mark their rtinfo dead
172 * | domsg <------------------------------------------+
175 * V fwdmsg fwdmsg fwdmsg |
176 * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177 * delete rtnodes delete rtnodes delete rtnodes delete rtnodes
178 * w/ dead rtinfo w/ dead rtinfo w/ dead rtinfo w/ dead rtinfo
181 * All deleting/flushing operations are serialized by netisr0, so each
182 * operation only reaps the route information marked dead by itself.
185 * Bridge route information adding/deleting/flushing:
186 * Since all operations are serialized by the fixed message flow between
187 * ifnet threads, it is not possible to create corrupted per-cpu route
191 #include "opt_inet.h"
192 #include "opt_inet6.h"
194 #include <sys/param.h>
195 #include <sys/mbuf.h>
196 #include <sys/malloc.h>
197 #include <sys/protosw.h>
198 #include <sys/systm.h>
199 #include <sys/time.h>
200 #include <sys/socket.h> /* for net/if.h */
201 #include <sys/sockio.h>
202 #include <sys/ctype.h> /* string functions */
203 #include <sys/kernel.h>
204 #include <sys/random.h>
205 #include <sys/sysctl.h>
206 #include <sys/module.h>
207 #include <sys/proc.h>
208 #include <sys/lock.h>
209 #include <sys/thread.h>
210 #include <sys/thread2.h>
211 #include <sys/mpipe.h>
215 #include <net/if_dl.h>
216 #include <net/if_types.h>
217 #include <net/if_var.h>
218 #include <net/pfil.h>
219 #include <net/ifq_var.h>
220 #include <net/if_clone.h>
222 #include <netinet/in.h> /* for struct arpcom */
223 #include <netinet/in_systm.h>
224 #include <netinet/in_var.h>
225 #include <netinet/ip.h>
226 #include <netinet/ip_var.h>
228 #include <netinet/ip6.h>
229 #include <netinet6/ip6_var.h>
231 #include <netinet/if_ether.h> /* for struct arpcom */
232 #include <net/bridge/if_bridgevar.h>
233 #include <net/if_llc.h>
234 #include <net/netmsg2.h>
236 #include <net/route.h>
237 #include <sys/in_cksum.h>
240 * Size of the route hash table. Must be a power of two.
242 #ifndef BRIDGE_RTHASH_SIZE
243 #define BRIDGE_RTHASH_SIZE 1024
246 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
249 * Maximum number of addresses to cache.
251 #ifndef BRIDGE_RTABLE_MAX
252 #define BRIDGE_RTABLE_MAX 100
256 * Spanning tree defaults.
258 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
259 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
260 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
261 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
262 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
263 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
264 #define BSTP_DEFAULT_PATH_COST 55
267 * Timeout (in seconds) for entries learned dynamically.
269 #ifndef BRIDGE_RTABLE_TIMEOUT
270 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
274 * Number of seconds between walks of the route list.
276 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
277 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
281 * List of capabilities to mask on the member interface.
283 #define BRIDGE_IFCAPS_MASK IFCAP_TXCSUM
285 typedef int (*bridge_ctl_t)(struct bridge_softc *, void *);
287 struct netmsg_brctl {
288 struct netmsg bc_nmsg;
289 bridge_ctl_t bc_func;
290 struct bridge_softc *bc_sc;
294 struct netmsg_brsaddr {
295 struct netmsg br_nmsg;
296 struct bridge_softc *br_softc;
297 struct ifnet *br_dst_if;
298 struct bridge_rtinfo *br_rtinfo;
300 uint8_t br_dst[ETHER_ADDR_LEN];
304 struct netmsg_braddbif {
305 struct netmsg br_nmsg;
306 struct bridge_softc *br_softc;
307 struct bridge_ifinfo *br_bif_info;
308 struct ifnet *br_bif_ifp;
311 struct netmsg_brdelbif {
312 struct netmsg br_nmsg;
313 struct bridge_softc *br_softc;
314 struct bridge_ifinfo *br_bif_info;
315 struct bridge_iflist_head *br_bif_list;
318 struct netmsg_brsflags {
319 struct netmsg br_nmsg;
320 struct bridge_softc *br_softc;
321 struct bridge_ifinfo *br_bif_info;
322 uint32_t br_bif_flags;
325 eventhandler_tag bridge_detach_cookie = NULL;
327 extern struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
328 extern int (*bridge_output_p)(struct ifnet *, struct mbuf *);
329 extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
331 static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
333 static int bridge_clone_create(struct if_clone *, int);
334 static void bridge_clone_destroy(struct ifnet *);
336 static int bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
337 static void bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
338 static void bridge_ifdetach(void *, struct ifnet *);
339 static void bridge_init(void *);
340 static void bridge_stop(struct ifnet *);
341 static void bridge_start(struct ifnet *);
342 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
343 static int bridge_output(struct ifnet *, struct mbuf *);
345 static void bridge_forward(struct bridge_softc *, struct mbuf *m);
347 static void bridge_timer_handler(struct netmsg *);
348 static void bridge_timer(void *);
350 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
352 static void bridge_span(struct bridge_softc *, struct mbuf *);
354 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
355 struct ifnet *, uint8_t);
356 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
357 static void bridge_rtreap(struct bridge_softc *);
358 static void bridge_rttrim(struct bridge_softc *);
359 static int bridge_rtage_finddead(struct bridge_softc *);
360 static void bridge_rtage(struct bridge_softc *);
361 static void bridge_rtflush(struct bridge_softc *, int);
362 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
363 static int bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
364 struct ifnet *, uint8_t);
365 static void bridge_rtmsg_sync(struct bridge_softc *sc);
366 static void bridge_rtreap_handler(struct netmsg *);
367 static void bridge_rtinstall_handler(struct netmsg *);
368 static int bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
369 struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
371 static void bridge_rtable_init(struct bridge_softc *);
372 static void bridge_rtable_fini(struct bridge_softc *);
374 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
375 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
377 static void bridge_rtnode_insert(struct bridge_softc *,
378 struct bridge_rtnode *);
379 static void bridge_rtnode_destroy(struct bridge_softc *,
380 struct bridge_rtnode *);
382 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
384 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
386 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
387 struct bridge_ifinfo *);
388 static void bridge_delete_member(struct bridge_softc *,
389 struct bridge_iflist *, int);
390 static void bridge_delete_span(struct bridge_softc *,
391 struct bridge_iflist *);
393 static int bridge_control(struct bridge_softc *, u_long,
394 bridge_ctl_t, void *);
395 static int bridge_ioctl_init(struct bridge_softc *, void *);
396 static int bridge_ioctl_stop(struct bridge_softc *, void *);
397 static int bridge_ioctl_add(struct bridge_softc *, void *);
398 static int bridge_ioctl_del(struct bridge_softc *, void *);
399 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
400 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
401 static int bridge_ioctl_scache(struct bridge_softc *, void *);
402 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
403 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
404 static int bridge_ioctl_rts(struct bridge_softc *, void *);
405 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
406 static int bridge_ioctl_sto(struct bridge_softc *, void *);
407 static int bridge_ioctl_gto(struct bridge_softc *, void *);
408 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
409 static int bridge_ioctl_flush(struct bridge_softc *, void *);
410 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
411 static int bridge_ioctl_spri(struct bridge_softc *, void *);
412 static int bridge_ioctl_ght(struct bridge_softc *, void *);
413 static int bridge_ioctl_sht(struct bridge_softc *, void *);
414 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
415 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
416 static int bridge_ioctl_gma(struct bridge_softc *, void *);
417 static int bridge_ioctl_sma(struct bridge_softc *, void *);
418 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
419 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
420 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
421 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
422 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
424 static int bridge_ip_checkbasic(struct mbuf **mp);
426 static int bridge_ip6_checkbasic(struct mbuf **mp);
428 static int bridge_fragment(struct ifnet *, struct mbuf *,
429 struct ether_header *, int, struct llc *);
430 static void bridge_enqueue_internal(struct ifnet *, struct mbuf *m,
432 static void bridge_enqueue_handler(struct netmsg *);
433 static void bridge_pfil_enqueue_handler(struct netmsg *);
434 static void bridge_pfil_enqueue(struct ifnet *, struct mbuf *, int);
435 static void bridge_handoff(struct ifnet *, struct mbuf *);
437 static void bridge_del_bif_handler(struct netmsg *);
438 static void bridge_add_bif_handler(struct netmsg *);
439 static void bridge_set_bifflags_handler(struct netmsg *);
440 static void bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
441 struct bridge_iflist_head *);
442 static void bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
444 static void bridge_set_bifflags(struct bridge_softc *,
445 struct bridge_ifinfo *, uint32_t);
447 SYSCTL_DECL(_net_link);
448 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
450 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
451 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
452 static int pfil_member = 1; /* run pfil hooks on the member interface */
453 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
454 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
455 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
456 &pfil_bridge, 0, "Packet filter on the bridge interface");
457 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
458 &pfil_member, 0, "Packet filter on the member interface");
460 struct bridge_control_arg {
462 struct ifbreq ifbreq;
463 struct ifbifconf ifbifconf;
464 struct ifbareq ifbareq;
465 struct ifbaconf ifbaconf;
466 struct ifbrparam ifbrparam;
473 struct bridge_control {
474 bridge_ctl_t bc_func;
479 #define BC_F_COPYIN 0x01 /* copy arguments in */
480 #define BC_F_COPYOUT 0x02 /* copy arguments out */
481 #define BC_F_SUSER 0x04 /* do super-user check */
483 const struct bridge_control bridge_control_table[] = {
484 { bridge_ioctl_add, sizeof(struct ifbreq),
485 BC_F_COPYIN|BC_F_SUSER },
486 { bridge_ioctl_del, sizeof(struct ifbreq),
487 BC_F_COPYIN|BC_F_SUSER },
489 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
490 BC_F_COPYIN|BC_F_COPYOUT },
491 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
492 BC_F_COPYIN|BC_F_SUSER },
494 { bridge_ioctl_scache, sizeof(struct ifbrparam),
495 BC_F_COPYIN|BC_F_SUSER },
496 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
499 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
500 BC_F_COPYIN|BC_F_COPYOUT },
501 { bridge_ioctl_rts, sizeof(struct ifbaconf),
502 BC_F_COPYIN|BC_F_COPYOUT },
504 { bridge_ioctl_saddr, sizeof(struct ifbareq),
505 BC_F_COPYIN|BC_F_SUSER },
507 { bridge_ioctl_sto, sizeof(struct ifbrparam),
508 BC_F_COPYIN|BC_F_SUSER },
509 { bridge_ioctl_gto, sizeof(struct ifbrparam),
512 { bridge_ioctl_daddr, sizeof(struct ifbareq),
513 BC_F_COPYIN|BC_F_SUSER },
515 { bridge_ioctl_flush, sizeof(struct ifbreq),
516 BC_F_COPYIN|BC_F_SUSER },
518 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
520 { bridge_ioctl_spri, sizeof(struct ifbrparam),
521 BC_F_COPYIN|BC_F_SUSER },
523 { bridge_ioctl_ght, sizeof(struct ifbrparam),
525 { bridge_ioctl_sht, sizeof(struct ifbrparam),
526 BC_F_COPYIN|BC_F_SUSER },
528 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
530 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
531 BC_F_COPYIN|BC_F_SUSER },
533 { bridge_ioctl_gma, sizeof(struct ifbrparam),
535 { bridge_ioctl_sma, sizeof(struct ifbrparam),
536 BC_F_COPYIN|BC_F_SUSER },
538 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
539 BC_F_COPYIN|BC_F_SUSER },
541 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
542 BC_F_COPYIN|BC_F_SUSER },
544 { bridge_ioctl_addspan, sizeof(struct ifbreq),
545 BC_F_COPYIN|BC_F_SUSER },
546 { bridge_ioctl_delspan, sizeof(struct ifbreq),
547 BC_F_COPYIN|BC_F_SUSER },
549 static const int bridge_control_table_size =
550 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
552 LIST_HEAD(, bridge_softc) bridge_list;
554 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
556 bridge_clone_destroy, 0, IF_MAXUNIT);
559 bridge_modevent(module_t mod, int type, void *data)
563 LIST_INIT(&bridge_list);
564 if_clone_attach(&bridge_cloner);
565 bridge_input_p = bridge_input;
566 bridge_output_p = bridge_output;
567 bridge_detach_cookie = EVENTHANDLER_REGISTER(
568 ifnet_detach_event, bridge_ifdetach, NULL,
569 EVENTHANDLER_PRI_ANY);
571 bstp_linkstate_p = bstp_linkstate;
575 if (!LIST_EMPTY(&bridge_list))
577 EVENTHANDLER_DEREGISTER(ifnet_detach_event,
578 bridge_detach_cookie);
579 if_clone_detach(&bridge_cloner);
580 bridge_input_p = NULL;
581 bridge_output_p = NULL;
583 bstp_linkstate_p = NULL;
592 static moduledata_t bridge_mod = {
598 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
602 * bridge_clone_create:
604 * Create a new bridge instance.
607 bridge_clone_create(struct if_clone *ifc, int unit)
609 struct bridge_softc *sc;
614 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
615 ifp = sc->sc_ifp = &sc->sc_if;
617 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
618 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
619 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
620 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
621 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
622 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
623 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
625 /* Initialize our routing table. */
626 bridge_rtable_init(sc);
628 callout_init(&sc->sc_brcallout);
629 netmsg_init(&sc->sc_brtimemsg, &netisr_adone_rport,
630 MSGF_DROPABLE | MSGF_PRIORITY, bridge_timer_handler);
631 sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;
633 callout_init(&sc->sc_bstpcallout);
634 netmsg_init(&sc->sc_bstptimemsg, &netisr_adone_rport,
635 MSGF_DROPABLE | MSGF_PRIORITY, bstp_tick_handler);
636 sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;
638 /* Initialize per-cpu member iface lists */
639 sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
641 for (cpu = 0; cpu < ncpus; ++cpu)
642 LIST_INIT(&sc->sc_iflists[cpu]);
644 LIST_INIT(&sc->sc_spanlist);
647 if_initname(ifp, ifc->ifc_name, unit);
648 ifp->if_mtu = ETHERMTU;
649 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
650 ifp->if_ioctl = bridge_ioctl;
651 ifp->if_start = bridge_start;
652 ifp->if_init = bridge_init;
653 ifp->if_type = IFT_BRIDGE;
654 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
655 ifp->if_snd.ifq_maxlen = ifqmaxlen;
656 ifq_set_ready(&ifp->if_snd);
657 ifp->if_hdrlen = ETHER_HDR_LEN;
660 * Generate a random ethernet address and use the private AC:DE:48
664 bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
666 bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
668 eaddr[0] &= ~1; /* clear multicast bit */
669 eaddr[0] |= 2; /* set the LAA bit */
671 ether_ifattach(ifp, eaddr, NULL);
672 /* Now undo some of the damage... */
673 ifp->if_baudrate = 0;
674 ifp->if_type = IFT_BRIDGE;
676 crit_enter(); /* XXX MP */
677 LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
684 bridge_delete_dispatch(struct netmsg *nmsg)
686 struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
687 struct bridge_softc *sc = lmsg->u.ms_resultp;
688 struct ifnet *bifp = sc->sc_ifp;
689 struct bridge_iflist *bif;
691 lwkt_serialize_enter(bifp->if_serializer);
693 while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
694 bridge_delete_member(sc, bif, 0);
696 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
697 bridge_delete_span(sc, bif);
699 lwkt_serialize_exit(bifp->if_serializer);
701 lwkt_replymsg(lmsg, 0);
705 * bridge_clone_destroy:
707 * Destroy a bridge instance.
710 bridge_clone_destroy(struct ifnet *ifp)
712 struct bridge_softc *sc = ifp->if_softc;
713 struct lwkt_msg *lmsg;
716 lwkt_serialize_enter(ifp->if_serializer);
719 ifp->if_flags &= ~IFF_UP;
721 lwkt_serialize_exit(ifp->if_serializer);
723 netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_delete_dispatch);
724 lmsg = &nmsg.nm_lmsg;
725 lmsg->u.ms_resultp = sc;
726 lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
728 crit_enter(); /* XXX MP */
729 LIST_REMOVE(sc, sc_list);
734 /* Tear down the routing table. */
735 bridge_rtable_fini(sc);
737 /* Free per-cpu member iface lists */
738 kfree(sc->sc_iflists, M_DEVBUF);
746 * Handle a control request from the operator.
749 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
751 struct bridge_softc *sc = ifp->if_softc;
752 struct bridge_control_arg args;
753 struct ifdrv *ifd = (struct ifdrv *) data;
754 const struct bridge_control *bc;
757 ASSERT_SERIALIZED(ifp->if_serializer);
766 if (ifd->ifd_cmd >= bridge_control_table_size) {
770 bc = &bridge_control_table[ifd->ifd_cmd];
772 if (cmd == SIOCGDRVSPEC &&
773 (bc->bc_flags & BC_F_COPYOUT) == 0) {
776 } else if (cmd == SIOCSDRVSPEC &&
777 (bc->bc_flags & BC_F_COPYOUT)) {
782 if (bc->bc_flags & BC_F_SUSER) {
783 error = suser_cred(cr, NULL_CRED_OKAY);
788 if (ifd->ifd_len != bc->bc_argsize ||
789 ifd->ifd_len > sizeof(args.bca_u)) {
794 memset(&args, 0, sizeof(args));
795 if (bc->bc_flags & BC_F_COPYIN) {
796 error = copyin(ifd->ifd_data, &args.bca_u,
802 error = bridge_control(sc, cmd, bc->bc_func, &args);
804 KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
808 if (bc->bc_flags & BC_F_COPYOUT) {
809 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
810 if (args.bca_len != 0) {
811 KKASSERT(args.bca_kptr != NULL);
813 error = copyout(args.bca_kptr,
814 args.bca_uptr, args.bca_len);
816 kfree(args.bca_kptr, M_TEMP);
818 KKASSERT(args.bca_kptr == NULL);
821 KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
826 if (!(ifp->if_flags & IFF_UP) &&
827 (ifp->if_flags & IFF_RUNNING)) {
829 * If interface is marked down and it is running,
833 } else if ((ifp->if_flags & IFF_UP) &&
834 !(ifp->if_flags & IFF_RUNNING)) {
836 * If interface is marked up and it is stopped, then
844 /* Do not allow the MTU to be changed on the bridge */
849 error = ether_ioctl(ifp, cmd, data);
858 * Clear or restore unwanted capabilities on the member interface
861 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
866 if (ifp->if_ioctl == NULL)
869 bzero(&ifr, sizeof(ifr));
870 ifr.ifr_reqcap = ifp->if_capenable;
873 /* mask off and save capabilities */
874 bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
875 if (bif_info->bifi_mutecap != 0)
876 ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
878 /* restore muted capabilities */
879 ifr.ifr_reqcap |= bif_info->bifi_mutecap;
882 if (bif_info->bifi_mutecap != 0) {
883 lwkt_serialize_enter(ifp->if_serializer);
884 error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
885 lwkt_serialize_exit(ifp->if_serializer);
890 * bridge_lookup_member:
892 * Lookup a bridge member interface.
894 static struct bridge_iflist *
895 bridge_lookup_member(struct bridge_softc *sc, const char *name)
897 struct bridge_iflist *bif;
899 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
900 if (strcmp(bif->bif_ifp->if_xname, name) == 0)
907 * bridge_lookup_member_if:
909 * Lookup a bridge member interface by ifnet*.
911 static struct bridge_iflist *
912 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
914 struct bridge_iflist *bif;
916 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
917 if (bif->bif_ifp == member_ifp)
924 * bridge_lookup_member_ifinfo:
926 * Lookup a bridge member interface by bridge_ifinfo.
928 static struct bridge_iflist *
929 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
930 struct bridge_ifinfo *bif_info)
932 struct bridge_iflist *bif;
934 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
935 if (bif->bif_info == bif_info)
942 * bridge_delete_member:
944 * Delete the specified member interface.
947 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
950 struct ifnet *ifs = bif->bif_ifp;
951 struct ifnet *bifp = sc->sc_ifp;
952 struct bridge_ifinfo *bif_info = bif->bif_info;
953 struct bridge_iflist_head saved_bifs;
955 ASSERT_SERIALIZED(bifp->if_serializer);
956 KKASSERT(bif_info != NULL);
958 ifs->if_bridge = NULL;
961 * Release bridge interface's serializer:
962 * - To avoid possible dead lock.
963 * - Various sync operation will block the current thread.
965 lwkt_serialize_exit(bifp->if_serializer);
968 switch (ifs->if_type) {
972 * Take the interface out of promiscuous mode.
975 bridge_mutecaps(bif_info, ifs, 0);
982 panic("bridge_delete_member: impossible");
988 * Remove bifs from percpu linked list.
990 * Removed bifs are not freed immediately, instead,
991 * they are saved in saved_bifs. They will be freed
992 * after we make sure that no one is accessing them,
993 * i.e. after following netmsg_service_sync()
995 LIST_INIT(&saved_bifs);
996 bridge_del_bif(sc, bif_info, &saved_bifs);
999 * Make sure that all protocol threads:
1000 * o see 'ifs' if_bridge is changed
1001 * o know that bif is removed from the percpu linked list
1003 netmsg_service_sync();
1006 * Free the removed bifs
1008 KKASSERT(!LIST_EMPTY(&saved_bifs));
1009 while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1010 LIST_REMOVE(bif, bif_next);
1011 kfree(bif, M_DEVBUF);
1014 /* See the comment in bridge_ioctl_stop() */
1015 bridge_rtmsg_sync(sc);
1016 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1018 lwkt_serialize_enter(bifp->if_serializer);
1020 if (bifp->if_flags & IFF_RUNNING)
1021 bstp_initialization(sc);
1024 * Free the bif_info after bstp_initialization(), so that
1025 * bridge_softc.sc_root_port will not reference a dangling
1028 kfree(bif_info, M_DEVBUF);
1032 * bridge_delete_span:
1034 * Delete the specified span interface.
1037 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1039 KASSERT(bif->bif_ifp->if_bridge == NULL,
1040 ("%s: not a span interface", __func__));
1042 LIST_REMOVE(bif, bif_next);
1043 kfree(bif, M_DEVBUF);
1047 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1049 struct ifnet *ifp = sc->sc_ifp;
1051 if (ifp->if_flags & IFF_RUNNING)
1054 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1057 ifp->if_flags |= IFF_RUNNING;
1058 bstp_initialization(sc);
1063 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1065 struct ifnet *ifp = sc->sc_ifp;
1066 struct lwkt_msg *lmsg;
1068 if ((ifp->if_flags & IFF_RUNNING) == 0)
1071 callout_stop(&sc->sc_brcallout);
1074 lmsg = &sc->sc_brtimemsg.nm_lmsg;
1075 if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1076 /* Pending to be processed; drop it */
1083 ifp->if_flags &= ~IFF_RUNNING;
1085 lwkt_serialize_exit(ifp->if_serializer);
1087 /* Let everyone know that we are stopped */
1088 netmsg_service_sync();
1091 * Sync ifnetX msgports in the order we forward rtnode
1092 * installation message. This is used to make sure that
1093 * all rtnode installation messages sent by bridge_rtupdate()
1094 * during above netmsg_service_sync() are flushed.
1096 bridge_rtmsg_sync(sc);
1097 bridge_rtflush(sc, IFBF_FLUSHDYN);
1099 lwkt_serialize_enter(ifp->if_serializer);
1104 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1106 struct ifbreq *req = arg;
1107 struct bridge_iflist *bif;
1108 struct bridge_ifinfo *bif_info;
1109 struct ifnet *ifs, *bifp;
1113 ASSERT_SERIALIZED(bifp->if_serializer);
1115 ifs = ifunit(req->ifbr_ifsname);
1119 /* If it's in the span list, it can't be a member. */
1120 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1121 if (ifs == bif->bif_ifp)
1124 /* Allow the first Ethernet member to define the MTU */
1125 if (ifs->if_type != IFT_GIF) {
1126 if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1127 bifp->if_mtu = ifs->if_mtu;
1128 } else if (bifp->if_mtu != ifs->if_mtu) {
1129 if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1134 if (ifs->if_bridge == sc)
1137 if (ifs->if_bridge != NULL)
1140 bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1141 bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1142 bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1143 bif_info->bifi_ifp = ifs;
1146 * Release bridge interface's serializer:
1147 * - To avoid possible dead lock.
1148 * - Various sync operation will block the current thread.
1150 lwkt_serialize_exit(bifp->if_serializer);
1152 switch (ifs->if_type) {
1156 * Place the interface into promiscuous mode.
1158 error = ifpromisc(ifs, 1);
1160 lwkt_serialize_enter(bifp->if_serializer);
1163 bridge_mutecaps(bif_info, ifs, 1);
1166 case IFT_GIF: /* :^) */
1171 lwkt_serialize_enter(bifp->if_serializer);
1176 * Add bifs to percpu linked lists
1178 bridge_add_bif(sc, bif_info, ifs);
1180 lwkt_serialize_enter(bifp->if_serializer);
1182 if (bifp->if_flags & IFF_RUNNING)
1183 bstp_initialization(sc);
1188 * Everything has been setup, so let the member interface
1189 * deliver packets to this bridge on its input/output path.
1191 ifs->if_bridge = sc;
1194 if (bif_info != NULL)
1195 kfree(bif_info, M_DEVBUF);
/*
 * SIOCBRDGDEL: remove the named member interface from this bridge.
 */
1201 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1203 struct ifbreq *req = arg;
1204 struct bridge_iflist *bif;
1206 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
/* last arg 0: member interface itself is not going away — TODO confirm */
1210 bridge_delete_member(sc, bif, 0);
/*
 * SIOCBRDGGIFFLGS: report a member's flags and STP parameters to userland.
 */
1216 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1218 struct ifbreq *req = arg;
1219 struct bridge_iflist *bif;
1221 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1225 req->ifbr_ifsflags = bif->bif_flags;
1226 req->ifbr_state = bif->bif_state;
1227 req->ifbr_priority = bif->bif_priority;
1228 req->ifbr_path_cost = bif->bif_path_cost;
/* ioctl ABI only carries an 8-bit port number */
1229 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
/*
 * SIOCBRDGSIFFLGS: set a member's flags.  SPAN cannot be toggled here and
 * STP is only permitted on interface types that support it.
 */
1235 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1237 struct ifbreq *req = arg;
1238 struct bridge_iflist *bif;
1239 struct ifnet *bifp = sc->sc_ifp;
1241 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1245 if (req->ifbr_ifsflags & IFBIF_SPAN) {
1246 /* SPAN is readonly */
1250 if (req->ifbr_ifsflags & IFBIF_STP) {
1251 switch (bif->bif_ifp->if_type) {
1253 /* These can do spanning tree. */
1257 /* Nothing else can. */
/* drop the serializer: the flag sync below may block */
1262 lwkt_serialize_exit(bifp->if_serializer);
1263 bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1264 lwkt_serialize_enter(bifp->if_serializer);
/* re-run STP after a flag change while the bridge is up */
1266 if (bifp->if_flags & IFF_RUNNING)
1267 bstp_initialization(sc);
/*
 * SIOCBRDGSCACHE: set the maximum number of address-cache entries.
 */
1273 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1275 struct ifbrparam *param = arg;
1276 struct ifnet *ifp = sc->sc_ifp;
1278 sc->sc_brtmax = param->ifbrp_csize;
/* drop serializer around the (blocking) table trim — TODO confirm elided call */
1280 lwkt_serialize_exit(ifp->if_serializer);
1282 lwkt_serialize_enter(ifp->if_serializer);
/*
 * SIOCBRDGGCACHE: report the address-cache size limit.
 */
1288 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1290 struct ifbrparam *param = arg;
1292 param->ifbrp_csize = sc->sc_brtmax;
/*
 * SIOCBRDGGIFS: copy out one ifbreq per member (and per span port).
 * Two passes: first count entries, then fill a kernel buffer which the
 * generic bridge_control() machinery later copies to userland via bc_arg.
 */
1298 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1300 struct bridge_control_arg *bc_arg = arg;
1301 struct ifbifconf *bifc = arg;
1302 struct bridge_iflist *bif;
1303 struct ifbreq *breq;
/* pass 1: count members + span ports */
1307 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1309 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
/* len == 0 is a size probe: report required length and return */
1312 if (bifc->ifbic_len == 0) {
1313 bifc->ifbic_len = sizeof(*breq) * count;
1315 } else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1316 bifc->ifbic_len = 0;
1320 len = min(bifc->ifbic_len, sizeof(*breq) * count);
1321 KKASSERT(len >= sizeof(*breq));
/* M_NULLOK: allocation may fail; error path elided here */
1323 breq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO);
1325 bifc->ifbic_len = 0;
1328 bc_arg->bca_kptr = breq;
/* pass 2: fill entries for regular members */
1331 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1332 if (len < sizeof(*breq))
1335 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1336 sizeof(breq->ifbr_ifsname));
1337 breq->ifbr_ifsflags = bif->bif_flags;
1338 breq->ifbr_state = bif->bif_state;
1339 breq->ifbr_priority = bif->bif_priority;
1340 breq->ifbr_path_cost = bif->bif_path_cost;
1341 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1344 len -= sizeof(*breq);
/* span ports: only name/flags/portno are meaningful */
1346 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1347 if (len < sizeof(*breq))
1350 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1351 sizeof(breq->ifbr_ifsname));
1352 breq->ifbr_ifsflags = bif->bif_flags;
1353 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1356 len -= sizeof(*breq);
1359 bifc->ifbic_len = sizeof(*breq) * count;
1360 KKASSERT(bifc->ifbic_len > 0);
1362 bc_arg->bca_len = bifc->ifbic_len;
1363 bc_arg->bca_uptr = bifc->ifbic_req;
/*
 * SIOCBRDGRTS: copy out the routing (address) table, one ifbareq per
 * rtnode.  Same two-pass count/fill pattern as bridge_ioctl_gifs().
 */
1368 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1370 struct bridge_control_arg *bc_arg = arg;
1371 struct ifbaconf *bac = arg;
1372 struct bridge_rtnode *brt;
1373 struct ifbareq *bareq;
1377 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
/* size probe */
1380 if (bac->ifbac_len == 0) {
1381 bac->ifbac_len = sizeof(*bareq) * count;
1383 } else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1388 len = min(bac->ifbac_len, sizeof(*bareq) * count);
1389 KKASSERT(len >= sizeof(*bareq));
1391 bareq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO);
1392 if (bareq == NULL) {
1396 bc_arg->bca_kptr = bareq;
1399 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1400 struct bridge_rtinfo *bri = brt->brt_info;
1401 unsigned long expire;
1403 if (len < sizeof(*bareq))
1406 strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1407 sizeof(bareq->ifba_ifsname));
1408 memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
/* report remaining lifetime for dynamic entries, 0 otherwise */
1409 expire = bri->bri_expire;
1410 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1411 time_second < expire)
1412 bareq->ifba_expire = expire - time_second;
1414 bareq->ifba_expire = 0;
1415 bareq->ifba_flags = bri->bri_flags;
1418 len -= sizeof(*bareq);
1421 bac->ifbac_len = sizeof(*bareq) * count;
1422 KKASSERT(bac->ifbac_len > 0);
1424 bc_arg->bca_len = bac->ifbac_len;
1425 bc_arg->bca_uptr = bac->ifbac_req;
/*
 * SIOCBRDGSADDR: install a (usually static) address-table entry pointing
 * at the named member.  Must drop the serializer: bridge_rtsaddr() does a
 * blocking per-cpu sync.
 */
1430 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1432 struct ifbareq *req = arg;
1433 struct bridge_iflist *bif;
1434 struct ifnet *ifp = sc->sc_ifp;
1437 ASSERT_SERIALIZED(ifp->if_serializer);
1439 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1443 lwkt_serialize_exit(ifp->if_serializer);
1444 error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1446 lwkt_serialize_enter(ifp->if_serializer);
/*
 * SIOCBRDGSTO: set the dynamic-entry timeout (seconds).
 */
1451 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1453 struct ifbrparam *param = arg;
1455 sc->sc_brttimeout = param->ifbrp_ctime;
/*
 * SIOCBRDGGTO: report the dynamic-entry timeout.
 */
1461 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1463 struct ifbrparam *param = arg;
1465 param->ifbrp_ctime = sc->sc_brttimeout;
/*
 * SIOCBRDGDADDR: delete one address-table entry (blocking; serializer
 * dropped around the call).
 */
1471 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1473 struct ifbareq *req = arg;
1474 struct ifnet *ifp = sc->sc_ifp;
1477 lwkt_serialize_exit(ifp->if_serializer);
1478 error = bridge_rtdaddr(sc, req->ifba_dst);
1479 lwkt_serialize_enter(ifp->if_serializer);
/*
 * SIOCBRDGFLUSH: flush address-table entries (all, or dynamic only,
 * depending on ifbr_ifsflags).
 */
1484 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1486 struct ifbreq *req = arg;
1487 struct ifnet *ifp = sc->sc_ifp;
1489 lwkt_serialize_exit(ifp->if_serializer);
1490 bridge_rtflush(sc, req->ifbr_ifsflags);
1491 lwkt_serialize_enter(ifp->if_serializer);
/*
 * SIOCBRDGGPRI / SIOCBRDGSPRI: get/set bridge (STP) priority.  Setters
 * below re-run bstp_initialization() when the bridge is running so the
 * new parameter takes effect.
 */
1497 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1499 struct ifbrparam *param = arg;
1501 param->ifbrp_prio = sc->sc_bridge_priority;
1507 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1509 struct ifbrparam *param = arg;
1511 sc->sc_bridge_priority = param->ifbrp_prio;
1513 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1514 bstp_initialization(sc);
/*
 * Hello time, forward delay and max age are stored internally shifted
 * left by 8 (fixed-point, presumably 1/256-second units per 802.1D —
 * TODO confirm against bstp code); the ioctls exchange whole units.
 */
1520 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1522 struct ifbrparam *param = arg;
1524 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1530 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1532 struct ifbrparam *param = arg;
/* reject zero hello time */
1534 if (param->ifbrp_hellotime == 0)
1536 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1538 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1539 bstp_initialization(sc);
1545 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1547 struct ifbrparam *param = arg;
1549 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1555 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1557 struct ifbrparam *param = arg;
1559 if (param->ifbrp_fwddelay == 0)
1561 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1563 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1564 bstp_initialization(sc);
1570 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1572 struct ifbrparam *param = arg;
1574 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1580 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1582 struct ifbrparam *param = arg;
1584 if (param->ifbrp_maxage == 0)
1586 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1588 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1589 bstp_initialization(sc);
/*
 * SIOCBRDGSIFPRIO / SIOCBRDGSIFCOST: per-member STP priority / path cost.
 */
1595 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1597 struct ifbreq *req = arg;
1598 struct bridge_iflist *bif;
1600 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1604 bif->bif_priority = req->ifbr_priority;
1606 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1607 bstp_initialization(sc);
1613 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1615 struct ifbreq *req = arg;
1616 struct bridge_iflist *bif;
1618 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1622 bif->bif_path_cost = req->ifbr_path_cost;
1624 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1625 bstp_initialization(sc);
/*
 * SIOCBRDGADDS: add a span port.  Span ports receive copies of every
 * bridged frame but never participate in forwarding, so they may not
 * already be a bridge member and get no bridge_ifinfo.
 */
1631 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1633 struct ifbreq *req = arg;
1634 struct bridge_iflist *bif;
1637 ifs = ifunit(req->ifbr_ifsname);
/* reject duplicates */
1641 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1642 if (ifs == bif->bif_ifp)
1645 if (ifs->if_bridge != NULL)
1648 switch (ifs->if_type) {
1658 bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1660 bif->bif_flags = IFBIF_SPAN;
1661 /* NOTE: span bif does not need bridge_ifinfo */
1663 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
/*
 * SIOCBRDGDELS: remove a span port.
 */
1669 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1671 struct ifbreq *req = arg;
1672 struct bridge_iflist *bif;
1675 ifs = ifunit(req->ifbr_ifsname);
1679 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1680 if (ifs == bif->bif_ifp)
1686 bridge_delete_span(sc, bif);
/*
 * Runs on the bridge configuration port: remove a detaching interface
 * from its bridge (if it is a member) and from every bridge's span list.
 */
1692 bridge_ifdetach_dispatch(struct netmsg *nmsg)
1694 struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1695 struct ifnet *ifp, *bifp;
1696 struct bridge_softc *sc;
1697 struct bridge_iflist *bif;
1699 ifp = lmsg->u.ms_resultp;
1700 sc = ifp->if_bridge;
1702 /* Check if the interface is a bridge member */
1706 lwkt_serialize_enter(bifp->if_serializer);
1708 bif = bridge_lookup_member_if(sc, ifp);
/* last arg 1: the member interface itself is going away */
1710 bridge_delete_member(sc, bif, 1);
1712 /* XXX Why bif will be NULL? */
1715 lwkt_serialize_exit(bifp->if_serializer);
1719 crit_enter(); /* XXX MP */
1721 /* Check if the interface is a span port */
1722 LIST_FOREACH(sc, &bridge_list, sc_list) {
1725 lwkt_serialize_enter(bifp->if_serializer);
1727 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1728 if (ifp == bif->bif_ifp) {
1729 bridge_delete_span(sc, bif);
1733 lwkt_serialize_exit(bifp->if_serializer);
1739 lwkt_replymsg(lmsg, 0);
1745 * Detach an interface from a bridge. Called when a member
1746 * interface is detaching.
/* event-handler entry point; forwards the work to BRIDGE_CFGPORT */
1749 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1751 struct lwkt_msg *lmsg;
1754 netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_ifdetach_dispatch);
1755 lmsg = &nmsg.nm_lmsg;
1756 lmsg->u.ms_resultp = ifp;
/* synchronous: wait for the dispatch handler to finish */
1758 lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1764 * Initialize a bridge interface.
1767 bridge_init(void *xsc)
1769 bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1775 * Stop the bridge interface.
1778 bridge_stop(struct ifnet *ifp)
1780 bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
/*
 * Hand a packet to 'handler' in netisr context on the current cpu.  The
 * netmsg is embedded in the mbuf header, so no allocation is needed.
 */
1784 bridge_enqueue_internal(struct ifnet *dst_ifp, struct mbuf *m,
1785 netisr_fn_t handler)
1787 struct netmsg_packet *nmp;
1789 int cpu = mycpu->gd_cpuid;
1791 nmp = &m->m_hdr.mh_netmsg;
1792 netmsg_init(&nmp->nm_netmsg, &netisr_apanic_rport, 0, handler);
1794 nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1796 port = cpu_portfn(cpu);
1797 lwkt_sendmsg(port, &nmp->nm_netmsg.nm_lmsg);
/*
 * Like bridge_enqueue(), but route through the pfil handler when packet
 * filtering is active and 'runfilt' is set.
 */
1801 bridge_pfil_enqueue(struct ifnet *dst_ifp, struct mbuf *m,
1804 netisr_fn_t handler;
1806 if (runfilt && (inet_pfil_hook.ph_hashooks > 0
1808 || inet6_pfil_hook.ph_hashooks > 0
1811 handler = bridge_pfil_enqueue_handler;
1813 handler = bridge_enqueue_handler;
1815 bridge_enqueue_internal(dst_ifp, m, handler);
1821 * Enqueue a packet on a bridge member interface.
1825 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1827 bridge_enqueue_internal(dst_ifp, m, bridge_enqueue_handler);
1833 * Send output from a bridge member interface. This
1834 * performs the bridging function for locally originated
1837 * The mbuf has the Ethernet header already attached. We must
1838 * enqueue or free the mbuf before returning.
1841 bridge_output(struct ifnet *ifp, struct mbuf *m)
1843 struct bridge_softc *sc = ifp->if_bridge;
1844 struct ether_header *eh;
1845 struct ifnet *dst_if;
1847 ASSERT_NOT_SERIALIZED(ifp->if_serializer);
1850 * Make sure that we are still a member of a bridge interface.
/* ensure the Ethernet header is contiguous */
1857 if (m->m_len < ETHER_HDR_LEN) {
1858 m = m_pullup(m, ETHER_HDR_LEN);
1863 /* Serialize our bridge interface. */
1864 lwkt_serialize_enter(sc->sc_ifp->if_serializer);
1866 eh = mtod(m, struct ether_header *);
1869 * If bridge is down, but the original output interface is up,
1870 * go ahead and send out that interface. Otherwise, the packet
1873 if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
1879 * If the packet is a multicast, or we don't know a better way to
1880 * get there, send to all interfaces.
1882 if (ETHER_IS_MULTICAST(eh->ether_dhost))
1885 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1886 if (dst_if == NULL) {
1887 struct bridge_iflist *bif;
1894 * Following loop is MPSAFE; nothing is blocking
1897 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1898 dst_if = bif->bif_ifp;
1899 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1903 * If this is not the original output interface,
1904 * and the interface is participating in spanning
1905 * tree, make sure the port is in a state that
1906 * allows forwarding.
1908 if (dst_if != ifp &&
1909 (bif->bif_flags & IFBIF_STP) != 0) {
1910 switch (bif->bif_state) {
1911 case BSTP_IFSTATE_BLOCKING:
1912 case BSTP_IFSTATE_LISTENING:
1913 case BSTP_IFSTATE_DISABLED:
/* last member: hand off the original mbuf instead of copying */
1918 if (LIST_NEXT(bif, bif_next) == NULL) {
1922 mc = m_copypacket(m, MB_DONTWAIT);
1924 sc->sc_ifp->if_oerrors++;
1928 bridge_enqueue(dst_if, mc);
1932 lwkt_serialize_exit(sc->sc_ifp->if_serializer);
1938 * XXX Spanning tree consideration here?
/* known unicast destination: single enqueue outside the serializer */
1942 lwkt_serialize_exit(sc->sc_ifp->if_serializer);
1943 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1946 bridge_enqueue(dst_if, m);
1953 * Start output on a bridge.
/* if_start handler: drain if_snd, forwarding each frame by lookup or broadcast */
1957 bridge_start(struct ifnet *ifp)
1959 struct bridge_softc *sc = ifp->if_softc;
1961 ASSERT_SERIALIZED(ifp->if_serializer);
1963 ifp->if_flags |= IFF_OACTIVE;
1965 struct ifnet *dst_if = NULL;
1966 struct ether_header *eh;
1969 m = ifq_dequeue(&ifp->if_snd, NULL);
1973 if (m->m_len < sizeof(*eh)) {
1974 m = m_pullup(m, sizeof(*eh));
1980 eh = mtod(m, struct ether_header *);
/* unicast frames get a table lookup; bcast/mcast always flood */
1985 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1986 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1989 bridge_broadcast(sc, ifp, m, 0);
1991 bridge_enqueue(dst_if, m);
1993 ifp->if_flags &= ~IFF_OACTIVE;
1999 * The forwarding function of the bridge.
2002 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2004 struct bridge_iflist *bif;
2005 struct ifnet *src_if, *dst_if, *ifp;
2006 struct ether_header *eh;
2008 src_if = m->m_pkthdr.rcvif;
2011 ASSERT_SERIALIZED(ifp->if_serializer);
2014 ifp->if_ibytes += m->m_pkthdr.len;
2017 * Look up the bridge_iflist.
2019 bif = bridge_lookup_member_if(sc, src_if);
2021 /* Interface is not a bridge member (anymore?) */
/* STP: never forward frames arriving on a blocked/listening/disabled port */
2026 if (bif->bif_flags & IFBIF_STP) {
2027 switch (bif->bif_state) {
2028 case BSTP_IFSTATE_BLOCKING:
2029 case BSTP_IFSTATE_LISTENING:
2030 case BSTP_IFSTATE_DISABLED:
2036 eh = mtod(m, struct ether_header *);
2039 * If the interface is learning, and the source
2040 * address is valid and not multicast, record
/* the inline all-zero check rejects 00:00:00:00:00:00 as a source */
2043 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2044 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2045 (eh->ether_shost[0] == 0 &&
2046 eh->ether_shost[1] == 0 &&
2047 eh->ether_shost[2] == 0 &&
2048 eh->ether_shost[3] == 0 &&
2049 eh->ether_shost[4] == 0 &&
2050 eh->ether_shost[5] == 0) == 0)
2051 bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
/* learning-state ports learn (above) but do not forward */
2053 if ((bif->bif_flags & IFBIF_STP) != 0 &&
2054 bif->bif_state == BSTP_IFSTATE_LEARNING) {
2060 * At this point, the port either doesn't participate
2061 * in spanning tree or it is in the forwarding state.
2065 * If the packet is unicast, destined for someone on
2066 * "this" side of the bridge, drop it.
2068 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2069 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2070 if (src_if == dst_if) {
2075 /* ...forward it to all interfaces. */
2076 sc->sc_ifp->if_imcasts++;
/* unknown unicast (or multicast): flood to all members except src */
2080 if (dst_if == NULL) {
2081 bridge_broadcast(sc, src_if, m, 1);
2086 * At this point, we're dealing with a unicast frame
2087 * going to a different interface.
2089 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2093 bif = bridge_lookup_member_if(sc, dst_if);
2095 /* Not a member of the bridge (anymore?) */
/* check STP state on the *output* port too */
2100 if (bif->bif_flags & IFBIF_STP) {
2101 switch (bif->bif_state) {
2102 case BSTP_IFSTATE_DISABLED:
2103 case BSTP_IFSTATE_BLOCKING:
/* drop serializer: pfil hooks and the handoff may block */
2109 lwkt_serialize_exit(ifp->if_serializer);
2111 /* run the packet filter */
2112 if (inet_pfil_hook.ph_hashooks > 0
2114 || inet6_pfil_hook.ph_hashooks > 0
2117 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2122 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2127 bridge_handoff(dst_if, m);
2130 * ifp's serializer was held on entry and is expected to be held
2134 lwkt_serialize_enter(ifp->if_serializer);
2140 * Receive input from a member interface. Queue the packet for
2141 * bridging if it is not for us.
2143 static struct mbuf *
2144 bridge_input(struct ifnet *ifp, struct mbuf *m)
2146 struct bridge_softc *sc = ifp->if_bridge;
2147 struct bridge_iflist *bif;
2148 struct ifnet *bifp, *new_ifp;
2149 struct ether_header *eh;
2150 struct mbuf *mc, *mc2;
2153 * Make sure that we are still a member of a bridge interface.
2161 lwkt_serialize_enter(bifp->if_serializer);
2163 if ((bifp->if_flags & IFF_RUNNING) == 0)
2167 * Implement support for bridge monitoring. If this flag has been
2168 * set on this interface, discard the packet once we push it through
2169 * the bpf(4) machinery, but before we do, increment various counters
2170 * associated with this bridge.
2172 if (bifp->if_flags & IFF_MONITOR) {
2173 /* Change input interface to this bridge */
2174 m->m_pkthdr.rcvif = bifp;
2178 /* Update bridge's ifnet statistics */
2179 bifp->if_ipackets++;
2180 bifp->if_ibytes += m->m_pkthdr.len;
2181 if (m->m_flags & (M_MCAST | M_BCAST))
2189 eh = mtod(m, struct ether_header *);
2191 m->m_flags &= ~M_PROTO1; /* XXX Hack - loop prevention */
/* frame addressed to the bridge's own MAC: deliver locally */
2193 if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2195 * If the packet is for us, set the packets source as the
2196 * bridge, and return the packet back to ifnet.if_input for
2199 KASSERT(bifp->if_bridge == NULL,
2200 ("loop created in bridge_input"));
2206 * Tap all packets arriving on the bridge, no matter if
2207 * they are local destinations or not. In is in.
2211 bif = bridge_lookup_member_if(sc, ifp);
2217 if (m->m_flags & (M_BCAST | M_MCAST)) {
2218 /* Tap off 802.1D packets; they do not get forwarded. */
2219 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2220 ETHER_ADDR_LEN) == 0) {
2221 m = bstp_input(sc, bif, m);
2223 ("attempt to deliver 802.1D packet\n"));
2227 if (bif->bif_flags & IFBIF_STP) {
2228 switch (bif->bif_state) {
2229 case BSTP_IFSTATE_BLOCKING:
2230 case BSTP_IFSTATE_LISTENING:
2231 case BSTP_IFSTATE_DISABLED:
2237 * Make a deep copy of the packet and enqueue the copy
2238 * for bridge processing; return the original packet for
2241 mc = m_dup(m, MB_DONTWAIT);
2245 bridge_forward(sc, mc);
2248 * Reinject the mbuf as arriving on the bridge so we have a
2249 * chance at claiming multicast packets. We can not loop back
2250 * here from ether_input as a bridge is never a member of a
2253 KASSERT(bifp->if_bridge == NULL,
2254 ("loop created in bridge_input"));
2255 mc2 = m_dup(m, MB_DONTWAIT);
2258 /* Keep the layer3 header aligned */
2259 int i = min(mc2->m_pkthdr.len, max_protohdr);
2260 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2264 mc2->m_pkthdr.rcvif = bifp;
2265 bifp->if_ipackets++;
2266 bifp->if_input(bifp, mc2);
2269 /* Return the original packet for local processing. */
2273 if (bif->bif_flags & IFBIF_STP) {
2274 switch (bif->bif_state) {
2275 case BSTP_IFSTATE_BLOCKING:
2276 case BSTP_IFSTATE_LISTENING:
2277 case BSTP_IFSTATE_DISABLED:
2283 * Unicast. Make sure it's not for us.
2285 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2286 * is followed by breaking out of the loop.
2288 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2289 if (bif->bif_ifp->if_type != IFT_ETHER)
2292 /* It is destined for us. */
2293 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2294 ETHER_ADDR_LEN) == 0) {
/* arrived on a different member: redeliver on the owning member */
2295 if (bif->bif_ifp != ifp) {
2296 /* XXX loop prevention */
2297 m->m_flags |= M_PROTO1;
2298 new_ifp = bif->bif_ifp;
2300 if (bif->bif_flags & IFBIF_LEARNING) {
2301 bridge_rtupdate(sc, eh->ether_shost,
2302 ifp, IFBAF_DYNAMIC);
2307 /* We just received a packet that we sent out. */
2308 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2309 ETHER_ADDR_LEN) == 0) {
2316 /* Perform the bridge forwarding function. */
2317 bridge_forward(sc, m);
2320 lwkt_serialize_exit(bifp->if_serializer);
/* deferred local delivery: done after dropping the bridge serializer */
2322 if (new_ifp != NULL) {
2323 lwkt_serialize_enter(new_ifp->if_serializer);
2325 m->m_pkthdr.rcvif = new_ifp;
2326 new_ifp->if_ipackets++;
2327 new_ifp->if_input(new_ifp, m);
2330 lwkt_serialize_exit(new_ifp->if_serializer);
2338 * Send a frame to all interfaces that are members of
2339 * the bridge, except for the one on which the packet
2343 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2344 struct mbuf *m, int runfilt)
2346 struct bridge_iflist *bif;
2348 struct ifnet *dst_if, *bifp;
2353 ASSERT_SERIALIZED(bifp->if_serializer);
2355 /* run the packet filter */
2356 if (runfilt && (inet_pfil_hook.ph_hashooks > 0
2358 || inet6_pfil_hook.ph_hashooks > 0
/* pfil hooks may block; drop the serializer around them */
2361 lwkt_serialize_exit(bifp->if_serializer);
2363 /* Filter on the bridge interface before broadcasting */
2365 if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2370 if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2373 lwkt_serialize_enter(bifp->if_serializer);
2379 * Following loop is MPSAFE; nothing is blocking
2382 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2383 dst_if = bif->bif_ifp;
2384 if (dst_if == src_if)
2387 if (bif->bif_flags & IFBIF_STP) {
2388 switch (bif->bif_state) {
2389 case BSTP_IFSTATE_BLOCKING:
2390 case BSTP_IFSTATE_DISABLED:
/* non-DISCOVER ports only get broadcast/multicast, not unknown unicast */
2395 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2396 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2399 if ((dst_if->if_flags & IFF_RUNNING) == 0)
/* last member gets the original mbuf; earlier members get copies */
2402 if (LIST_NEXT(bif, bif_next) == NULL) {
2406 mc = m_copypacket(m, MB_DONTWAIT);
2408 sc->sc_ifp->if_oerrors++;
2412 bridge_pfil_enqueue(dst_if, mc, runfilt);
2421 * Duplicate a packet out one or more interfaces that are in span mode,
2422 * the original mbuf is unmodified.
2425 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2427 struct bridge_iflist *bif;
2428 struct ifnet *dst_if;
2431 if (LIST_EMPTY(&sc->sc_spanlist))
2434 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2435 dst_if = bif->bif_ifp;
2437 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2440 mc = m_copypacket(m, MB_DONTWAIT);
2442 sc->sc_ifp->if_oerrors++;
2446 bridge_enqueue(dst_if, mc);
/* forward the sync message to the next cpu in the chain */
2451 bridge_rtmsg_sync_handler(struct netmsg *nmsg)
2453 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
/*
 * Synchronously cycle a no-op message through every cpu's ifnet port so
 * that all previously sent routing-table messages are known processed.
 */
2457 bridge_rtmsg_sync(struct bridge_softc *sc)
2461 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2463 netmsg_init(&nmsg, &curthread->td_msgport, 0,
2464 bridge_rtmsg_sync_handler);
2465 ifnet_domsg(&nmsg.nm_lmsg, 0);
/*
 * Update an rtinfo in place.  The "!= before =" tests avoid dirtying
 * cache lines shared across cpus when the value is unchanged.
 */
2468 static __inline void
2469 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2470 int setflags, uint8_t flags, uint32_t timeo)
2472 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2473 bri->bri_ifp != dst_if)
2474 bri->bri_ifp = dst_if;
2475 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2476 bri->bri_expire != time_second + timeo)
2477 bri->bri_expire = time_second + timeo;
2479 bri->bri_flags = flags;
/*
 * Install (or refresh) the rtnode for 'dst' on the current cpu.  On cpu0
 * a new rtinfo is allocated and returned through *bri0; subsequent cpus
 * reuse it so all per-cpu rtnodes share one rtinfo.
 */
2483 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2484 struct ifnet *dst_if, int setflags, uint8_t flags,
2485 struct bridge_rtinfo **bri0)
2487 struct bridge_rtnode *brt;
2488 struct bridge_rtinfo *bri;
2491 brt = bridge_rtnode_lookup(sc, dst);
2494 * rtnode for 'dst' already exists. We inform the
2495 * caller about this by leaving bri0 as NULL. The
2496 * caller will terminate the installation upon getting
2497 * NULL bri0. However, we still need to update the
2500 KKASSERT(*bri0 == NULL);
2503 bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2504 flags, sc->sc_brttimeout);
2509 * We only need to check brtcnt on CPU0, since if limit
2510 * is to be exceeded, ENOSPC is returned. Caller knows
2511 * this and will terminate the installation.
2513 if (sc->sc_brtcnt >= sc->sc_brtmax)
2516 KKASSERT(*bri0 == NULL);
2517 bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2522 bri->bri_flags = IFBAF_DYNAMIC;
2523 bridge_rtinfo_update(bri, dst_if, setflags, flags,
2527 KKASSERT(bri != NULL);
2530 brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2532 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2533 brt->brt_info = bri;
2535 bridge_rtnode_insert(sc, brt);
/*
 * Per-cpu chain handler for route installation: install locally, then
 * forward to the next cpu; reply (with any error) once the chain ends.
 */
2540 bridge_rtinstall_handler(struct netmsg *nmsg)
2542 struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
2545 error = bridge_rtinstall_oncpu(brmsg->br_softc,
2546 brmsg->br_dst, brmsg->br_dst_if,
2547 brmsg->br_setflags, brmsg->br_flags,
/* errors can only originate on cpu0 (limit check / allocation) */
2550 KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2551 lwkt_replymsg(&nmsg->nm_lmsg, error);
2553 } else if (brmsg->br_rtinfo == NULL) {
2554 /* rtnode already exists for 'dst' */
2555 KKASSERT(mycpuid == 0);
2556 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2559 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2565 * Add/Update a bridge routing entry.
2568 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2569 struct ifnet *dst_if, uint8_t flags)
2571 struct bridge_rtnode *brt;
2574 * A route for this destination might already exist. If so,
2575 * update it, otherwise create a new one.
2577 if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2578 struct netmsg_brsaddr *brmsg;
2580 if (sc->sc_brtcnt >= sc->sc_brtmax)
/* M_NULLOK: may fail; failure path is elided here */
2583 brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
/* afree rport: the message frees itself, so this is fire-and-forget */
2587 netmsg_init(&brmsg->br_nmsg, &netisr_afree_rport, 0,
2588 bridge_rtinstall_handler);
2589 memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2590 brmsg->br_dst_if = dst_if;
2591 brmsg->br_flags = flags;
2592 brmsg->br_setflags = 0;
2593 brmsg->br_softc = sc;
2594 brmsg->br_rtinfo = NULL;
2596 ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
/* existing entry: refresh in place on this cpu */
2599 bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
/*
 * Synchronous variant used by the SIOCBRDGSADDR ioctl: installs the
 * entry on every cpu (br_setflags = 1) and waits for completion.
 */
2605 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2606 struct ifnet *dst_if, uint8_t flags)
2608 struct netmsg_brsaddr brmsg;
2610 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2612 netmsg_init(&brmsg.br_nmsg, &curthread->td_msgport, MSGF_PRIORITY,
2613 bridge_rtinstall_handler);
2614 memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2615 brmsg.br_dst_if = dst_if;
2616 brmsg.br_flags = flags;
2617 brmsg.br_setflags = 1;
2618 brmsg.br_softc = sc;
2619 brmsg.br_rtinfo = NULL;
2621 return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
2627 * Lookup the destination interface for an address.
2629 static struct ifnet *
2630 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2632 struct bridge_rtnode *brt;
2634 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2636 return brt->brt_info->bri_ifp;
/*
 * Per-cpu reap handler: destroy every rtnode whose shared rtinfo has
 * been marked dead, then forward to the next cpu.
 */
2640 bridge_rtreap_handler(struct netmsg *nmsg)
2642 struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2643 struct bridge_rtnode *brt, *nbrt;
2645 LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2646 if (brt->brt_info->bri_dead)
2647 bridge_rtnode_destroy(sc, brt);
2649 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
/*
 * Synchronously reap dead rtnodes on all cpus.
 */
2653 bridge_rtreap(struct bridge_softc *sc)
2657 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2659 netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_rtreap_handler);
2660 nmsg.nm_lmsg.u.ms_resultp = sc;
2662 ifnet_domsg(&nmsg.nm_lmsg, 0);
2668 * Trim the routing table so that we have a number
2669 * of routing entries less than or equal to the
2673 bridge_rttrim(struct bridge_softc *sc)
2675 struct bridge_rtnode *brt;
2678 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2680 /* Make sure we actually need to do this. */
2681 if (sc->sc_brtcnt <= sc->sc_brtmax)
2685 * Find out how many rtnodes are dead
2687 dead = bridge_rtage_finddead(sc);
2688 KKASSERT(dead <= sc->sc_brtcnt);
2690 if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2691 /* Enough dead rtnodes are found */
2697 * Kill some dynamic rtnodes to meet the brtmax
2699 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2700 struct bridge_rtinfo *bri = brt->brt_info;
2702 if (bri->bri_dead) {
2704 * We have counted this rtnode in
2705 * bridge_rtage_finddead()
2710 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
/* mark-for-death elided here; actual removal happens in bridge_rtreap() */
2713 KKASSERT(dead <= sc->sc_brtcnt);
2715 if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2716 /* Enough rtnodes are collected */
2728 * Aging timer for the bridge.
/*
 * callout handler: runs on BRIDGE_CFGCPU and merely kicks the real work
 * over to BRIDGE_CFGPORT via the pre-allocated sc_brtimemsg.
 */
2731 bridge_timer(void *arg)
2733 struct bridge_softc *sc = arg;
2734 struct lwkt_msg *lmsg;
2736 KKASSERT(mycpuid == BRIDGE_CFGCPU);
/* bail if the callout was rescheduled or stopped while we were queued */
2740 if (callout_pending(&sc->sc_brcallout) ||
2741 !callout_active(&sc->sc_brcallout)) {
2745 callout_deactivate(&sc->sc_brcallout);
2747 lmsg = &sc->sc_brtimemsg.nm_lmsg;
/* the embedded msg must be idle before reuse */
2748 KKASSERT(lmsg->ms_flags & MSGF_DONE);
2749 lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);
/*
 * Message-port half of the aging timer: age the table, then re-arm the
 * callout while the bridge stays up.
 */
2755 bridge_timer_handler(struct netmsg *nmsg)
2757 struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2759 KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
/* reply first so the static msg can be reused by the next timer tick */
2763 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2767 if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2768 callout_reset(&sc->sc_brcallout,
2769 bridge_rtable_prune_period * hz, bridge_timer, sc);
/*
 * Mark expired dynamic rtnodes dead and return how many were found.
 * Actual destruction is deferred to bridge_rtreap().
 */
2774 bridge_rtage_finddead(struct bridge_softc *sc)
2776 struct bridge_rtnode *brt;
2779 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2780 struct bridge_rtinfo *bri = brt->brt_info;
2782 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2783 time_second >= bri->bri_expire) {
2786 KKASSERT(dead <= sc->sc_brtcnt);
2795 * Perform an aging cycle.
2798 bridge_rtage(struct bridge_softc *sc)
2800 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2802 if (bridge_rtage_finddead(sc))
2809 * Remove all dynamic addresses from the bridge.
/* 'full' also removes static entries — TODO confirm in elided condition */
2812 bridge_rtflush(struct bridge_softc *sc, int full)
2814 struct bridge_rtnode *brt;
2817 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2820 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2821 struct bridge_rtinfo *bri = brt->brt_info;
2824 (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2836 * Remove an address from the table.
2839 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2841 struct bridge_rtnode *brt;
2843 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2845 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2848 /* TODO: add a cheaper delete operation */
2849 brt->brt_info->bri_dead = 1;
2857 * Delete routes to a specific member interface.
2860 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2862 struct bridge_rtnode *brt;
2865 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2868 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2869 struct bridge_rtinfo *bri = brt->brt_info;
2871 if (bri->bri_ifp == ifp &&
2873 (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2883 * bridge_rtable_init:
2885 * Initialize the route table for this bridge.
2888 bridge_rtable_init(struct bridge_softc *sc)
2893 * Initialize per-cpu hash tables
2895 sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2896 M_DEVBUF, M_WAITOK);
2897 for (cpu = 0; cpu < ncpus; ++cpu) {
2900 sc->sc_rthashs[cpu] =
2901 kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2902 M_DEVBUF, M_WAITOK);
2904 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2905 LIST_INIT(&sc->sc_rthashs[cpu][i]);
/* random key makes the hash unpredictable to remote senders */
2907 sc->sc_rthash_key = karc4random();
2910 * Initialize per-cpu lists
2912 sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2913 M_DEVBUF, M_WAITOK);
2914 for (cpu = 0; cpu < ncpus; ++cpu)
2915 LIST_INIT(&sc->sc_rtlists[cpu]);
2919 * bridge_rtable_fini:
2921 * Deconstruct the route table for this bridge.
2924 bridge_rtable_fini(struct bridge_softc *sc)
2929 * Free per-cpu hash tables
2931 for (cpu = 0; cpu < ncpus; ++cpu)
2932 kfree(sc->sc_rthashs[cpu], M_DEVBUF);
2933 kfree(sc->sc_rthashs, M_DEVBUF);
2936 * Free per-cpu lists
2938 kfree(sc->sc_rtlists, M_DEVBUF);
2942 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2943 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2945 #define mix(a, b, c) \
2947 a -= b; a -= c; a ^= (c >> 13); \
2948 b -= c; b -= a; b ^= (a << 8); \
2949 c -= a; c -= b; c ^= (b >> 13); \
2950 a -= b; a -= c; a ^= (c >> 12); \
2951 b -= c; b -= a; b ^= (a << 16); \
2952 c -= a; c -= b; c ^= (b >> 5); \
2953 a -= b; a -= c; a ^= (c >> 3); \
2954 b -= c; b -= a; b ^= (a << 10); \
2955 c -= a; c -= b; c ^= (b >> 15); \
2956 } while (/*CONSTCOND*/0)
/*
 * Hash a 6-byte Ethernet address into a bucket index, seeded with the
 * per-bridge random key.
 */
2958 static __inline uint32_t
2959 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2961 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2972 return (c & BRIDGE_RTHASH_MASK);
/*
 * memcmp-style compare of two Ethernet addresses; <0, 0 or >0.  Hash
 * chains are kept sorted by this ordering (see bridge_rtnode_insert).
 */
2978 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2982 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2983 d = ((int)a[i]) - ((int)b[i]);
2990 * bridge_rtnode_lookup:
2992 * Look up a bridge route node for the specified destination.
/*
 * Walks the current CPU's hash chain for `addr`.  NOTE(review): the
 * loop body that acts on `dir` (match / early-terminate on the sorted
 * chain) is elided from this extraction.
 */
2994 static struct bridge_rtnode *
2995 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2997 struct bridge_rtnode *brt;
3001 hash = bridge_rthash(sc, addr);
3002 LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3003 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3014 * bridge_rtnode_insert:
3016 * Insert the specified bridge node into the route table.
3017 * Caller has to make sure that rtnode does not exist.
/*
 * Inserts into the current CPU's hash chain, keeping the chain sorted
 * by address (bridge_rtnode_addr_cmp order), and links the node onto
 * the current CPU's rtnode list.  Duplicate insertion is a caller bug
 * and is caught by the KASSERT below.
 */
3020 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3022 struct bridge_rtnode *lbrt;
3026 hash = bridge_rthash(sc, brt->brt_addr);
3028 lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
/* Empty chain: the new node becomes the head. */
3030 LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3035 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3036 KASSERT(dir != 0, ("rtnode already exist\n"));
/* New address sorts before lbrt: insert in front of it. */
3039 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
/* End of chain reached without finding a larger entry: append. */
3042 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3043 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3046 lbrt = LIST_NEXT(lbrt, brt_hash);
3047 } while (lbrt != NULL);
/* Unreachable if the chain is correctly sorted. */
3049 panic("no suitable position found for rtnode\n");
3051 LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3054 * Update the brtcnt.
3055 * We only need to do it once and we do it on CPU0.
3062 * bridge_rtnode_destroy:
3064 * Destroy a bridge rtnode.
/*
 * Unlinks the per-CPU replica of the rtnode from both the hash chain
 * and the rtnode list, then frees it.  The shared brt_info is freed
 * only when the last CPU's replica is destroyed, and the bridge's
 * brtcnt is adjusted only on CPU0 — each shared action happens once.
 */
3067 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3069 LIST_REMOVE(brt, brt_hash);
3070 LIST_REMOVE(brt, brt_list);
3072 if (mycpuid + 1 == ncpus) {
3073 /* Free rtinfo associated with rtnode on the last cpu */
3074 kfree(brt->brt_info, M_DEVBUF);
3076 kfree(brt, M_DEVBUF);
3079 /* Update brtcnt only on CPU0 */
/*
 * Post-filter check on an mbuf that came back from pfil: inspect the
 * firewall flags for ipforward/dummynet tagging.  NOTE(review): the
 * bodies of both branches and the return value are elided from this
 * extraction.
 */
3085 bridge_post_pfil(struct mbuf *m)
3087 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3091 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3098 * Send bridge packets through pfil if they are one of the types pfil can deal
3099 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
3100 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
/*
 * Runs a bridged packet through the pfil(9) hooks for the member
 * interface (*ifp) and/or the bridge interface (*bifp), in the order
 * in_if -> bridge_if -> out_if.  On input (*mp) is the full Ethernet
 * frame; the Ethernet (and optional SNAP) header is stripped before
 * filtering and restored before returning.  Returns 0 to pass the
 * packet, non-zero to drop it; *mp may be freed/replaced by the hooks.
 * NOTE(review): this extraction is elided — several branches (snap
 * bookkeeping, error paths, "bad" labels) are not visible here.
 */
3104 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3106 int snap, error, i, hlen;
3107 struct ether_header *eh1, eh2;
3110 u_int16_t ether_type;
3113 error = -1; /* Default error if not error == 0 */
3115 if (pfil_bridge == 0 && pfil_member == 0)
3116 return (0); /* filtering is disabled */
/* Make sure the protocol headers the filters need are contiguous. */
3118 i = min((*mp)->m_pkthdr.len, max_protohdr);
3119 if ((*mp)->m_len < i) {
3120 *mp = m_pullup(*mp, i);
3122 kprintf("%s: m_pullup failed\n", __func__);
3127 eh1 = mtod(*mp, struct ether_header *);
3128 ether_type = ntohs(eh1->ether_type);
3131 * Check for SNAP/LLC.
/* A "type" below ETHERMTU is an 802.3 length; look for SNAP/LLC. */
3133 if (ether_type < ETHERMTU) {
3134 struct llc *llc2 = (struct llc *)(eh1 + 1);
3136 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3137 llc2->llc_dsap == LLC_SNAP_LSAP &&
3138 llc2->llc_ssap == LLC_SNAP_LSAP &&
3139 llc2->llc_control == LLC_UI) {
3140 ether_type = htons(llc2->llc_un.type_snap.ether_type);
3146 * If we're trying to filter bridge traffic, don't look at anything
3147 * other than IP and ARP traffic. If the filter doesn't understand
3148 * IPv6, don't allow IPv6 through the bridge either. This is lame
3149 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3150 * but of course we don't have an AppleTalk filter to begin with.
3151 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3154 switch (ether_type) {
3156 case ETHERTYPE_REVARP:
3157 return (0); /* Automatically pass */
3161 case ETHERTYPE_IPV6:
3167 * Check to see if the user wants to pass non-ip
3168 * packets, these will not be checked by pfil(9)
3169 * and passed unconditionally so the default is to drop.
3175 /* Strip off the Ethernet header and keep a copy. */
3176 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3177 m_adj(*mp, ETHER_HDR_LEN);
3179 /* Strip off snap header, if present */
3181 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3182 m_adj(*mp, sizeof(struct llc));
3186 * Check the IP header for alignment and errors
3188 if (dir == PFIL_IN) {
3189 switch (ether_type) {
3191 error = bridge_ip_checkbasic(mp);
3194 case ETHERTYPE_IPV6:
3195 error = bridge_ip6_checkbasic(mp);
3208 * Run the packet through pfil
3210 switch (ether_type) {
3213 * before calling the firewall, swap fields the same as
3214 * IP does. here we assume the header is contiguous
/* Hooks expect ip_len/ip_off in host byte order (like ip_input). */
3216 ip = mtod(*mp, struct ip *);
3218 ip->ip_len = ntohs(ip->ip_len);
3219 ip->ip_off = ntohs(ip->ip_off);
3222 * Run pfil on the member interface and the bridge, both can
3223 * be skipped by clearing pfil_member or pfil_bridge.
3226 * in_if -> bridge_if -> out_if
3228 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3229 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3230 if (*mp == NULL || error != 0) /* filter may consume */
3232 error = bridge_post_pfil(*mp);
3237 if (pfil_member && ifp != NULL) {
3238 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3239 if (*mp == NULL || error != 0) /* filter may consume */
3241 error = bridge_post_pfil(*mp);
3246 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3247 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3248 if (*mp == NULL || error != 0) /* filter may consume */
3250 error = bridge_post_pfil(*mp);
3255 /* check if we need to fragment the packet */
3256 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3257 i = (*mp)->m_pkthdr.len;
3258 if (i > ifp->if_mtu) {
3259 error = bridge_fragment(ifp, *mp, &eh2, snap,
3265 /* Recalculate the ip checksum and restore byte ordering */
3266 ip = mtod(*mp, struct ip *);
3267 hlen = ip->ip_hl << 2;
3268 if (hlen < sizeof(struct ip))
3270 if (hlen > (*mp)->m_len) {
3271 if ((*mp = m_pullup(*mp, hlen)) == 0)
3273 ip = mtod(*mp, struct ip *);
3277 ip->ip_len = htons(ip->ip_len);
3278 ip->ip_off = htons(ip->ip_off);
/* Options-less header can use the cheaper in_cksum_hdr(). */
3280 if (hlen == sizeof(struct ip))
3281 ip->ip_sum = in_cksum_hdr(ip);
3283 ip->ip_sum = in_cksum(*mp, hlen);
3287 case ETHERTYPE_IPV6:
3288 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3289 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3292 if (*mp == NULL || error != 0) /* filter may consume */
3295 if (pfil_member && ifp != NULL)
3296 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3299 if (*mp == NULL || error != 0) /* filter may consume */
3302 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3303 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3320 * Finally, put everything back the way it was and return
/* Re-prepend SNAP (if it was present) and the Ethernet header. */
3323 M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3326 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3329 M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3332 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3343 * Perform basic checks on header size since
3344 * pfil assumes ip_input has already processed
3345 * it for it. Cut-and-pasted from ip_input.c.
3346 * Given how simple the IPv6 version is,
3347 * does the IPv4 version really need to be
3350 * XXX Should we update ipstat here, or not?
3351 * XXX Right now we update ipstat but not
/*
 * Sanity-check the IPv4 header of *mp before handing it to pfil:
 * alignment, minimum length, version, header length, checksum and
 * total-length consistency.  May replace *mp (m_copyup/m_pullup).
 * NOTE(review): the "bad" label and final return are elided from this
 * extraction; the visible failure paths bump ipstat counters.
 */
3355 bridge_ip_checkbasic(struct mbuf **mp)
3357 struct mbuf *m = *mp;
/* Misaligned header: copy it up into an aligned mbuf. */
3365 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3366 if ((m = m_copyup(m, sizeof(struct ip),
3367 (max_linkhdr + 3) & ~3)) == NULL) {
3368 /* XXXJRT new stat, please */
3369 ipstat.ips_toosmall++;
/* Fallback so this cut-and-paste compiles without the macro. */
3374 #ifndef __predict_false
3375 #define __predict_false(x) x
3377 if (__predict_false(m->m_len < sizeof (struct ip))) {
3378 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3379 ipstat.ips_toosmall++;
3383 ip = mtod(m, struct ip *);
3384 if (ip == NULL) goto bad;
3386 if (ip->ip_v != IPVERSION) {
3387 ipstat.ips_badvers++;
3390 hlen = ip->ip_hl << 2;
3391 if (hlen < sizeof(struct ip)) { /* minimum header length */
3392 ipstat.ips_badhlen++;
3395 if (hlen > m->m_len) {
3396 if ((m = m_pullup(m, hlen)) == 0) {
3397 ipstat.ips_badhlen++;
3400 ip = mtod(m, struct ip *);
3401 if (ip == NULL) goto bad;
/* Trust hardware-verified checksums; otherwise compute in software. */
3404 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3405 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3407 if (hlen == sizeof(struct ip)) {
3408 sum = in_cksum_hdr(ip);
3410 sum = in_cksum(m, hlen);
3414 ipstat.ips_badsum++;
3418 /* Retrieve the packet length. */
3419 len = ntohs(ip->ip_len);
3422 * Check for additional length bogosity
3425 ipstat.ips_badlen++;
3430 * Check that the amount of data in the buffers
3431 * is as at least much as the IP header would have us expect.
3432 * Drop packet if shorter than we expect.
3434 if (m->m_pkthdr.len < len) {
3435 ipstat.ips_tooshort++;
3439 /* Checks out, proceed */
3450 * Same as above, but for IPv6.
3451 * Cut-and-pasted from ip6_input.c.
3452 * XXX Should we update ip6stat, or not?
/*
 * Sanity-check the IPv6 header of *mp before pfil: alignment, that the
 * full base header is in the first mbuf, and the version nibble.
 * May replace *mp.  NOTE(review): the "bad" label and final return are
 * elided from this extraction.
 */
3455 bridge_ip6_checkbasic(struct mbuf **mp)
3457 struct mbuf *m = *mp;
3458 struct ip6_hdr *ip6;
3461 * If the IPv6 header is not aligned, slurp it up into a new
3462 * mbuf with space for link headers, in the event we forward
3463 * it. Otherwise, if it is aligned, make sure the entire base
3464 * IPv6 header is in the first mbuf of the chain.
3467 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3468 struct ifnet *inifp = m->m_pkthdr.rcvif;
3469 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3470 (max_linkhdr + 3) & ~3)) == NULL) {
3471 /* XXXJRT new stat, please */
3472 ip6stat.ip6s_toosmall++;
3473 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3478 if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3479 struct ifnet *inifp = m->m_pkthdr.rcvif;
3480 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3481 ip6stat.ip6s_toosmall++;
3482 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3487 ip6 = mtod(m, struct ip6_hdr *);
3489 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3490 ip6stat.ip6s_badvers++;
3491 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3495 /* Checks out, proceed */
3508 * Return a fragmented mbuf chain.
/*
 * Fragment an IPv4 packet that exceeds the outgoing member's MTU, then
 * walk the resulting chain re-prepending the saved SNAP header (when
 * `snap` is set) and the saved Ethernet header `eh` onto each fragment.
 * NOTE(review): error-handling lines between the visible ones are
 * elided from this extraction.
 */
3511 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3512 int snap, struct llc *llc)
3518 if (m->m_len < sizeof(struct ip) &&
3519 (m = m_pullup(m, sizeof(struct ip))) == NULL)
3521 ip = mtod(m, struct ip *);
3523 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3528 /* walk the chain and re-add the Ethernet header */
3529 for (m0 = m; m0; m0 = m0->m_nextpkt) {
3532 M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3537 bcopy(llc, mtod(m0, caddr_t),
3538 sizeof(struct llc));
3540 M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3545 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3551 ipstat.ips_fragmented++;
/*
 * Netmsg handler that delivers a queued packet to its destination
 * interface.  The target ifnet travels in the message's ms_resultp
 * field; the actual transmit is done by bridge_handoff().
 */
3562 bridge_enqueue_handler(struct netmsg *nmsg)
3564 struct netmsg_packet *nmp;
3565 struct ifnet *dst_ifp;
3568 nmp = (struct netmsg_packet *)nmsg;
3570 dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3572 bridge_handoff(dst_ifp, m);
/*
 * Like bridge_enqueue_handler(), but runs the packet through the
 * output-side pfil hooks on the destination member interface first.
 * A NULL bridge pointer is passed so the bridge interface itself is
 * not filtered redundantly for every interface of a broadcast.
 */
3576 bridge_pfil_enqueue_handler(struct netmsg *nmsg)
3578 struct netmsg_packet *nmp;
3579 struct ifnet *dst_ifp;
3582 nmp = (struct netmsg_packet *)nmsg;
3584 dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3587 * Filter on the output interface. Pass a NULL bridge interface
3588 * pointer so we do not redundantly filter on the bridge for
3589 * each interface we broadcast on.
/* Only bother when pfil hooks are actually installed. */
3591 if (inet_pfil_hook.ph_hashooks > 0
3593 || inet6_pfil_hook.ph_hashooks > 0
3596 if (bridge_pfil(&m, NULL, dst_ifp, PFIL_OUT) != 0)
3601 bridge_handoff(dst_ifp, m);
/*
 * Hand a packet (possibly a fragment chain linked via m_nextpkt) to
 * the destination interface's send queue, one mbuf at a time, under
 * the interface serializer.  ALTQ classification is applied when the
 * queue has it enabled.
 */
3605 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3609 lwkt_serialize_enter(dst_ifp->if_serializer);
3611 /* We may be sending a fragment so traverse the mbuf */
3613 struct altq_pktattr pktattr;
/* Detach this mbuf from the chain before queueing it. */
3616 m->m_nextpkt = NULL;
3618 if (ifq_is_enabled(&dst_ifp->if_snd))
3619 altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3621 ifq_handoff(dst_ifp, m, &pktattr);
3624 lwkt_serialize_exit(dst_ifp->if_serializer);
/*
 * BRIDGE_CFGPORT-side dispatcher for bridge control operations: runs
 * the requested bc_func under the bridge interface's serializer and
 * replies to the originating thread with its error code.
 */
3628 bridge_control_dispatch(struct netmsg *nmsg)
3630 struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3631 struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3634 lwkt_serialize_enter(bifp->if_serializer);
3635 error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3636 lwkt_serialize_exit(bifp->if_serializer);
3638 lwkt_replymsg(&nmsg->nm_lmsg, error);
/*
 * Execute a bridge control function (bc_func, with bc_arg) on the
 * configuration port synchronously.  Must be entered with the bridge
 * serializer held; it is dropped around the blocking lwkt_domsg() and
 * re-acquired before returning, so callers' locking state is preserved.
 */
3642 bridge_control(struct bridge_softc *sc, u_long cmd,
3643 bridge_ctl_t bc_func, void *bc_arg)
3645 struct ifnet *bifp = sc->sc_ifp;
3646 struct netmsg_brctl bc_msg;
3647 struct netmsg *nmsg;
3650 ASSERT_SERIALIZED(bifp->if_serializer);
3652 bzero(&bc_msg, sizeof(bc_msg));
3653 nmsg = &bc_msg.bc_nmsg;
3655 netmsg_init(nmsg, &curthread->td_msgport, 0, bridge_control_dispatch);
3656 bc_msg.bc_func = bc_func;
3658 bc_msg.bc_arg = bc_arg;
/* Release the serializer while blocked on the config port. */
3660 lwkt_serialize_exit(bifp->if_serializer);
3661 error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
3662 lwkt_serialize_enter(bifp->if_serializer);
/*
 * Per-CPU handler for adding a member interface: allocate this CPU's
 * bridge_iflist replica, link it onto the CPU-local interface list and
 * forward the message to the next CPU so every CPU ends up with one.
 */
3667 bridge_add_bif_handler(struct netmsg *nmsg)
3669 struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3670 struct bridge_softc *sc;
3671 struct bridge_iflist *bif;
3673 sc = amsg->br_softc;
3675 bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3676 bif->bif_ifp = amsg->br_bif_ifp;
/* New members start out learning addresses and forwarding discovery. */
3677 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3678 bif->bif_onlist = 1;
3679 bif->bif_info = amsg->br_bif_info;
3681 LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3683 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
/*
 * Add a member interface to the bridge on every CPU by circulating a
 * netmsg through the ifnet message ports (handled per-CPU by
 * bridge_add_bif_handler).  Must be called without the bridge
 * serializer held, since ifnet_domsg() blocks.
 */
3687 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3690 struct netmsg_braddbif amsg;
3692 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3694 netmsg_init(&amsg.br_nmsg, &curthread->td_msgport, 0,
3695 bridge_add_bif_handler);
3697 amsg.br_bif_info = bif_info;
3698 amsg.br_bif_ifp = ifp;
3700 ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
/*
 * Per-CPU handler for deleting a member interface: unlink this CPU's
 * replica from the CPU-local list and stash it on the caller-supplied
 * list so the initiator can free all replicas after every CPU has
 * processed the message.
 */
3704 bridge_del_bif_handler(struct netmsg *nmsg)
3706 struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3707 struct bridge_softc *sc;
3708 struct bridge_iflist *bif;
3710 sc = dmsg->br_softc;
3713 * Locate the bif associated with the br_bif_info
3714 * on the current CPU
3716 bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3717 KKASSERT(bif != NULL && bif->bif_onlist);
3719 /* Remove the bif from the current CPU's iflist */
3720 bif->bif_onlist = 0;
3721 LIST_REMOVE(bif, bif_next);
3723 /* Save the removed bif for later freeing */
3724 LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3726 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
/*
 * Remove a member interface from the bridge on every CPU.  The removed
 * per-CPU bif replicas are collected on `saved_bifs` for the caller to
 * free once the message has visited all CPUs.  Must be called without
 * the bridge serializer held.
 */
3730 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3731 struct bridge_iflist_head *saved_bifs)
3733 struct netmsg_brdelbif dmsg;
3735 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3737 netmsg_init(&dmsg.br_nmsg, &curthread->td_msgport, 0,
3738 bridge_del_bif_handler);
3740 dmsg.br_bif_info = bif_info;
3741 dmsg.br_bif_list = saved_bifs;
3743 ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
/*
 * Per-CPU handler for updating a member interface's flags: find this
 * CPU's bif replica and overwrite its flags, then forward the message
 * so the change is applied on every CPU.
 */
3747 bridge_set_bifflags_handler(struct netmsg *nmsg)
3749 struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3750 struct bridge_softc *sc;
3751 struct bridge_iflist *bif;
3753 sc = smsg->br_softc;
3756 * Locate the bif associated with the br_bif_info
3757 * on the current CPU
3759 bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3760 KKASSERT(bif != NULL && bif->bif_onlist);
3762 bif->bif_flags = smsg->br_bif_flags;
3764 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
/*
 * Set a member interface's flags on every CPU by circulating a netmsg
 * through the ifnet ports (bridge_set_bifflags_handler applies it
 * per-CPU).  Must be called without the bridge serializer held.
 * NOTE(review): the function's tail is beyond the end of this chunk.
 */
3768 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3771 struct netmsg_brsflags smsg;
3773 ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3775 netmsg_init(&smsg.br_nmsg, &curthread->td_msgport, 0,
3776 bridge_set_bifflags_handler);
3778 smsg.br_bif_info = bif_info;
3779 smsg.br_bif_flags = bif_flags;
3781 ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);