1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed for the NetBSD Project by
18  *      Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  */
70
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *      - Currently only supports Ethernet-like interfaces (Ethernet,
77  *        802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *        to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *        consider heterogeneous bridges).
80  *
81  *
82  * The bridge's route information is duplicated to each CPU:
83  *
84  *      CPU0          CPU1          CPU2          CPU3
85  * +-----------+ +-----------+ +-----------+ +-----------+
86  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
87  * |           | |           | |           | |           |
88  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
89  * +-----------+ +-----------+ +-----------+ +-----------+
90  *       |         |                     |         |
91  *       |         |                     |         |
92  *       |         |     +----------+    |         |
93  *       |         |     |  rtinfo  |    |         |
94  *       |         +---->|          |<---+         |
95  *       |               |  flags   |              |
96  *       +-------------->|  timeout |<-------------+
97  *                       |  dst_ifp |
98  *                       +----------+
99  *
100  * We choose to put timeout and dst_ifp into the shared part, so that
101  * updating them is cheaper than using message forwarding.  Also there is
102  * no need to use a spinlock to protect the updates: timeout and dst_ifp
103  * are unrelated, and the order in which each field is updated does not
104  * matter.  The cache pollution caused by the shared part should not be
105  * heavy: in a stable setup, dst_ifp will probably not change during the
106  * rtnode's lifetime, while timeout is refreshed once per second; most of
107  * the time, timeout and dst_ifp are only read.
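 *
 * An abridged sketch of the two structures described above, showing only
 * the fields relevant to this discussion (field names match their use
 * later in this file; the full definitions live in
 * net/bridge/if_bridgevar.h):
 *
 *      struct bridge_rtinfo {                  <- shared part, one instance
 *              struct ifnet    *bri_ifp;          (dst_ifp)
 *              unsigned long   bri_expire;        (timeout)
 *              uint8_t         bri_flags;
 *      };
 *
 *      struct bridge_rtnode {                  <- per-cpu part, one per cpu
 *              uint8_t         brt_addr[ETHER_ADDR_LEN];  (dst eaddr)
 *              struct bridge_rtinfo *brt_info;            (-> shared part)
 *      };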
108  *
109  *
110  * Bridge route information installation on bridge_input path:
111  *
112  *      CPU0           CPU1         CPU2          CPU3
113  *
114  *                               tcp_thread2
115  *                                    |
116  *                                alloc nmsg
117  *                    snd nmsg        |
118  *                    w/o rtinfo      |
119  *      ifnet0<-----------------------+
120  *        |                           :
121  *    lookup dst                      :
122  *   rtnode exists?(Y)free nmsg       :
123  *        |(N)                        :
124  *        |
125  *  alloc rtinfo
126  *  alloc rtnode
127  * install rtnode
128  *        |
129  *        +---------->ifnet1
130  *        : fwd nmsg    |
131  *        : w/ rtinfo   |
132  *        :             |
133  *        :             |
134  *                 alloc rtnode
135  *               (w/ nmsg's rtinfo)
136  *                install rtnode
137  *                      |
138  *                      +---------->ifnet2
139  *                      : fwd nmsg    |
140  *                      : w/ rtinfo   |
141  *                      :             |
142  *                      :         same as ifnet1
143  *                                    |
144  *                                    +---------->ifnet3
145  *                                    : fwd nmsg    |
146  *                                    : w/ rtinfo   |
147  *                                    :             |
148  *                                    :         same as ifnet1
149  *                                               free nmsg
150  *                                                  :
151  *                                                  :
152  *
153  * The netmsgs forwarded between protocol threads and ifnet threads are
154  * allocated with (M_WAITOK|M_NULLOK), so the allocation will not fail in
155  * most cases (route information is too precious not to be installed :).
156  * Since multiple threads may try to install route information for the
157  * same dst eaddr, we look the route information up in ifnet0.  However,
158  * this lookup only needs to be performed on ifnet0, which is the start
159  * point of the route information installation process.
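 *
 * The per-ifnet handler sketched in C-like pseudo-code (a simplification
 * of bridge_rtinstall_handler(), not the exact implementation):
 *
 *      handler(nmsg):
 *              if (nmsg carries no rtinfo):            <- first hop, ifnet0
 *                      if (an rtnode for dst eaddr already exists):
 *                              free nmsg and return
 *                      allocate rtinfo
 *              allocate rtnode and point it at the rtinfo
 *              install rtnode into this cpu's route table
 *              if (there is a next cpu):
 *                      forward nmsg (now carrying the rtinfo) to it
 *              else:
 *                      free nmsg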
160  *
161  *
162  * Bridge route information deleting/flushing:
163  *
164  *  CPU0            CPU1             CPU2             CPU3
165  *
166  * netisr0
167  *   |
168  * find suitable rtnodes,
169  * mark their rtinfo dead
170  *   |
171  *   | domsg <------------------------------------------+
172  *   |                                                  | replymsg
173  *   |                                                  |
174  *   V     fwdmsg           fwdmsg           fwdmsg     |
175  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
176  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
177  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
178  *                                                    free dead rtinfos
179  *
180  * All deleting/flushing operations are serialized by netisr0, so each
181  * operation only reaps the route information marked dead by itself.
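 *
 * In pseudo-code (a sketch of the flow above, not the exact code):
 *
 *      netisr0:
 *              mark the rtinfo of the matching rtnodes dead
 *              domsg down the ifnet0 -> ifnet3 chain and wait
 *      each ifnet thread:
 *              delete the rtnodes whose rtinfo is marked dead
 *              fwdmsg to the next ifnet thread
 *      last ifnet thread:
 *              additionally free the dead rtinfos, then replymsg to netisr0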
182  *
183  *
184  * Bridge route information adding/deleting/flushing:
185  * Since all operations are serialized by the fixed message flow between
186  * ifnet threads, it is not possible to create corrupted per-cpu route
187  * information.
188  *
189  *
190  *
191  * Percpu member interface list iteration with blocking operation:
192  * Since a bridge can only delete one member interface at a time and
193  * the deleted member interface is not freed after netmsg_service_sync(),
194  * the following pattern is used to make sure that even if a member
195  * interface is ripped from the percpu list during the blocking operation,
196  * the iteration can still keep going:
197  *
198  * TAILQ_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
199  *     blocking operation;
200  *     blocking operation;
201  *     ...
202  *     ...
203  *     if (nbif != NULL && !nbif->bif_onlist) {
204  *         KKASSERT(bif->bif_onlist);
205  *         nbif = TAILQ_NEXT(bif, bif_next);
206  *     }
207  * }
208  *
209  * As mentioned above, only one member interface can be unlinked from the
210  * percpu member interface list at a time, so either bif or nbif may not be
211  * on the list, but _not_ both.  To keep the iteration going we only care
212  * about nbif, not bif.  Since a removed member interface will only be freed
213  * after we finish our work, it is safe to access any field of an unlinked
214  * bif (here bif_onlist).  If nbif is no longer on the list, then bif must
215  * be on the list, so we change nbif to the next element of bif and keep going.
216  */
217
218 #include "opt_inet.h"
219 #include "opt_inet6.h"
220
221 #include <sys/param.h>
222 #include <sys/mbuf.h>
223 #include <sys/malloc.h>
224 #include <sys/protosw.h>
225 #include <sys/systm.h>
226 #include <sys/time.h>
227 #include <sys/socket.h> /* for net/if.h */
228 #include <sys/sockio.h>
229 #include <sys/ctype.h>  /* string functions */
230 #include <sys/kernel.h>
231 #include <sys/random.h>
232 #include <sys/sysctl.h>
233 #include <sys/module.h>
234 #include <sys/proc.h>
235 #include <sys/priv.h>
236 #include <sys/lock.h>
237 #include <sys/thread.h>
238 #include <sys/thread2.h>
239 #include <sys/mpipe.h>
240
241 #include <net/bpf.h>
242 #include <net/if.h>
243 #include <net/if_dl.h>
244 #include <net/if_types.h>
245 #include <net/if_var.h>
246 #include <net/pfil.h>
247 #include <net/ifq_var.h>
248 #include <net/if_clone.h>
249
250 #include <netinet/in.h> /* for struct arpcom */
251 #include <netinet/in_systm.h>
252 #include <netinet/in_var.h>
253 #include <netinet/ip.h>
254 #include <netinet/ip_var.h>
255 #ifdef INET6
256 #include <netinet/ip6.h>
257 #include <netinet6/ip6_var.h>
258 #endif
259 #include <netinet/if_ether.h> /* for struct arpcom */
260 #include <net/bridge/if_bridgevar.h>
261 #include <net/if_llc.h>
262 #include <net/netmsg2.h>
263
264 #include <net/route.h>
265 #include <sys/in_cksum.h>
266
267 /*
268  * Size of the route hash table.  Must be a power of two.
269  */
270 #ifndef BRIDGE_RTHASH_SIZE
271 #define BRIDGE_RTHASH_SIZE              1024
272 #endif
273
274 #define BRIDGE_RTHASH_MASK              (BRIDGE_RTHASH_SIZE - 1)
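
/*
 * Since BRIDGE_RTHASH_SIZE is a power of two, a hash value is reduced to
 * a table index with a simple mask instead of a modulo operation, e.g.
 * (the hash variable name is illustrative):
 *
 *      idx = eaddr_hash & BRIDGE_RTHASH_MASK;
 */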
275
276 /*
277  * Maximum number of addresses to cache.
278  */
279 #ifndef BRIDGE_RTABLE_MAX
280 #define BRIDGE_RTABLE_MAX               100
281 #endif
282
283 /*
284  * Spanning tree defaults.
285  */
286 #define BSTP_DEFAULT_MAX_AGE            (20 * 256)
287 #define BSTP_DEFAULT_HELLO_TIME         (2 * 256)
288 #define BSTP_DEFAULT_FORWARD_DELAY      (15 * 256)
289 #define BSTP_DEFAULT_HOLD_TIME          (1 * 256)
290 #define BSTP_DEFAULT_BRIDGE_PRIORITY    0x8000
291 #define BSTP_DEFAULT_PORT_PRIORITY      0x80
292 #define BSTP_DEFAULT_PATH_COST          55
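
/*
 * The BSTP timing values above are expressed in units of 1/256 of a
 * second (the 802.1D encoding); e.g. BSTP_DEFAULT_MAX_AGE is (20 * 256),
 * i.e. a max age of 20 seconds.
 */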
293
294 /*
295  * Timeout (in seconds) for entries learned dynamically.
296  */
297 #ifndef BRIDGE_RTABLE_TIMEOUT
298 #define BRIDGE_RTABLE_TIMEOUT           (20 * 60)       /* same as ARP */
299 #endif
300
301 /*
302  * Number of seconds between walks of the route list.
303  */
304 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
305 #define BRIDGE_RTABLE_PRUNE_PERIOD      (5 * 60)
306 #endif
307
308 /*
309  * List of capabilities to mask on the member interface.
310  */
311 #define BRIDGE_IFCAPS_MASK              (IFCAP_TXCSUM | IFCAP_TSO)
312
313 typedef int     (*bridge_ctl_t)(struct bridge_softc *, void *);
314
315 struct netmsg_brctl {
316         struct netmsg_base      base;
317         bridge_ctl_t            bc_func;
318         struct bridge_softc     *bc_sc;
319         void                    *bc_arg;
320 };
321
322 struct netmsg_brsaddr {
323         struct netmsg_base      base;
324         struct bridge_softc     *br_softc;
325         struct ifnet            *br_dst_if;
326         struct bridge_rtinfo    *br_rtinfo;
327         int                     br_setflags;
328         uint8_t                 br_dst[ETHER_ADDR_LEN];
329         uint8_t                 br_flags;
330 };
331
332 struct netmsg_braddbif {
333         struct netmsg_base      base;
334         struct bridge_softc     *br_softc;
335         struct bridge_ifinfo    *br_bif_info;
336         struct ifnet            *br_bif_ifp;
337 };
338
339 struct netmsg_brdelbif {
340         struct netmsg_base      base;
341         struct bridge_softc     *br_softc;
342         struct bridge_ifinfo    *br_bif_info;
343         struct bridge_iflist_head *br_bif_list;
344 };
345
346 struct netmsg_brsflags {
347         struct netmsg_base      base;
348         struct bridge_softc     *br_softc;
349         struct bridge_ifinfo    *br_bif_info;
350         uint32_t                br_bif_flags;
351 };
352
353 eventhandler_tag        bridge_detach_cookie = NULL;
354
355 extern  struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
356 extern  int (*bridge_output_p)(struct ifnet *, struct mbuf *);
357 extern  void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
358 extern  struct ifnet *(*bridge_interface_p)(void *if_bridge);
359
360 static int      bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
361
362 static int      bridge_clone_create(struct if_clone *, int, caddr_t);
363 static int      bridge_clone_destroy(struct ifnet *);
364
365 static int      bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void     bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
367 static void     bridge_ifdetach(void *, struct ifnet *);
368 static void     bridge_init(void *);
369 static int      bridge_from_us(struct bridge_softc *, struct ether_header *);
370 static void     bridge_stop(struct ifnet *);
371 static void     bridge_start(struct ifnet *, struct ifaltq_subque *);
372 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
373 static int      bridge_output(struct ifnet *, struct mbuf *);
374 static struct ifnet *bridge_interface(void *if_bridge);
375
376 static void     bridge_forward(struct bridge_softc *, struct mbuf *m);
377
378 static void     bridge_timer_handler(netmsg_t);
379 static void     bridge_timer(void *);
380
381 static void     bridge_start_bcast(struct bridge_softc *, struct mbuf *);
382 static void     bridge_broadcast(struct bridge_softc *, struct ifnet *,
383                     struct mbuf *);
384 static void     bridge_span(struct bridge_softc *, struct mbuf *);
385
386 static int      bridge_rtupdate(struct bridge_softc *, const uint8_t *,
387                     struct ifnet *, uint8_t);
388 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
389 static void     bridge_rtreap(struct bridge_softc *);
390 static void     bridge_rtreap_async(struct bridge_softc *);
391 static void     bridge_rttrim(struct bridge_softc *);
392 static int      bridge_rtage_finddead(struct bridge_softc *);
393 static void     bridge_rtage(struct bridge_softc *);
394 static void     bridge_rtflush(struct bridge_softc *, int);
395 static int      bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
396 static int      bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
397                     struct ifnet *, uint8_t);
398 static void     bridge_rtmsg_sync(struct bridge_softc *sc);
399 static void     bridge_rtreap_handler(netmsg_t);
400 static void     bridge_rtinstall_handler(netmsg_t);
401 static int      bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
402                     struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
403
404 static void     bridge_rtable_init(struct bridge_softc *);
405 static void     bridge_rtable_fini(struct bridge_softc *);
406
407 static int      bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
408 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
409                     const uint8_t *);
410 static void     bridge_rtnode_insert(struct bridge_softc *,
411                     struct bridge_rtnode *);
412 static void     bridge_rtnode_destroy(struct bridge_softc *,
413                     struct bridge_rtnode *);
414
415 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
416                     const char *name);
417 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
418                     struct ifnet *ifp);
419 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
420                     struct bridge_ifinfo *);
421 static void     bridge_delete_member(struct bridge_softc *,
422                     struct bridge_iflist *, int);
423 static void     bridge_delete_span(struct bridge_softc *,
424                     struct bridge_iflist *);
425
426 static int      bridge_control(struct bridge_softc *, u_long,
427                                bridge_ctl_t, void *);
428 static int      bridge_ioctl_init(struct bridge_softc *, void *);
429 static int      bridge_ioctl_stop(struct bridge_softc *, void *);
430 static int      bridge_ioctl_add(struct bridge_softc *, void *);
431 static int      bridge_ioctl_del(struct bridge_softc *, void *);
432 static void     bridge_ioctl_fillflags(struct bridge_softc *sc,
433                                 struct bridge_iflist *bif, struct ifbreq *req);
434 static int      bridge_ioctl_gifflags(struct bridge_softc *, void *);
435 static int      bridge_ioctl_sifflags(struct bridge_softc *, void *);
436 static int      bridge_ioctl_scache(struct bridge_softc *, void *);
437 static int      bridge_ioctl_gcache(struct bridge_softc *, void *);
438 static int      bridge_ioctl_gifs(struct bridge_softc *, void *);
439 static int      bridge_ioctl_rts(struct bridge_softc *, void *);
440 static int      bridge_ioctl_saddr(struct bridge_softc *, void *);
441 static int      bridge_ioctl_sto(struct bridge_softc *, void *);
442 static int      bridge_ioctl_gto(struct bridge_softc *, void *);
443 static int      bridge_ioctl_daddr(struct bridge_softc *, void *);
444 static int      bridge_ioctl_flush(struct bridge_softc *, void *);
445 static int      bridge_ioctl_gpri(struct bridge_softc *, void *);
446 static int      bridge_ioctl_spri(struct bridge_softc *, void *);
447 static int      bridge_ioctl_reinit(struct bridge_softc *, void *);
448 static int      bridge_ioctl_ght(struct bridge_softc *, void *);
449 static int      bridge_ioctl_sht(struct bridge_softc *, void *);
450 static int      bridge_ioctl_gfd(struct bridge_softc *, void *);
451 static int      bridge_ioctl_sfd(struct bridge_softc *, void *);
452 static int      bridge_ioctl_gma(struct bridge_softc *, void *);
453 static int      bridge_ioctl_sma(struct bridge_softc *, void *);
454 static int      bridge_ioctl_sifprio(struct bridge_softc *, void *);
455 static int      bridge_ioctl_sifcost(struct bridge_softc *, void *);
456 static int      bridge_ioctl_addspan(struct bridge_softc *, void *);
457 static int      bridge_ioctl_delspan(struct bridge_softc *, void *);
458 static int      bridge_ioctl_sifbondwght(struct bridge_softc *, void *);
459 static int      bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
460                     int);
461 static int      bridge_ip_checkbasic(struct mbuf **mp);
462 #ifdef INET6
463 static int      bridge_ip6_checkbasic(struct mbuf **mp);
464 #endif /* INET6 */
465 static int      bridge_fragment(struct ifnet *, struct mbuf *,
466                     struct ether_header *, int, struct llc *);
467 static void     bridge_enqueue_handler(netmsg_t);
468 static void     bridge_handoff(struct bridge_softc *, struct ifnet *,
469                     struct mbuf *, int);
470
471 static void     bridge_del_bif_handler(netmsg_t);
472 static void     bridge_add_bif_handler(netmsg_t);
473 static void     bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
474                     struct bridge_iflist_head *);
475 static void     bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
476                     struct ifnet *);
477
478 SYSCTL_DECL(_net_link);
479 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
480
481 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
482 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
483 static int pfil_member = 1; /* run pfil hooks on the member interface */
484 static int bridge_debug;
485 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
486     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
487 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
488     &pfil_bridge, 0, "Packet filter on the bridge interface");
489 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
490     &pfil_member, 0, "Packet filter on the member interface");
491 SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW,
492     &bridge_debug, 0, "Bridge debug mode");
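
/*
 * The knobs above appear under the net.link.bridge sysctl node, e.g.
 * (illustrative):
 *
 *      sysctl net.link.bridge.pfil_onlyip=0
 *      sysctl net.link.bridge.pfil_member=0
 */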
493
494 struct bridge_control_arg {
495         union {
496                 struct ifbreq ifbreq;
497                 struct ifbifconf ifbifconf;
498                 struct ifbareq ifbareq;
499                 struct ifbaconf ifbaconf;
500                 struct ifbrparam ifbrparam;
501         } bca_u;
502         int     bca_len;
503         void    *bca_uptr;
504         void    *bca_kptr;
505 };
506
507 struct bridge_control {
508         bridge_ctl_t    bc_func;
509         int             bc_argsize;
510         int             bc_flags;
511 };
512
513 #define BC_F_COPYIN             0x01    /* copy arguments in */
514 #define BC_F_COPYOUT            0x02    /* copy arguments out */
515 #define BC_F_SUSER              0x04    /* do super-user check */
516
517 const struct bridge_control bridge_control_table[] = {
518         { bridge_ioctl_add,             sizeof(struct ifbreq),
519           BC_F_COPYIN|BC_F_SUSER },
520         { bridge_ioctl_del,             sizeof(struct ifbreq),
521           BC_F_COPYIN|BC_F_SUSER },
522
523         { bridge_ioctl_gifflags,        sizeof(struct ifbreq),
524           BC_F_COPYIN|BC_F_COPYOUT },
525         { bridge_ioctl_sifflags,        sizeof(struct ifbreq),
526           BC_F_COPYIN|BC_F_SUSER },
527
528         { bridge_ioctl_scache,          sizeof(struct ifbrparam),
529           BC_F_COPYIN|BC_F_SUSER },
530         { bridge_ioctl_gcache,          sizeof(struct ifbrparam),
531           BC_F_COPYOUT },
532
533         { bridge_ioctl_gifs,            sizeof(struct ifbifconf),
534           BC_F_COPYIN|BC_F_COPYOUT },
535         { bridge_ioctl_rts,             sizeof(struct ifbaconf),
536           BC_F_COPYIN|BC_F_COPYOUT },
537
538         { bridge_ioctl_saddr,           sizeof(struct ifbareq),
539           BC_F_COPYIN|BC_F_SUSER },
540
541         { bridge_ioctl_sto,             sizeof(struct ifbrparam),
542           BC_F_COPYIN|BC_F_SUSER },
543         { bridge_ioctl_gto,             sizeof(struct ifbrparam),
544           BC_F_COPYOUT },
545
546         { bridge_ioctl_daddr,           sizeof(struct ifbareq),
547           BC_F_COPYIN|BC_F_SUSER },
548
549         { bridge_ioctl_flush,           sizeof(struct ifbreq),
550           BC_F_COPYIN|BC_F_SUSER },
551
552         { bridge_ioctl_gpri,            sizeof(struct ifbrparam),
553           BC_F_COPYOUT },
554         { bridge_ioctl_spri,            sizeof(struct ifbrparam),
555           BC_F_COPYIN|BC_F_SUSER },
556
557         { bridge_ioctl_ght,             sizeof(struct ifbrparam),
558           BC_F_COPYOUT },
559         { bridge_ioctl_sht,             sizeof(struct ifbrparam),
560           BC_F_COPYIN|BC_F_SUSER },
561
562         { bridge_ioctl_gfd,             sizeof(struct ifbrparam),
563           BC_F_COPYOUT },
564         { bridge_ioctl_sfd,             sizeof(struct ifbrparam),
565           BC_F_COPYIN|BC_F_SUSER },
566
567         { bridge_ioctl_gma,             sizeof(struct ifbrparam),
568           BC_F_COPYOUT },
569         { bridge_ioctl_sma,             sizeof(struct ifbrparam),
570           BC_F_COPYIN|BC_F_SUSER },
571
572         { bridge_ioctl_sifprio,         sizeof(struct ifbreq),
573           BC_F_COPYIN|BC_F_SUSER },
574
575         { bridge_ioctl_sifcost,         sizeof(struct ifbreq),
576           BC_F_COPYIN|BC_F_SUSER },
577
578         { bridge_ioctl_addspan,         sizeof(struct ifbreq),
579           BC_F_COPYIN|BC_F_SUSER },
580         { bridge_ioctl_delspan,         sizeof(struct ifbreq),
581           BC_F_COPYIN|BC_F_SUSER },
582
583         { bridge_ioctl_sifbondwght,     sizeof(struct ifbreq),
584           BC_F_COPYIN|BC_F_SUSER },
585
586 };
587 static const int bridge_control_table_size = NELEM(bridge_control_table);
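
/*
 * The table above is indexed by the ifd_cmd of a SIOCGDRVSPEC/SIOCSDRVSPEC
 * request (see bridge_ioctl()).  A minimal userland sketch, with error
 * handling omitted; the interface names and the command index are
 * illustrative (needs <sys/socket.h>, <sys/sockio.h>, <net/if.h>,
 * <net/bridge/if_bridgevar.h>, <string.h>):
 *
 *      struct ifbreq req;
 *      struct ifdrv ifd;
 *      int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&req, 0, sizeof(req));
 *      strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *      memset(&ifd, 0, sizeof(ifd));
 *      strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *      ifd.ifd_cmd = 0;                -- index of bridge_ioctl_add above
 *      ifd.ifd_len = sizeof(req);
 *      ifd.ifd_data = &req;
 *      ioctl(s, SIOCSDRVSPEC, &ifd);
 */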
588
589 LIST_HEAD(, bridge_softc) bridge_list;
590
591 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
592                                 bridge_clone_create,
593                                 bridge_clone_destroy, 0, IF_MAXUNIT);
594
595 static int
596 bridge_modevent(module_t mod, int type, void *data)
597 {
598         switch (type) {
599         case MOD_LOAD:
600                 LIST_INIT(&bridge_list);
601                 if_clone_attach(&bridge_cloner);
602                 bridge_input_p = bridge_input;
603                 bridge_output_p = bridge_output;
604                 bridge_interface_p = bridge_interface;
605                 bridge_detach_cookie = EVENTHANDLER_REGISTER(
606                     ifnet_detach_event, bridge_ifdetach, NULL,
607                     EVENTHANDLER_PRI_ANY);
608 #if 0 /* notyet */
609                 bstp_linkstate_p = bstp_linkstate;
610 #endif
611                 break;
612         case MOD_UNLOAD:
613                 if (!LIST_EMPTY(&bridge_list))
614                         return (EBUSY);
615                 EVENTHANDLER_DEREGISTER(ifnet_detach_event,
616                     bridge_detach_cookie);
617                 if_clone_detach(&bridge_cloner);
618                 bridge_input_p = NULL;
619                 bridge_output_p = NULL;
620                 bridge_interface_p = NULL;
621 #if 0 /* notyet */
622                 bstp_linkstate_p = NULL;
623 #endif
624                 break;
625         default:
626                 return (EOPNOTSUPP);
627         }
628         return (0);
629 }
630
631 static moduledata_t bridge_mod = {
632         "if_bridge",
633         bridge_modevent,
634         0
635 };
636
637 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
638
639
640 /*
641  * bridge_clone_create:
642  *
643  *      Create a new bridge instance.
644  */
645 static int
646 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t param __unused)
647 {
648         struct bridge_softc *sc;
649         struct ifnet *ifp;
650         u_char eaddr[6];
651         int cpu, rnd;
652
653         sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
654         ifp = sc->sc_ifp = &sc->sc_if;
655
656         sc->sc_brtmax = BRIDGE_RTABLE_MAX;
657         sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
658         sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
659         sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
660         sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
661         sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
662         sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
663
664         /* Initialize our routing table. */
665         bridge_rtable_init(sc);
666
667         callout_init(&sc->sc_brcallout);
668         netmsg_init(&sc->sc_brtimemsg, NULL, &netisr_adone_rport,
669                     MSGF_DROPABLE, bridge_timer_handler);
670         sc->sc_brtimemsg.lmsg.u.ms_resultp = sc;
671
672         callout_init(&sc->sc_bstpcallout);
673         netmsg_init(&sc->sc_bstptimemsg, NULL, &netisr_adone_rport,
674                     MSGF_DROPABLE, bstp_tick_handler);
675         sc->sc_bstptimemsg.lmsg.u.ms_resultp = sc;
676
677         /* Initialize per-cpu member iface lists */
678         sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
679                                  M_DEVBUF, M_WAITOK);
680         for (cpu = 0; cpu < ncpus; ++cpu)
681                 TAILQ_INIT(&sc->sc_iflists[cpu]);
682
683         TAILQ_INIT(&sc->sc_spanlist);
684
685         ifp->if_softc = sc;
686         if_initname(ifp, ifc->ifc_name, unit);
687         ifp->if_mtu = ETHERMTU;
688         ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
689         ifp->if_ioctl = bridge_ioctl;
690         ifp->if_start = bridge_start;
691         ifp->if_init = bridge_init;
692         ifp->if_type = IFT_ETHER;
693         ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
694         ifq_set_ready(&ifp->if_snd);
695         ifp->if_hdrlen = ETHER_HDR_LEN;
696
697         /*
698          * Generate a random ethernet address and mark it as a locally
699          * administered unicast address (see the bit twiddling below).
700          */
701         rnd = karc4random();
702         bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
703         rnd = karc4random();
704         bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
705
706         eaddr[0] &= ~1; /* clear multicast bit */
707         eaddr[0] |= 2;  /* set the LAA bit */
708
709         ether_ifattach(ifp, eaddr, NULL);
710         /* Now undo some of the damage... */
711         ifp->if_baudrate = 0;
712         /*ifp->if_type = IFT_BRIDGE;*/
713
714         crit_enter();   /* XXX MP */
715         LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
716         crit_exit();
717
718         return (0);
719 }
720
721 static void
722 bridge_delete_dispatch(netmsg_t msg)
723 {
724         struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
725         struct ifnet *bifp = sc->sc_ifp;
726         struct bridge_iflist *bif;
727
728         ifnet_serialize_all(bifp);
729
730         while ((bif = TAILQ_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
731                 bridge_delete_member(sc, bif, 0);
732
733         while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL)
734                 bridge_delete_span(sc, bif);
735
736         ifnet_deserialize_all(bifp);
737
738         lwkt_replymsg(&msg->lmsg, 0);
739 }
740
741 /*
742  * bridge_clone_destroy:
743  *
744  *      Destroy a bridge instance.
745  */
746 static int
747 bridge_clone_destroy(struct ifnet *ifp)
748 {
749         struct bridge_softc *sc = ifp->if_softc;
750         struct netmsg_base msg;
751
752         ifnet_serialize_all(ifp);
753
754         bridge_stop(ifp);
755         ifp->if_flags &= ~IFF_UP;
756
757         ifnet_deserialize_all(ifp);
758
759         netmsg_init(&msg, NULL, &curthread->td_msgport,
760                     0, bridge_delete_dispatch);
761         msg.lmsg.u.ms_resultp = sc;
762         lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
763
764         crit_enter();   /* XXX MP */
765         LIST_REMOVE(sc, sc_list);
766         crit_exit();
767
768         ether_ifdetach(ifp);
769
770         /* Tear down the routing table. */
771         bridge_rtable_fini(sc);
772
773         /* Free per-cpu member iface lists */
774         kfree(sc->sc_iflists, M_DEVBUF);
775
776         kfree(sc, M_DEVBUF);
777
778         return 0;
779 }
780
781 /*
782  * bridge_ioctl:
783  *
784  *      Handle a control request from the operator.
785  */
786 static int
787 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
788 {
789         struct bridge_softc *sc = ifp->if_softc;
790         struct bridge_control_arg args;
791         struct ifdrv *ifd = (struct ifdrv *) data;
792         const struct bridge_control *bc;
793         int error = 0;
794
795         ASSERT_IFNET_SERIALIZED_ALL(ifp);
796
797         switch (cmd) {
798         case SIOCADDMULTI:
799         case SIOCDELMULTI:
800                 break;
801
802         case SIOCGDRVSPEC:
803         case SIOCSDRVSPEC:
804                 if (ifd->ifd_cmd >= bridge_control_table_size) {
805                         error = EINVAL;
806                         break;
807                 }
808                 bc = &bridge_control_table[ifd->ifd_cmd];
809
810                 if (cmd == SIOCGDRVSPEC &&
811                     (bc->bc_flags & BC_F_COPYOUT) == 0) {
812                         error = EINVAL;
813                         break;
814                 } else if (cmd == SIOCSDRVSPEC &&
815                            (bc->bc_flags & BC_F_COPYOUT)) {
816                         error = EINVAL;
817                         break;
818                 }
819
820                 if (bc->bc_flags & BC_F_SUSER) {
821                         error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
822                         if (error)
823                                 break;
824                 }
825
826                 if (ifd->ifd_len != bc->bc_argsize ||
827                     ifd->ifd_len > sizeof(args.bca_u)) {
828                         error = EINVAL;
829                         break;
830                 }
831
832                 memset(&args, 0, sizeof(args));
833                 if (bc->bc_flags & BC_F_COPYIN) {
834                         error = copyin(ifd->ifd_data, &args.bca_u,
835                                        ifd->ifd_len);
836                         if (error)
837                                 break;
838                 }
839
840                 error = bridge_control(sc, cmd, bc->bc_func, &args);
841                 if (error) {
842                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
843                         break;
844                 }
845
846                 if (bc->bc_flags & BC_F_COPYOUT) {
847                         error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
848                         if (args.bca_len != 0) {
849                                 KKASSERT(args.bca_kptr != NULL);
850                                 if (!error) {
851                                         error = copyout(args.bca_kptr,
852                                                 args.bca_uptr, args.bca_len);
853                                 }
854                                 kfree(args.bca_kptr, M_TEMP);
855                         } else {
856                                 KKASSERT(args.bca_kptr == NULL);
857                         }
858                 } else {
859                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
860                 }
861                 break;
862
863         case SIOCSIFFLAGS:
864                 if (!(ifp->if_flags & IFF_UP) &&
865                     (ifp->if_flags & IFF_RUNNING)) {
866                         /*
867                          * If interface is marked down and it is running,
868                          * then stop it.
869                          */
870                         bridge_stop(ifp);
871                 } else if ((ifp->if_flags & IFF_UP) &&
872                     !(ifp->if_flags & IFF_RUNNING)) {
873                         /*
874                          * If interface is marked up and it is stopped, then
875                          * start it.
876                          */
877                         ifp->if_init(sc);
878                 }
879
880                 /*
881                  * If running and link flag state change we have to
882                  * reinitialize as well.
883                  */
884                 if ((ifp->if_flags & IFF_RUNNING) &&
885                     (ifp->if_flags & (IFF_LINK0|IFF_LINK1|IFF_LINK2)) !=
886                     sc->sc_copy_flags) {
887                         sc->sc_copy_flags = ifp->if_flags &
888                                         (IFF_LINK0|IFF_LINK1|IFF_LINK2);
889                         bridge_control(sc, 0, bridge_ioctl_reinit, NULL);
890                 }
891
892                 break;
893
894         case SIOCSIFMTU:
895                 /* Do not allow the MTU to be changed on the bridge */
896                 error = EINVAL;
897                 break;
898
899         default:
900                 error = ether_ioctl(ifp, cmd, data);
901                 break;
902         }
903         return (error);
904 }
905
906 /*
907  * bridge_mutecaps:
908  *
909  *      Clear or restore unwanted capabilities on the member interface
910  */
911 static void
912 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
913 {
914         struct ifreq ifr;
915
916         if (ifp->if_ioctl == NULL)
917                 return;
918
919         bzero(&ifr, sizeof(ifr));
920         ifr.ifr_reqcap = ifp->if_capenable;
921
922         if (mute) {
923                 /* mask off and save capabilities */
924                 bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
925                 if (bif_info->bifi_mutecap != 0)
926                         ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
927         } else {
928                 /* restore muted capabilities */
929                 ifr.ifr_reqcap |= bif_info->bifi_mutecap;
930         }
931
932         if (bif_info->bifi_mutecap != 0) {
933                 ifnet_serialize_all(ifp);
934                 ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
935                 ifnet_deserialize_all(ifp);
936         }
937 }
938
939 /*
940  * bridge_lookup_member:
941  *
942  *      Lookup a bridge member interface.
943  */
944 static struct bridge_iflist *
945 bridge_lookup_member(struct bridge_softc *sc, const char *name)
946 {
947         struct bridge_iflist *bif;
948
949         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
950                 if (strcmp(bif->bif_ifp->if_xname, name) == 0)
951                         return (bif);
952         }
953         return (NULL);
954 }
955
956 /*
957  * bridge_lookup_member_if:
958  *
959  *      Lookup a bridge member interface by ifnet*.
960  */
961 static struct bridge_iflist *
962 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
963 {
964         struct bridge_iflist *bif;
965
966         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
967                 if (bif->bif_ifp == member_ifp)
968                         return (bif);
969         }
970         return (NULL);
971 }
972
973 /*
974  * bridge_lookup_member_ifinfo:
975  *
976  *      Lookup a bridge member interface by bridge_ifinfo.
977  */
978 static struct bridge_iflist *
979 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
980                             struct bridge_ifinfo *bif_info)
981 {
982         struct bridge_iflist *bif;
983
984         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
985                 if (bif->bif_info == bif_info)
986                         return (bif);
987         }
988         return (NULL);
989 }
990
991 /*
992  * bridge_delete_member:
993  *
994  *      Delete the specified member interface.
995  */
996 static void
997 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
998     int gone)
999 {
1000         struct ifnet *ifs = bif->bif_ifp;
1001         struct ifnet *bifp = sc->sc_ifp;
1002         struct bridge_ifinfo *bif_info = bif->bif_info;
1003         struct bridge_iflist_head saved_bifs;
1004
1005         ASSERT_IFNET_SERIALIZED_ALL(bifp);
1006         KKASSERT(bif_info != NULL);
1007
1008         ifs->if_bridge = NULL;
1009
1010         /*
1011          * Release bridge interface's serializer:
1012          * - To avoid possible deadlock.
1013          * - Various sync operations will block the current thread.
1014          */
1015         ifnet_deserialize_all(bifp);
1016
1017         if (!gone) {
1018                 switch (ifs->if_type) {
1019                 case IFT_ETHER:
1020                 case IFT_L2VLAN:
1021                         /*
1022                          * Take the interface out of promiscuous mode.
1023                          */
1024                         ifpromisc(ifs, 0);
1025                         bridge_mutecaps(bif_info, ifs, 0);
1026                         break;
1027
1028                 case IFT_GIF:
1029                         break;
1030
1031                 default:
1032                         panic("bridge_delete_member: impossible");
1033                         break;
1034                 }
1035         }
1036
1037         /*
1038          * Remove bifs from percpu linked list.
1039          *
1040          * Removed bifs are not freed immediately; instead,
1041          * they are saved in saved_bifs.  They will be freed
1042          * after we make sure that no one is accessing them,
1043          * i.e. after the following netmsg_service_sync().
1044          */
1045         TAILQ_INIT(&saved_bifs);
1046         bridge_del_bif(sc, bif_info, &saved_bifs);
1047
1048         /*
1049          * Make sure that all protocol threads:
1050          * o  see that the if_bridge field of 'ifs' has changed
1051          * o  know that bif is removed from the percpu linked list
1052          */
1053         netmsg_service_sync();
1054
1055         /*
1056          * Free the removed bifs
1057          */
1058         KKASSERT(!TAILQ_EMPTY(&saved_bifs));
1059         while ((bif = TAILQ_FIRST(&saved_bifs)) != NULL) {
1060                 TAILQ_REMOVE(&saved_bifs, bif, bif_next);
1061                 kfree(bif, M_DEVBUF);
1062         }
1063
1064         /* See the comment in bridge_ioctl_stop() */
1065         bridge_rtmsg_sync(sc);
1066         bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1067
1068         ifnet_serialize_all(bifp);
1069
1070         if (bifp->if_flags & IFF_RUNNING)
1071                 bstp_initialization(sc);
1072
1073         /*
1074          * Free the bif_info after bstp_initialization(), so that
1075          * bridge_softc.sc_root_port will not reference a dangling
1076          * pointer.
1077          */
1078         kfree(bif_info, M_DEVBUF);
1079 }
1080
1081 /*
1082  * bridge_delete_span:
1083  *
1084  *      Delete the specified span interface.
1085  */
1086 static void
1087 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1088 {
1089         KASSERT(bif->bif_ifp->if_bridge == NULL,
1090             ("%s: not a span interface", __func__));
1091
1092         TAILQ_REMOVE(&sc->sc_iflists[mycpuid], bif, bif_next);
1093         kfree(bif, M_DEVBUF);
1094 }
1095
1096 static int
1097 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1098 {
1099         struct ifnet *ifp = sc->sc_ifp;
1100
1101         if (ifp->if_flags & IFF_RUNNING)
1102                 return 0;
1103
1104         callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1105             bridge_timer, sc);
1106
1107         ifp->if_flags |= IFF_RUNNING;
1108         bstp_initialization(sc);
1109         return 0;
1110 }
1111
1112 static int
1113 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1114 {
1115         struct ifnet *ifp = sc->sc_ifp;
1116         struct lwkt_msg *lmsg;
1117
1118         if ((ifp->if_flags & IFF_RUNNING) == 0)
1119                 return 0;
1120
1121         callout_stop(&sc->sc_brcallout);
1122
1123         crit_enter();
1124         lmsg = &sc->sc_brtimemsg.lmsg;
1125         if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1126                 /* Pending to be processed; drop it */
1127                 lwkt_dropmsg(lmsg);
1128         }
1129         crit_exit();
1130
1131         bstp_stop(sc);
1132
1133         ifp->if_flags &= ~IFF_RUNNING;
1134
1135         ifnet_deserialize_all(ifp);
1136
1137         /* Let everyone know that we are stopped */
1138         netmsg_service_sync();
1139
1140         /*
1141          * Sync ifnetX msgports in the order we forward rtnode
1142          * installation messages.  This is used to make sure that
1143          * all rtnode installation messages sent by bridge_rtupdate()
1144          * during the above netmsg_service_sync() are flushed.
1145          */
1146         bridge_rtmsg_sync(sc);
1147         bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1148
1149         ifnet_serialize_all(ifp);
1150         return 0;
1151 }
1152
1153 static int
1154 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1155 {
1156         struct ifbreq *req = arg;
1157         struct bridge_iflist *bif;
1158         struct bridge_ifinfo *bif_info;
1159         struct ifnet *ifs, *bifp;
1160         int error = 0;
1161
1162         bifp = sc->sc_ifp;
1163         ASSERT_IFNET_SERIALIZED_ALL(bifp);
1164
1165         ifs = ifunit(req->ifbr_ifsname);
1166         if (ifs == NULL)
1167                 return (ENOENT);
1168
1169         /* If it's in the span list, it can't be a member. */
1170         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1171                 if (ifs == bif->bif_ifp)
1172                         return (EBUSY);
1173
1174         /* Allow the first Ethernet member to define the MTU */
1175         if (ifs->if_type != IFT_GIF) {
1176                 if (TAILQ_EMPTY(&sc->sc_iflists[mycpuid])) {
1177                         bifp->if_mtu = ifs->if_mtu;
1178                 } else if (bifp->if_mtu != ifs->if_mtu) {
1179                         if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1180                         return (EINVAL);
1181                 }
1182         }
1183
1184         if (ifs->if_bridge == sc)
1185                 return (EEXIST);
1186
1187         if (ifs->if_bridge != NULL)
1188                 return (EBUSY);
1189
1190         bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1191         bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1192         bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1193         bif_info->bifi_ifp = ifs;
1194         bif_info->bifi_bond_weight = 1;
1195
1196         /*
1197          * Release bridge interface's serializer:
1198          * - To avoid possible deadlock.
1199          * - Various sync operations will block the current thread.
1200          */
1201         ifnet_deserialize_all(bifp);
1202
1203         switch (ifs->if_type) {
1204         case IFT_ETHER:
1205         case IFT_L2VLAN:
1206                 /*
1207                  * Place the interface into promiscuous mode.
1208                  */
1209                 error = ifpromisc(ifs, 1);
1210                 if (error) {
1211                         ifnet_serialize_all(bifp);
1212                         goto out;
1213                 }
1214                 bridge_mutecaps(bif_info, ifs, 1);
1215                 break;
1216
1217         case IFT_GIF: /* :^) */
1218                 break;
1219
1220         default:
1221                 error = EINVAL;
1222                 ifnet_serialize_all(bifp);
1223                 goto out;
1224         }
1225
1226         /*
1227          * Add bifs to percpu linked lists
1228          */
1229         bridge_add_bif(sc, bif_info, ifs);
1230
1231         ifnet_serialize_all(bifp);
1232
1233         if (bifp->if_flags & IFF_RUNNING)
1234                 bstp_initialization(sc);
1235         else
1236                 bstp_stop(sc);
1237
1238         /*
1239          * Everything has been setup, so let the member interface
1240          * deliver packets to this bridge on its input/output path.
1241          */
1242         ifs->if_bridge = sc;
1243 out:
1244         if (error) {
1245                 if (bif_info != NULL)
1246                         kfree(bif_info, M_DEVBUF);
1247         }
1248         return (error);
1249 }
1250
1251 static int
1252 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1253 {
1254         struct ifbreq *req = arg;
1255         struct bridge_iflist *bif;
1256
1257         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1258         if (bif == NULL)
1259                 return (ENOENT);
1260
1261         bridge_delete_member(sc, bif, 0);
1262
1263         return (0);
1264 }
1265
1266 static int
1267 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1268 {
1269         struct ifbreq *req = arg;
1270         struct bridge_iflist *bif;
1271
1272         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1273         if (bif == NULL)
1274                 return (ENOENT);
1275         bridge_ioctl_fillflags(sc, bif, req);
1276         return (0);
1277 }
1278
1279 static void
1280 bridge_ioctl_fillflags(struct bridge_softc *sc, struct bridge_iflist *bif,
1281                        struct ifbreq *req)
1282 {
1283         req->ifbr_ifsflags = bif->bif_flags;
1284         req->ifbr_state = bif->bif_state;
1285         req->ifbr_priority = bif->bif_priority;
1286         req->ifbr_path_cost = bif->bif_path_cost;
1287         req->ifbr_bond_weight = bif->bif_bond_weight;
1288         req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1289         if (bif->bif_flags & IFBIF_STP) {
1290                 req->ifbr_peer_root = bif->bif_peer_root;
1291                 req->ifbr_peer_bridge = bif->bif_peer_bridge;
1292                 req->ifbr_peer_cost = bif->bif_peer_cost;
1293                 req->ifbr_peer_port = bif->bif_peer_port;
1294                 if (bstp_supersedes_port_info(sc, bif)) {
1295                         req->ifbr_designated_root = bif->bif_peer_root;
1296                         req->ifbr_designated_bridge = bif->bif_peer_bridge;
1297                         req->ifbr_designated_cost = bif->bif_peer_cost;
1298                         req->ifbr_designated_port = bif->bif_peer_port;
1299                 } else {
1300                         req->ifbr_designated_root = sc->sc_bridge_id;
1301                         req->ifbr_designated_bridge = sc->sc_bridge_id;
1302                         req->ifbr_designated_cost = bif->bif_path_cost +
1303                                                     bif->bif_peer_cost;
1304                         req->ifbr_designated_port = bif->bif_port_id;
1305                 }
1306         } else {
1307                 req->ifbr_peer_root = 0;
1308                 req->ifbr_peer_bridge = 0;
1309                 req->ifbr_peer_cost = 0;
1310                 req->ifbr_peer_port = 0;
1311                 req->ifbr_designated_root = 0;
1312                 req->ifbr_designated_bridge = 0;
1313                 req->ifbr_designated_cost = 0;
1314                 req->ifbr_designated_port = 0;
1315         }
1316 }
1317
1318 static int
1319 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1320 {
1321         struct ifbreq *req = arg;
1322         struct bridge_iflist *bif;
1323         struct ifnet *bifp = sc->sc_ifp;
1324
1325         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1326         if (bif == NULL)
1327                 return (ENOENT);
1328
1329         if (req->ifbr_ifsflags & IFBIF_SPAN) {
1330                 /* SPAN is readonly */
1331                 return (EINVAL);
1332         }
1333
1334         if (req->ifbr_ifsflags & IFBIF_STP) {
1335                 switch (bif->bif_ifp->if_type) {
1336                 case IFT_ETHER:
1337                         /* These can do spanning tree. */
1338                         break;
1339
1340                 default:
1341                         /* Nothing else can. */
1342                         return (EINVAL);
1343                 }
1344         }
1345
1346         bif->bif_flags = (bif->bif_flags & IFBIF_KEEPMASK) |
1347                          (req->ifbr_ifsflags & ~IFBIF_KEEPMASK);
1348         if (bifp->if_flags & IFF_RUNNING)
1349                 bstp_initialization(sc);
1350
1351         return (0);
1352 }
1353
1354 static int
1355 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1356 {
1357         struct ifbrparam *param = arg;
1358         struct ifnet *ifp = sc->sc_ifp;
1359
1360         sc->sc_brtmax = param->ifbrp_csize;
1361
1362         ifnet_deserialize_all(ifp);
1363         bridge_rttrim(sc);
1364         ifnet_serialize_all(ifp);
1365
1366         return (0);
1367 }
1368
1369 static int
1370 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1371 {
1372         struct ifbrparam *param = arg;
1373
1374         param->ifbrp_csize = sc->sc_brtmax;
1375
1376         return (0);
1377 }
1378
1379 static int
1380 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1381 {
1382         struct bridge_control_arg *bc_arg = arg;
1383         struct ifbifconf *bifc = arg;
1384         struct bridge_iflist *bif;
1385         struct ifbreq *breq;
1386         int count, len;
1387
1388         count = 0;
1389         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1390                 count++;
1391         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1392                 count++;
1393
1394         if (bifc->ifbic_len == 0) {
1395                 bifc->ifbic_len = sizeof(*breq) * count;
1396                 return 0;
1397         } else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1398                 bifc->ifbic_len = 0;
1399                 return 0;
1400         }
1401
1402         len = min(bifc->ifbic_len, sizeof(*breq) * count);
1403         KKASSERT(len >= sizeof(*breq));
1404
1405         breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1406         if (breq == NULL) {
1407                 bifc->ifbic_len = 0;
1408                 return ENOMEM;
1409         }
1410         bc_arg->bca_kptr = breq;
1411
1412         count = 0;
1413         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1414                 if (len < sizeof(*breq))
1415                         break;
1416
1417                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1418                         sizeof(breq->ifbr_ifsname));
1419                 bridge_ioctl_fillflags(sc, bif, breq);
1420                 breq++;
1421                 count++;
1422                 len -= sizeof(*breq);
1423         }
1424         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1425                 if (len < sizeof(*breq))
1426                         break;
1427
1428                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1429                         sizeof(breq->ifbr_ifsname));
1430                 breq->ifbr_ifsflags = bif->bif_flags;
1431                 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1432                 breq++;
1433                 count++;
1434                 len -= sizeof(*breq);
1435         }
1436
1437         bifc->ifbic_len = sizeof(*breq) * count;
1438         KKASSERT(bifc->ifbic_len > 0);
1439
1440         bc_arg->bca_len = bifc->ifbic_len;
1441         bc_arg->bca_uptr = bifc->ifbic_req;
1442         return 0;
1443 }
1444
1445 static int
1446 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1447 {
1448         struct bridge_control_arg *bc_arg = arg;
1449         struct ifbaconf *bac = arg;
1450         struct bridge_rtnode *brt;
1451         struct ifbareq *bareq;
1452         int count, len;
1453
1454         count = 0;
1455         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1456                 count++;
1457
1458         if (bac->ifbac_len == 0) {
1459                 bac->ifbac_len = sizeof(*bareq) * count;
1460                 return 0;
1461         } else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1462                 bac->ifbac_len = 0;
1463                 return 0;
1464         }
1465
1466         len = min(bac->ifbac_len, sizeof(*bareq) * count);
1467         KKASSERT(len >= sizeof(*bareq));
1468
1469         bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1470         if (bareq == NULL) {
1471                 bac->ifbac_len = 0;
1472                 return ENOMEM;
1473         }
1474         bc_arg->bca_kptr = bareq;
1475
1476         count = 0;
1477         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1478                 struct bridge_rtinfo *bri = brt->brt_info;
1479                 unsigned long expire;
1480
1481                 if (len < sizeof(*bareq))
1482                         break;
1483
1484                 strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1485                         sizeof(bareq->ifba_ifsname));
1486                 memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1487                 expire = bri->bri_expire;
1488                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1489                     time_second < expire)
1490                         bareq->ifba_expire = expire - time_second;
1491                 else
1492                         bareq->ifba_expire = 0;
1493                 bareq->ifba_flags = bri->bri_flags;
1494                 bareq++;
1495                 count++;
1496                 len -= sizeof(*bareq);
1497         }
1498
1499         bac->ifbac_len = sizeof(*bareq) * count;
1500         KKASSERT(bac->ifbac_len > 0);
1501
1502         bc_arg->bca_len = bac->ifbac_len;
1503         bc_arg->bca_uptr = bac->ifbac_req;
1504         return 0;
1505 }
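
/*
 * The two list-retrieval handlers above (gifs and rts) use a two-pass
 * sizing protocol: a caller first issues the request with
 * ifbic_len/ifbac_len set to 0 to learn how large a buffer is needed,
 * then allocates that much space and repeats the request.  A rough
 * userland sketch, where do_bridge_ioctl() is a hypothetical wrapper
 * for the driver-specific ioctl plumbing (command names as in
 * if_bridgevar.h):
 *
 *	struct ifbifconf bifc;
 *
 *	memset(&bifc, 0, sizeof(bifc));
 *	do_bridge_ioctl("bridge0", BRDGGIFS, &bifc);	<- returns size needed
 *	bifc.ifbic_req = malloc(bifc.ifbic_len);
 *	do_bridge_ioctl("bridge0", BRDGGIFS, &bifc);	<- fills the buffer
 *
 * Note that the kmalloc() calls above use M_WAITOK | M_NULLOK, so the
 * NULL checks are still required even though M_WAITOK alone would
 * never return NULL.
 */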
1506
1507 static int
1508 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1509 {
1510         struct ifbareq *req = arg;
1511         struct bridge_iflist *bif;
1512         struct ifnet *ifp = sc->sc_ifp;
1513         int error;
1514
1515         ASSERT_IFNET_SERIALIZED_ALL(ifp);
1516
1517         bif = bridge_lookup_member(sc, req->ifba_ifsname);
1518         if (bif == NULL)
1519                 return (ENOENT);
1520
1521         ifnet_deserialize_all(ifp);
1522         error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1523                                req->ifba_flags);
1524         ifnet_serialize_all(ifp);
1525         return (error);
1526 }
1527
1528 static int
1529 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1530 {
1531         struct ifbrparam *param = arg;
1532
1533         sc->sc_brttimeout = param->ifbrp_ctime;
1534
1535         return (0);
1536 }
1537
1538 static int
1539 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1540 {
1541         struct ifbrparam *param = arg;
1542
1543         param->ifbrp_ctime = sc->sc_brttimeout;
1544
1545         return (0);
1546 }
1547
1548 static int
1549 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1550 {
1551         struct ifbareq *req = arg;
1552         struct ifnet *ifp = sc->sc_ifp;
1553         int error;
1554
1555         ifnet_deserialize_all(ifp);
1556         error = bridge_rtdaddr(sc, req->ifba_dst);
1557         ifnet_serialize_all(ifp);
1558         return error;
1559 }
1560
1561 static int
1562 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1563 {
1564         struct ifbreq *req = arg;
1565         struct ifnet *ifp = sc->sc_ifp;
1566
1567         ifnet_deserialize_all(ifp);
1568         bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1569         ifnet_serialize_all(ifp);
1570
1571         return (0);
1572 }
1573
1574 static int
1575 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1576 {
1577         struct ifbrparam *param = arg;
1578
1579         param->ifbrp_prio = sc->sc_bridge_priority;
1580
1581         return (0);
1582 }
1583
1584 static int
1585 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1586 {
1587         struct ifbrparam *param = arg;
1588
1589         sc->sc_bridge_priority = param->ifbrp_prio;
1590
1591         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1592                 bstp_initialization(sc);
1593
1594         return (0);
1595 }
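
/*
 * As with 802.1D bridge priorities in general, a numerically lower
 * sc_bridge_priority makes this bridge more likely to win the root
 * election; the bstp_initialization() call above re-runs the state
 * machine immediately whenever the bridge is up and running.
 */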
1596
1597 static int
1598 bridge_ioctl_reinit(struct bridge_softc *sc, void *arg __unused)
1599 {
1600         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1601                 bstp_initialization(sc);
1602         return (0);
1603 }
1604
1605 static int
1606 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1607 {
1608         struct ifbrparam *param = arg;
1609
1610         param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1611
1612         return (0);
1613 }
1614
1615 static int
1616 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1617 {
1618         struct ifbrparam *param = arg;
1619
1620         if (param->ifbrp_hellotime == 0)
1621                 return (EINVAL);
1622         sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1623
1624         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1625                 bstp_initialization(sc);
1626
1627         return (0);
1628 }
1629
1630 static int
1631 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1632 {
1633         struct ifbrparam *param = arg;
1634
1635         param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1636
1637         return (0);
1638 }
1639
1640 static int
1641 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1642 {
1643         struct ifbrparam *param = arg;
1644
1645         if (param->ifbrp_fwddelay == 0)
1646                 return (EINVAL);
1647         sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1648
1649         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1650                 bstp_initialization(sc);
1651
1652         return (0);
1653 }
1654
1655 static int
1656 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1657 {
1658         struct ifbrparam *param = arg;
1659
1660         param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1661
1662         return (0);
1663 }
1664
1665 static int
1666 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1667 {
1668         struct ifbrparam *param = arg;
1669
1670         if (param->ifbrp_maxage == 0)
1671                 return (EINVAL);
1672         sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1673
1674         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1675                 bstp_initialization(sc);
1676
1677         return (0);
1678 }
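
/*
 * The three STP timing parameters above (hello time, forward delay and
 * max age) are exchanged with userland in whole seconds but stored
 * shifted left by 8, i.e. in 1/256-second units, presumably so the STP
 * code can keep sub-second resolution.  For example, a requested hello
 * time of 2 seconds is stored as (2 << 8) == 512 and reported back as
 * (512 >> 8) == 2.
 */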
1679
1680 static int
1681 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1682 {
1683         struct ifbreq *req = arg;
1684         struct bridge_iflist *bif;
1685
1686         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1687         if (bif == NULL)
1688                 return (ENOENT);
1689
1690         bif->bif_priority = req->ifbr_priority;
1691
1692         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1693                 bstp_initialization(sc);
1694
1695         return (0);
1696 }
1697
1698 static int
1699 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1700 {
1701         struct ifbreq *req = arg;
1702         struct bridge_iflist *bif;
1703
1704         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1705         if (bif == NULL)
1706                 return (ENOENT);
1707
1708         bif->bif_path_cost = req->ifbr_path_cost;
1709
1710         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1711                 bstp_initialization(sc);
1712
1713         return (0);
1714 }
1715
1716 static int
1717 bridge_ioctl_sifbondwght(struct bridge_softc *sc, void *arg)
1718 {
1719         struct ifbreq *req = arg;
1720         struct bridge_iflist *bif;
1721
1722         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1723         if (bif == NULL)
1724                 return (ENOENT);
1725
1726         bif->bif_bond_weight = req->ifbr_bond_weight;
1727
1728         /* no reinit needed */
1729
1730         return (0);
1731 }
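
/*
 * The bond weight set above is consumed by bridge_select_unicast()
 * below: with LINK2 bonding it controls how many consecutive unicast
 * packets a member transmits before the round-robin rotates to the
 * next member sharing the same MAC address.
 */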
1732
1733 static int
1734 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1735 {
1736         struct ifbreq *req = arg;
1737         struct bridge_iflist *bif;
1738         struct ifnet *ifs;
1739         struct bridge_ifinfo *bif_info;
1740
1741         ifs = ifunit(req->ifbr_ifsname);
1742         if (ifs == NULL)
1743                 return (ENOENT);
1744
1745         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1746                 if (ifs == bif->bif_ifp)
1747                         return (EBUSY);
1748
1749         if (ifs->if_bridge != NULL)
1750                 return (EBUSY);
1751
1752         switch (ifs->if_type) {
1753         case IFT_ETHER:
1754         case IFT_GIF:
1755         case IFT_L2VLAN:
1756                 break;
1757
1758         default:
1759                 return (EINVAL);
1760         }
1761
1762         /*
1763          * bif_info is needed for bif_flags
1764          */
1765         bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1766         bif_info->bifi_ifp = ifs;
1767
1768         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1769         bif->bif_ifp = ifs;
1770         bif->bif_info = bif_info;
1771         bif->bif_flags = IFBIF_SPAN;
1772         /* NOTE: unlike a normal member, a span bif needs no further setup */
1773
1774         TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1775
1776         sc->sc_span = 1;
1777
1778         return (0);
1779 }
1780
1781 static int
1782 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1783 {
1784         struct ifbreq *req = arg;
1785         struct bridge_iflist *bif;
1786         struct ifnet *ifs;
1787
1788         ifs = ifunit(req->ifbr_ifsname);
1789         if (ifs == NULL)
1790                 return (ENOENT);
1791
1792         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1793                 if (ifs == bif->bif_ifp)
1794                         break;
1795
1796         if (bif == NULL)
1797                 return (ENOENT);
1798
1799         bridge_delete_span(sc, bif);
1800
1801         if (TAILQ_EMPTY(&sc->sc_spanlist))
1802                 sc->sc_span = 0;
1803
1804         return (0);
1805 }
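
/*
 * A span port gets a copy of the frames passing through the bridge
 * (see bridge_span()), which is mainly useful for traffic monitoring.
 * Illustrative usage, assuming the usual ifconfig bridge syntax and a
 * hypothetical member name:
 *
 *	ifconfig bridge0 span em1	# start mirroring to em1
 *	ifconfig bridge0 -span em1	# stop mirroring
 */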
1806
1807 static void
1808 bridge_ifdetach_dispatch(netmsg_t msg)
1809 {
1810         struct ifnet *ifp, *bifp;
1811         struct bridge_softc *sc;
1812         struct bridge_iflist *bif;
1813
1814         ifp = msg->lmsg.u.ms_resultp;
1815         sc = ifp->if_bridge;
1816
1817         /* Check if the interface is a bridge member */
1818         if (sc != NULL) {
1819                 bifp = sc->sc_ifp;
1820
1821                 ifnet_serialize_all(bifp);
1822
1823                 bif = bridge_lookup_member_if(sc, ifp);
1824                 if (bif != NULL) {
1825                         bridge_delete_member(sc, bif, 1);
1826                 } else {
1827                         /* XXX Why would bif be NULL here? */
1828                 }
1829
1830                 ifnet_deserialize_all(bifp);
1831                 goto reply;
1832         }
1833
1834         crit_enter();   /* XXX MP */
1835
1836         /* Check if the interface is a span port */
1837         LIST_FOREACH(sc, &bridge_list, sc_list) {
1838                 bifp = sc->sc_ifp;
1839
1840                 ifnet_serialize_all(bifp);
1841
1842                 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1843                         if (ifp == bif->bif_ifp) {
1844                                 bridge_delete_span(sc, bif);
1845                                 break;
1846                         }
1847
1848                 ifnet_deserialize_all(bifp);
1849         }
1850
1851         crit_exit();
1852
1853 reply:
1854         lwkt_replymsg(&msg->lmsg, 0);
1855 }
1856
1857 /*
1858  * bridge_ifdetach:
1859  *
1860  *      Detach an interface from a bridge.  Called when a member
1861  *      interface is detaching.
1862  */
1863 static void
1864 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1865 {
1866         struct netmsg_base msg;
1867
1868         netmsg_init(&msg, NULL, &curthread->td_msgport,
1869                     0, bridge_ifdetach_dispatch);
1870         msg.lmsg.u.ms_resultp = ifp;
1871
1872         lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
1873 }
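
/*
 * As with the other configuration paths in this file, the detach
 * notification is not handled in place: it is wrapped in a netmsg and
 * sent to BRIDGE_CFGPORT so that it is serialized with the rest of the
 * bridge (re)configuration, and lwkt_domsg() blocks until
 * bridge_ifdetach_dispatch() replies.
 */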
1874
1875 /*
1876  * bridge_init:
1877  *
1878  *      Initialize a bridge interface.
1879  */
1880 static void
1881 bridge_init(void *xsc)
1882 {
1883         bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1884 }
1885
1886 /*
1887  * bridge_stop:
1888  *
1889  *      Stop the bridge interface.
1890  */
1891 static void
1892 bridge_stop(struct ifnet *ifp)
1893 {
1894         bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1895 }
1896
1897 /*
1898  * Returns TRUE if the packet is being sent 'from us'... from our bridge
1899  * interface or from any member of our bridge interface.  This is used
1900  * later on to force the MAC to be the MAC of our bridge interface.
1901  */
1902 static int
1903 bridge_from_us(struct bridge_softc *sc, struct ether_header *eh)
1904 {
1905         struct bridge_iflist *bif;
1906
1907         if (memcmp(eh->ether_shost, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN) == 0)
1908                 return (1);
1909
1910         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1911                 if (memcmp(eh->ether_shost, IF_LLADDR(bif->bif_ifp),
1912                            ETHER_ADDR_LEN) == 0) {
1913                         return (1);
1914                 }
1915         }
1916         return (0);
1917 }
1918
1919 /*
1920  * bridge_enqueue:
1921  *
1922  *      Enqueue a packet on a bridge member interface.
1923  *
1924  */
1925 void
1926 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1927 {
1928         struct netmsg_packet *nmp;
1929
1930         mbuftrackid(m, 64);
1931
1932         nmp = &m->m_hdr.mh_netmsg;
1933         netmsg_init(&nmp->base, NULL, &netisr_apanic_rport,
1934                     0, bridge_enqueue_handler);
1935         nmp->nm_packet = m;
1936         nmp->base.lmsg.u.ms_resultp = dst_ifp;
1937
1938         lwkt_sendmsg(netisr_portfn(mycpu->gd_cpuid), &nmp->base.lmsg);
1939 }
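
/*
 * The netmsg used above is embedded in the mbuf header itself
 * (m_hdr.mh_netmsg), so no allocation can fail here; the frame is
 * simply queued to the current cpu's netisr and the actual transmit
 * happens later in bridge_enqueue_handler().
 */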
1940
1941 /*
1942  * After looking up dst_if in our forwarding table we still have to
1943  * deal with channel bonding.  Find the best interface in the bonding set.
1944  */
1945 static struct ifnet *
1946 bridge_select_unicast(struct bridge_softc *sc, struct ifnet *dst_if,
1947                       int from_blocking, struct mbuf *m)
1948 {
1949         struct bridge_iflist *bif, *nbif;
1950         struct ifnet *alt_if;
1951         int alt_priority;
1952         int priority;
1953
1954         /*
1955          * Unicast, kinda replicates the output side of bridge_output().
1956          *
1957          * Even though this is a uni-cast packet we may have to select
1958          * an interface from a bonding set.
1959          */
1960         bif = bridge_lookup_member_if(sc, dst_if);
1961         if (bif == NULL) {
1962                 /* Not a member of the bridge (anymore?) */
1963                 return NULL;
1964         }
1965
1966         /*
1967          * If STP is enabled on the target we are an equal opportunity
1968          * employer and do not necessarily output to dst_if.  Instead
1969          * scan available links with the same MAC as the current dst_if
1970          * and choose the best one.
1971          *
1972          * We also need to do this because arp entries tag onto a particular
1973          * interface and if it happens to be dead then the packets will
1974          * go into a bit bucket.
1975          *
1976          * If LINK2 is set the matching links are bonded and we round-robin.
1977          * (the MAC address must be the same for the participating links).
1978          * In this case links in a STP FORWARDING or BONDED state are
1979          * allowed for unicast packets.
1980          */
1981         if (bif->bif_flags & IFBIF_STP) {
1982                 alt_if = NULL;
1983                 alt_priority = 0;
1984                 priority = 0;
1985
1986                 TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1987                                      bif_next, nbif) {
1988                         /*
1989                          * dst_if may imply a bonding set so we must compare
1990                          * MAC addresses.
1991                          */
1992                         if (memcmp(IF_LLADDR(bif->bif_ifp),
1993                                    IF_LLADDR(dst_if),
1994                                    ETHER_ADDR_LEN) != 0) {
1995                                 continue;
1996                         }
1997
1998                         if ((bif->bif_ifp->if_flags & IFF_RUNNING) == 0)
1999                                 continue;
2000
2001                         /*
2002                          * NOTE: We allow transmissions through a BLOCKING or
2003                          *       LEARNING interface only as a last resort, and
2004                          *       never if the packet arrived on a BLOCKING one.
2005                          *
2006                          * NOTE: If we send a packet through a learning
2007                          *       interface the receiving end (if also in
2008                          *       LEARNING) will throw it away, so this is
2009                          *       the ultimate last resort.
2010                          */
2011                         switch(bif->bif_state) {
2012                         case BSTP_IFSTATE_BLOCKING:
2013                                 if (from_blocking == 0 &&
2014                                     bif->bif_priority + 256 > alt_priority) {
2015                                         alt_priority = bif->bif_priority + 256;
2016                                         alt_if = bif->bif_ifp;
2017                                 }
2018                                 continue;
2019                         case BSTP_IFSTATE_LEARNING:
2020                                 if (from_blocking == 0 &&
2021                                     bif->bif_priority > alt_priority) {
2022                                         alt_priority = bif->bif_priority;
2023                                         alt_if = bif->bif_ifp;
2024                                 }
2025                                 continue;
2026                         case BSTP_IFSTATE_L1BLOCKING:
2027                         case BSTP_IFSTATE_LISTENING:
2028                         case BSTP_IFSTATE_DISABLED:
2029                                 continue;
2030                         default:
2031                                 /* FORWARDING, BONDED */
2032                                 break;
2033                         }
2034
2035                         /*
2036                          * XXX we need to use the Toeplitz hash or
2037                          *     something like that instead of
2038                          *     round-robining.
2039                          */
2040                         if (sc->sc_ifp->if_flags & IFF_LINK2) {
2041                                 dst_if = bif->bif_ifp;
2042                                 if (++bif->bif_bond_count >=
2043                                     bif->bif_bond_weight) {
2044                                         bif->bif_bond_count = 0;
2045                                         TAILQ_REMOVE(&sc->sc_iflists[mycpuid],
2046                                                      bif, bif_next);
2047                                         TAILQ_INSERT_TAIL(
2048                                                      &sc->sc_iflists[mycpuid],
2049                                                      bif, bif_next);
2050                                 }
2051                                 priority = 1;
2052                                 break;
2053                         }
2054
2055                         /*
2056                          * Select best interface in the FORWARDING or
2057                          * BONDED set.  Well, there shouldn't be any
2058                          * in a BONDED state if LINK2 is not set (they
2059                          * will all be in a BLOCKING state), but there
2060                          * could be a transitory condition here.
2061                          */
2062                         if (bif->bif_priority > priority) {
2063                                 priority = bif->bif_priority;
2064                                 dst_if = bif->bif_ifp;
2065                         }
2066                 }
2067
2068                 /*
2069                  * If no suitable interfaces were found but a suitable
2070                  * alternative interface was found, use the alternative
2071                  * interface.
2072                  */
2073                 if (priority == 0 && alt_if)
2074                         dst_if = alt_if;
2075         }
2076
2077         /*
2078          * At this point, we're dealing with a unicast frame
2079          * going to a different interface.
2080          */
2081         if ((dst_if->if_flags & IFF_RUNNING) == 0)
2082                 dst_if = NULL;
2083         return (dst_if);
2084 }
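
/*
 * A worked example of the LINK2 round-robin above: suppose two members
 * share a MAC and have bond weights 3 and 1.  The member at the head
 * of the per-cpu list carries three consecutive unicast packets; when
 * bif_bond_count reaches its weight it is rotated to the tail, the
 * other member then carries one packet and is rotated in turn, so the
 * long-run split is roughly 3:1.  Without LINK2 there is no rotation
 * and the highest-priority FORWARDING/BONDED member is always chosen.
 */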
2085
2086
2087 /*
2088  * bridge_output:
2089  *
2090  *      Send output from a bridge member interface.  This
2091  *      performs the bridging function for locally originated
2092  *      packets.
2093  *
2094  *      The mbuf has the Ethernet header already attached.  We must
2095  *      enqueue or free the mbuf before returning.
2096  */
2097 static int
2098 bridge_output(struct ifnet *ifp, struct mbuf *m)
2099 {
2100         struct bridge_softc *sc = ifp->if_bridge;
2101         struct bridge_iflist *bif, *nbif;
2102         struct ether_header *eh;
2103         struct ifnet *dst_if, *alt_if, *bifp;
2104         int from_us;
2105         int alt_priority;
2106
2107         ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2108         mbuftrackid(m, 65);
2109
2110         /*
2111          * Make sure that we are still a member of a bridge interface.
2112          */
2113         if (sc == NULL) {
2114                 m_freem(m);
2115                 return (0);
2116         }
2117         bifp = sc->sc_ifp;
2118
2119         /*
2120          * Acquire header
2121          */
2122         if (m->m_len < ETHER_HDR_LEN) {
2123                 m = m_pullup(m, ETHER_HDR_LEN);
2124                 if (m == NULL) {
2125                         IFNET_STAT_INC(bifp, oerrors, 1);
2126                         return (0);
2127                 }
2128         }
2129         eh = mtod(m, struct ether_header *);
2130         from_us = bridge_from_us(sc, eh);
2131
2132         /*
2133          * If the bridge is down but the original output interface is up,
2134          * go ahead and send out that interface.  Otherwise, the packet
2135          * is dropped below.
2136          */
2137         if ((bifp->if_flags & IFF_RUNNING) == 0) {
2138                 dst_if = ifp;
2139                 goto sendunicast;
2140         }
2141
2142         /*
2143          * If the packet is a multicast, or we don't know a better way to
2144          * get there, send to all interfaces.
2145          */
2146         if (ETHER_IS_MULTICAST(eh->ether_dhost))
2147                 dst_if = NULL;
2148         else
2149                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2150
2151         if (dst_if == NULL) {
2152                 struct mbuf *mc;
2153                 int used = 0;
2154                 int found = 0;
2155
2156                 if (sc->sc_span)
2157                         bridge_span(sc, m);
2158
2159                 alt_if = NULL;
2160                 alt_priority = 0;
2161                 TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
2162                                      bif_next, nbif) {
2163                         dst_if = bif->bif_ifp;
2164
2165                         if ((dst_if->if_flags & IFF_RUNNING) == 0)
2166                                 continue;
2167
2168                         /*
2169                          * If this is not the original output interface,
2170                          * and the interface is participating in spanning
2171                          * tree, make sure the port is in a state that
2172                          * allows forwarding.
2173                          *
2174                          * We keep track of a possible backup IF if we are
2175                          * unable to find any interfaces to forward through.
2176                          *
2177                          * NOTE: Currently round-robining is not implemented
2178                          *       across bonded interface groups (needs an
2179                          *       algorithm to track each group somehow).
2180                          *
2181                          *       Similarly we track only one alternative
2182                          *       interface if no suitable interfaces are
2183                          *       found.
2184                          */
2185                         if (dst_if != ifp &&
2186                             (bif->bif_flags & IFBIF_STP) != 0) {
2187                                 switch (bif->bif_state) {
2188                                 case BSTP_IFSTATE_BONDED:
2189                                         if (bif->bif_priority + 512 >
2190                                             alt_priority) {
2191                                                 alt_priority =
2192                                                     bif->bif_priority + 512;
2193                                                 alt_if = bif->bif_ifp;
2194                                         }
2195                                         continue;
2196                                 case BSTP_IFSTATE_BLOCKING:
2197                                         if (bif->bif_priority + 256 >
2198                                             alt_priority) {
2199                                                 alt_priority =
2200                                                     bif->bif_priority + 256;
2201                                                 alt_if = bif->bif_ifp;
2202                                         }
2203                                         continue;
2204                                 case BSTP_IFSTATE_LEARNING:
2205                                         if (bif->bif_priority > alt_priority) {
2206                                                 alt_priority =
2207                                                     bif->bif_priority;
2208                                                 alt_if = bif->bif_ifp;
2209                                         }
2210                                         continue;
2211                                 case BSTP_IFSTATE_L1BLOCKING:
2212                                 case BSTP_IFSTATE_LISTENING:
2213                                 case BSTP_IFSTATE_DISABLED:
2214                                         continue;
2215                                 default:
2216                                         /* FORWARDING */
2217                                         break;
2218                                 }
2219                         }
2220
2221                         KKASSERT(used == 0);
2222                         if (TAILQ_NEXT(bif, bif_next) == NULL) {
2223                                 used = 1;
2224                                 mc = m;
2225                         } else {
2226                                 mc = m_copypacket(m, MB_DONTWAIT);
2227                                 if (mc == NULL) {
2228                                         IFNET_STAT_INC(bifp, oerrors, 1);
2229                                         continue;
2230                                 }
2231                         }
2232
2233                         /*
2234                          * If the packet is 'from' us override ether_shost.
2235                          */
2236                         bridge_handoff(sc, dst_if, mc, from_us);
2237                         found = 1;
2238
2239                         if (nbif != NULL && !nbif->bif_onlist) {
2240                                 KKASSERT(bif->bif_onlist);
2241                                 nbif = TAILQ_NEXT(bif, bif_next);
2242                         }
2243                 }
2244
2245                 /*
2246                  * If we couldn't find anything use the backup interface
2247                  * if we have one.
2248                  */
2249                 if (found == 0 && alt_if) {
2250                         KKASSERT(used == 0);
2251                         mc = m;
2252                         used = 1;
2253                         bridge_handoff(sc, alt_if, mc, from_us);
2254                 }
2255
2256                 if (used == 0)
2257                         m_freem(m);
2258                 return (0);
2259         }
2260
2261         /*
2262          * Unicast
2263          */
2264 sendunicast:
2265         dst_if = bridge_select_unicast(sc, dst_if, 0, m);
2266
2267         if (sc->sc_span)
2268                 bridge_span(sc, m);
2269         if (dst_if == NULL)
2270                 m_freem(m);
2271         else
2272                 bridge_handoff(sc, dst_if, m, from_us);
2273         return (0);
2274 }
2275
2276 /*
2277  * Returns the bridge interface associated with a member interface.
2278  * Pass ifp->if_bridge (must not be NULL).  Used by the ARP
2279  * code to supply the bridge for the is-at info, making
2280  * the bridge responsible for matching local addresses.
2281  *
2282  * Without this the ARP code will supply bridge member interfaces
2283  * for the is-at, which makes it difficult for the bridge to fail over
2284  * interfaces (among other things).
2285  */
2286 static struct ifnet *
2287 bridge_interface(void *if_bridge)
2288 {
2289         struct bridge_softc *sc = if_bridge;
2290         return (sc->sc_ifp);
2291 }
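
/*
 * The practical effect is that an ARP request for an address reachable
 * via the bridge is answered with the bridge's MAC rather than a
 * member's MAC (see also the reinput logic in bridge_input()), so
 * peers' ARP caches stay valid when traffic fails over from one member
 * to another.
 */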
2292
2293 /*
2294  * bridge_start:
2295  *
2296  *      Start output on a bridge.
2297  */
2298 static void
2299 bridge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2300 {
2301         struct bridge_softc *sc = ifp->if_softc;
2302
2303         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2304         ASSERT_IFNET_SERIALIZED_TX(ifp, ifsq);
2305
2306         ifsq_set_oactive(ifsq);
2307         for (;;) {
2308                 struct ifnet *dst_if = NULL;
2309                 struct ether_header *eh;
2310                 struct mbuf *m;
2311
2312                 m = ifsq_dequeue(ifsq, NULL);
2313                 if (m == NULL)
2314                         break;
2315                 mbuftrackid(m, 75);
2316
2317                 if (m->m_len < sizeof(*eh)) {
2318                         m = m_pullup(m, sizeof(*eh));
2319                         if (m == NULL) {
2320                                 IFNET_STAT_INC(ifp, oerrors, 1);
2321                                 continue;
2322                         }
2323                 }
2324                 eh = mtod(m, struct ether_header *);
2325
2326                 BPF_MTAP(ifp, m);
2327                 IFNET_STAT_INC(ifp, opackets, 1);
2328
2329                 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
2330                         dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2331
2332                 /*
2333                  * Multicast or broadcast
2334                  */
2335                 if (dst_if == NULL) {
2336                         bridge_start_bcast(sc, m);
2337                         continue;
2338                 }
2339
2340                 /*
2341                  * Unicast
2342                  */
2343                 dst_if = bridge_select_unicast(sc, dst_if, 0, m);
2344
2345                 if (dst_if == NULL)
2346                         m_freem(m);
2347                 else
2348                         bridge_enqueue(dst_if, m);
2349         }
2350         ifsq_clr_oactive(ifsq);
2351 }
2352
2353 /*
2354  * bridge_forward:
2355  *
2356  *      Forward packets received on a bridge interface via the input
2357  *      path.
2358  *
2359  *      This implements the forwarding function of the bridge.
2360  */
2361 static void
2362 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2363 {
2364         struct bridge_iflist *bif;
2365         struct ifnet *src_if, *dst_if, *ifp;
2366         struct ether_header *eh;
2367         int from_blocking;
2368
2369         mbuftrackid(m, 66);
2370         src_if = m->m_pkthdr.rcvif;
2371         ifp = sc->sc_ifp;
2372
2373         ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2374
2375         IFNET_STAT_INC(ifp, ipackets, 1);
2376         IFNET_STAT_INC(ifp, ibytes, m->m_pkthdr.len);
2377
2378         /*
2379          * Look up the bridge_iflist.
2380          */
2381         bif = bridge_lookup_member_if(sc, src_if);
2382         if (bif == NULL) {
2383                 /* Interface is not a bridge member (anymore?) */
2384                 m_freem(m);
2385                 return;
2386         }
2387
2388         /*
2389          * In spanning tree mode receiving a packet from an interface
2390          * in a BLOCKING state is allowed; it could be a member of last
2391          * resort from the sender's point of view, but forwarding it is
2392          * not allowed.
2393          *
2394          * The sender's spanning tree will eventually sync up and the
2395          * sender will go into a BLOCKING state too (but this still may be
2396          * an interface of last resort during state changes).
2397          */
2398         if (bif->bif_flags & IFBIF_STP) {
2399                 switch (bif->bif_state) {
2400                 case BSTP_IFSTATE_L1BLOCKING:
2401                 case BSTP_IFSTATE_LISTENING:
2402                 case BSTP_IFSTATE_DISABLED:
2403                         m_freem(m);
2404                         return;
2405                 default:
2406                         /* learning, blocking, bonded, forwarding */
2407                         break;
2408                 }
2409                 from_blocking = (bif->bif_state == BSTP_IFSTATE_BLOCKING);
2410         } else {
2411                 from_blocking = 0;
2412         }
2413
2414         eh = mtod(m, struct ether_header *);
2415
2416         /*
2417          * If the interface is learning, and the source
2418          * address is valid and not multicast, record
2419          * the address.
2420          */
2421         if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2422             from_blocking == 0 &&
2423             ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2424             (eh->ether_shost[0] == 0 &&
2425              eh->ether_shost[1] == 0 &&
2426              eh->ether_shost[2] == 0 &&
2427              eh->ether_shost[3] == 0 &&
2428              eh->ether_shost[4] == 0 &&
2429              eh->ether_shost[5] == 0) == 0) {
2430                 bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2431         }
2432
2433         /*
2434          * Don't forward from an interface in the listening or learning
2435          * state.  That is, in the learning state we learn information
2436          * but we throw away the packets.
2437          *
2438          * We let through packets on interfaces in the blocking state.
2439          * The blocking state is applicable to the send side, not the
2440          * receive side.
2441          */
2442         if ((bif->bif_flags & IFBIF_STP) != 0 &&
2443             (bif->bif_state == BSTP_IFSTATE_LISTENING ||
2444              bif->bif_state == BSTP_IFSTATE_LEARNING)) {
2445                 m_freem(m);
2446                 return;
2447         }
2448
2449         /*
2450          * At this point, the port either doesn't participate
2451          * in spanning tree or it is in the forwarding state.
2452          */
2453
2454         /*
2455          * If the packet is unicast, destined for someone on
2456          * "this" side of the bridge, drop it.
2457          *
2458          * src_if implies the entire bonding set so we have to compare MAC
2459          * addresses and not just ifnet pointers.
2460          */
2461         if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2462                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2463                 if (dst_if && memcmp(IF_LLADDR(src_if), IF_LLADDR(dst_if),
2464                                      ETHER_ADDR_LEN) == 0) {
2465                         m_freem(m);
2466                         return;
2467                 }
2468         } else {
2469                 /* ...forward it to all interfaces. */
2470                 IFNET_STAT_INC(ifp, imcasts, 1);
2471                 dst_if = NULL;
2472         }
2473
2474         /*
2475          * Broadcast if we do not have forwarding information.  However, if
2476          * we received the packet on a blocking interface we do not do this
2477          * (unless you really want to blow up your network).
2478          */
2479         if (dst_if == NULL) {
2480                 if (from_blocking)
2481                         m_freem(m);
2482                 else
2483                         bridge_broadcast(sc, src_if, m);
2484                 return;
2485         }
2486
2487         dst_if = bridge_select_unicast(sc, dst_if, from_blocking, m);
2488
2489         if (dst_if == NULL) {
2490                 m_freem(m);
2491                 return;
2492         }
2493
2494         if (inet_pfil_hook.ph_hashooks > 0
2495 #ifdef INET6
2496             || inet6_pfil_hook.ph_hashooks > 0
2497 #endif
2498             ) {
2499                 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2500                         return;
2501                 if (m == NULL)
2502                         return;
2503
2504                 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2505                         return;
2506                 if (m == NULL)
2507                         return;
2508         }
2509         bridge_handoff(sc, dst_if, m, 0);
2510 }
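
/*
 * When packet filter hooks are registered, a forwarded frame passes
 * through bridge_pfil() twice on this path: once as input on the
 * bridge relative to the receiving member and once as output toward
 * the selected destination member.  Either pass may consume the mbuf,
 * in which case forwarding stops silently.
 */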
2511
2512 /*
2513  * bridge_input:
2514  *
2515  *      Receive input from a member interface.  Queue the packet for
2516  *      bridging if it is not for us.
2517  */
2518 static struct mbuf *
2519 bridge_input(struct ifnet *ifp, struct mbuf *m)
2520 {
2521         struct bridge_softc *sc = ifp->if_bridge;
2522         struct bridge_iflist *bif;
2523         struct ifnet *bifp, *new_ifp;
2524         struct ether_header *eh;
2525         struct mbuf *mc, *mc2;
2526
2527         ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2528         mbuftrackid(m, 67);
2529
2530         /*
2531          * Make sure that we are still a member of a bridge interface.
2532          */
2533         if (sc == NULL)
2534                 return m;
2535
2536         new_ifp = NULL;
2537         bifp = sc->sc_ifp;
2538
2539         if ((bifp->if_flags & IFF_RUNNING) == 0)
2540                 goto out;
2541
2542         /*
2543          * Implement support for bridge monitoring.  If this flag has been
2544          * set on this interface, discard the packet once we push it through
2545          * the bpf(4) machinery, but before we do, increment various counters
2546          * associated with this bridge.
2547          */
2548         if (bifp->if_flags & IFF_MONITOR) {
2549                 /* Change input interface to this bridge */
2550                 m->m_pkthdr.rcvif = bifp;
2551
2552                 BPF_MTAP(bifp, m);
2553
2554                 /* Update bridge's ifnet statistics */
2555                 IFNET_STAT_INC(bifp, ipackets, 1);
2556                 IFNET_STAT_INC(bifp, ibytes, m->m_pkthdr.len);
2557                 if (m->m_flags & (M_MCAST | M_BCAST))
2558                         IFNET_STAT_INC(bifp, imcasts, 1);
2559
2560                 m_freem(m);
2561                 m = NULL;
2562                 goto out;
2563         }
2564
2565         /*
2566          * Handle the ether_header
2567          *
2568          * In all cases if the packet is destined for us via our MAC
2569          * we must clear BRIDGE_MBUF_TAGGED to ensure that we don't
2570          * repeat the source MAC out the same interface.
2571          *
2572          * This first test against our bridge MAC is the fast-path.
2573          *
2574          * NOTE!  The bridge interface can serve as an endpoint for
2575          *        communication but normally there are no IPs associated
2576          *        with it so you cannot route through it.  Instead what
2577          *        you do is point your default route *THROUGH* the bridge
2578          *        to the actual default router for one of the bridged spaces.
2579          *
2580          *        Another possibility is to put all your IP specifications
2581          *        on the bridge instead of on the individual interfaces.  If
2582          *        you do this it should be possible to use the bridge as an
2583          *        end point and route (rather than switch) through it using
2584          *        the default route or ipfw forwarding rules.
2585          */
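
	/*
	 * For example (addresses and interface names purely
	 * illustrative), the second approach from the NOTE above would
	 * look roughly like:
	 *
	 *	ifconfig bridge0 create
	 *	ifconfig bridge0 addm em0 addm em1 up
	 *	ifconfig bridge0 inet 192.168.1.2 netmask 255.255.255.0
	 *	route add default 192.168.1.1
	 */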
2586
2587         /*
2588          * Acquire header
2589          */
2590         if (m->m_len < ETHER_HDR_LEN) {
2591                 m = m_pullup(m, ETHER_HDR_LEN);
2592                 if (m == NULL)
2593                         goto out;
2594         }
2595         eh = mtod(m, struct ether_header *);
2596         m->m_pkthdr.fw_flags |= BRIDGE_MBUF_TAGGED;
2597         bcopy(eh, &m->m_pkthdr.br.ether, sizeof(*eh));
2598
2599         if ((bridge_debug & 1) &&
2600             (ntohs(eh->ether_type) == ETHERTYPE_ARP ||
2601             ntohs(eh->ether_type) == ETHERTYPE_REVARP)) {
2602                 kprintf("%02x:%02x:%02x:%02x:%02x:%02x "
2603                         "%02x:%02x:%02x:%02x:%02x:%02x type %04x "
2604                         "lla %02x:%02x:%02x:%02x:%02x:%02x\n",
2605                         eh->ether_dhost[0],
2606                         eh->ether_dhost[1],
2607                         eh->ether_dhost[2],
2608                         eh->ether_dhost[3],
2609                         eh->ether_dhost[4],
2610                         eh->ether_dhost[5],
2611                         eh->ether_shost[0],
2612                         eh->ether_shost[1],
2613                         eh->ether_shost[2],
2614                         eh->ether_shost[3],
2615                         eh->ether_shost[4],
2616                         eh->ether_shost[5],
2617                         eh->ether_type,
2618                         ((u_char *)IF_LLADDR(bifp))[0],
2619                         ((u_char *)IF_LLADDR(bifp))[1],
2620                         ((u_char *)IF_LLADDR(bifp))[2],
2621                         ((u_char *)IF_LLADDR(bifp))[3],
2622                         ((u_char *)IF_LLADDR(bifp))[4],
2623                         ((u_char *)IF_LLADDR(bifp))[5]
2624                 );
2625         }
2626
2627         if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2628                 /*
2629                  * If the packet is for us, set the packets source as the
2630                  * bridge, and return the packet back to ifnet.if_input for
2631                  * local processing.
2632                  */
2633                 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2634                 KASSERT(bifp->if_bridge == NULL,
2635                         ("loop created in bridge_input"));
2636                 if (pfil_member != 0) {
2637                         if (inet_pfil_hook.ph_hashooks > 0
2638 #ifdef INET6
2639                             || inet6_pfil_hook.ph_hashooks > 0
2640 #endif
2641                         ) {
2642                                 if (bridge_pfil(&m, NULL, ifp, PFIL_IN) != 0)
2643                                         goto out;
2644                                 if (m == NULL)
2645                                         goto out;
2646                         }
2647                 }
2648                 new_ifp = bifp;
2649                 goto out;
2650         }
2651
2652         /*
2653          * Tap all packets arriving on the bridge, whether or not
2654          * they are local destinations.  Input is input.
2655          */
2656         BPF_MTAP(bifp, m);
2657
2658         bif = bridge_lookup_member_if(sc, ifp);
2659         if (bif == NULL)
2660                 goto out;
2661
2662         if (sc->sc_span)
2663                 bridge_span(sc, m);
2664
2665         if (m->m_flags & (M_BCAST | M_MCAST)) {
2666                 /*
2667                  * Tap off 802.1D packets; they do not get forwarded.
2668                  */
2669                 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2670                             ETHER_ADDR_LEN) == 0) {
2671                         ifnet_serialize_all(bifp);
2672                         bstp_input(sc, bif, m);
2673                         ifnet_deserialize_all(bifp);
2674
2675                         /* m is freed by bstp_input */
2676                         m = NULL;
2677                         goto out;
2678                 }
2679
2680                 /*
2681                  * Other than 802.1D packets, ignore packets if the
2682                  * interface is not in a good state.
2683                  *
2684                  * NOTE: Broadcast/mcast packets received on a blocking or
2685                  *       learning interface are allowed for local processing.
2686                  *
2687                  *       The sending side of a blocked port will stop
2688                  *       transmitting when a better alternative is found.
2689                  *       However, later on we will disallow the forwarding
2690                  *       of bcast/mcast packets over a blocking interface.
2691                  */
2692                 if (bif->bif_flags & IFBIF_STP) {
2693                         switch (bif->bif_state) {
2694                         case BSTP_IFSTATE_L1BLOCKING:
2695                         case BSTP_IFSTATE_LISTENING:
2696                         case BSTP_IFSTATE_DISABLED:
2697                                 goto out;
2698                         default:
2699                                 /* blocking, learning, bonded, forwarding */
2700                                 break;
2701                         }
2702                 }
2703
2704                 /*
2705                  * Make a deep copy of the packet and enqueue the copy
2706                  * for bridge processing; return the original packet for
2707                  * local processing.
2708                  */
2709                 mc = m_dup(m, MB_DONTWAIT);
2710                 if (mc == NULL)
2711                         goto out;
2712
2713                 /*
2714                  * It's just too dangerous to allow bcast/mcast over a
2715                  * blocked interface; eventually the network will sort
2716                  * itself out and a better path will be found.
2717                  */
2718                 if ((bif->bif_flags & IFBIF_STP) == 0 ||
2719                     bif->bif_state != BSTP_IFSTATE_BLOCKING) {
2720                         bridge_forward(sc, mc);
2721                 }
2722
2723                 /*
2724                  * Reinject the mbuf as arriving on the bridge so we have a
2725                  * chance at claiming multicast packets. We can not loop back
2726                  * here from ether_input as a bridge is never a member of a
2727                  * bridge.
2728                  */
2729                 KASSERT(bifp->if_bridge == NULL,
2730                         ("loop created in bridge_input"));
2731                 mc2 = m_dup(m, MB_DONTWAIT);
2732 #ifdef notyet
2733                 if (mc2 != NULL) {
2734                         /* Keep the layer3 header aligned */
2735                         int i = min(mc2->m_pkthdr.len, max_protohdr);
2736                         mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2737                 }
2738 #endif
2739                 if (mc2 != NULL) {
2740                         /*
2741                          * Don't tap to bpf(4) again; we have already done
2742                          * the tapping.
2743                          *
2744                          * Leave m_pkthdr.rcvif alone, so ARP replies are
2745                          * processed as coming in on the correct interface.
2746                          *
2747                          * Clear the bridge flag for local processing in
2748                          * case the packet gets routed.
2749                          */
2750                         mc2->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2751                         ether_reinput_oncpu(bifp, mc2, 0);
2752                 }
2753
2754                 /* Return the original packet for local processing. */
2755                 goto out;
2756         }
2757
2758         /*
2759          * Input of a unicast packet.  We have to allow unicast packets
2760          * input from links in the BLOCKING state as this might be an
2761          * interface of last resort.
2762          *
2763          * NOTE: We explicitly ignore normal packets received on a link
2764          *       in the BLOCKING state.  The point of being in that state
2765          *       is to avoid getting duplicate packets.
2766          *
2767          *       HOWEVER, if LINK2 is set the normal spanning tree code
2768          *       will mark an interface BLOCKING to avoid multi-cast/broadcast
2769          *       loops.  Unicast packets CAN still loop if we allow the
2770          *       case (hence we only do it in LINK2), but it isn't quite as
2771          *       bad as a broadcast packet looping.
2772          */
2773         if (bif->bif_flags & IFBIF_STP) {
2774                 switch (bif->bif_state) {
2775                 case BSTP_IFSTATE_L1BLOCKING:
2776                 case BSTP_IFSTATE_LISTENING:
2777                 case BSTP_IFSTATE_DISABLED:
2778                         goto out;
2779                 default:
2780                         /* blocking, bonded, forwarding, learning */
2781                         break;
2782                 }
2783         }
2784
2785         /*
2786          * Unicast.  Make sure it's not for us.
2787          *
2788          * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2789          * is followed by breaking out of the loop.
2790          */
2791         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2792                 if (bif->bif_ifp->if_type != IFT_ETHER)
2793                         continue;
2794
2795                 /*
2796                  * It is destined for an interface linked to the bridge.
2797                  * We want the bridge itself to take care of link level
2798                  * forwarding to member interfaces so reinput on the bridge.
2799                  * i.e. if you ping an IP on a target interface associated
2800                  * with the bridge, the arp is-at response should indicate
2801                  * the bridge MAC.
2802                  *
2803                  * Only update our addr list when learning if the port
2804                  * is not in a blocking state.  If it is we still allow
2805                  * the packet but we do not try to learn from it.
2806                  */
2807                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2808                            ETHER_ADDR_LEN) == 0) {
2809                         if (bif->bif_ifp != ifp) {
2810                                 /* XXX loop prevention */
2811                                 m->m_flags |= M_ETHER_BRIDGED;
2812                         }
2813                         if ((bif->bif_flags & IFBIF_LEARNING) &&
2814                             ((bif->bif_flags & IFBIF_STP) == 0 ||
2815                              bif->bif_state != BSTP_IFSTATE_BLOCKING)) {
2816                                 bridge_rtupdate(sc, eh->ether_shost,
2817                                                 ifp, IFBAF_DYNAMIC);
2818                         }
2819                         new_ifp = bifp; /* not bif->bif_ifp */
2820                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2821                         goto out;
2822                 }
2823
2824                 /*
2825                  * Ignore received packets that were sent by us.
2826                  */
2827                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2828                            ETHER_ADDR_LEN) == 0) {
2829                         m_freem(m);
2830                         m = NULL;
2831                         goto out;
2832                 }
2833         }
2834
2835         /*
2836          * It isn't for us.
2837          *
2838          * Perform the bridge forwarding function, but disallow bridging
2839          * to interfaces in the blocking state if the packet came in on
2840          * an interface in the blocking state.
2841          */
2842         bridge_forward(sc, m);
2843         m = NULL;
2844
2845         /*
2846          * ether_reinput_oncpu() will reprocess rcvif as
2847          * coming from new_ifp (since we do not specify
2848          * REINPUT_KEEPRCVIF).
2849          */
2850 out:
2851         if (new_ifp != NULL) {
2852                 /*
2853                  * Clear the bridge flag for local processing in
2854                  * case the packet gets routed.
2855                  */
2856                 ether_reinput_oncpu(new_ifp, m, REINPUT_RUNBPF);
2857                 m = NULL;
2858         }
2859         return (m);
2860 }
2861
2862 /*
2863  * bridge_start_bcast:
2864  *
2865  *      Broadcast the packet sent from bridge to all member
2866  *      interfaces.
2867  *      This is a simplified version of bridge_broadcast(); however,
2868  *      this function expects the caller to hold the bridge's serializer.
2869  */
2870 static void
2871 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2872 {
2873         struct bridge_iflist *bif;
2874         struct mbuf *mc;
2875         struct ifnet *dst_if, *alt_if, *bifp;
2876         int used = 0;
2877         int found = 0;
2878         int alt_priority;
2879
2880         mbuftrackid(m, 68);
2881         bifp = sc->sc_ifp;
2882         ASSERT_IFNET_SERIALIZED_ALL(bifp);
2883
2884         /*
2885          * The following loop is MPSAFE; nothing in the
2886          * loop body blocks.
2887          *
2888          * NOTE: We transmit through a member in the BLOCKING state only
2889          *       as a last resort.
2890          */
2891         alt_if = NULL;
2892         alt_priority = 0;
2893
2894         TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2895                 dst_if = bif->bif_ifp;
2896
2897                 if (bif->bif_flags & IFBIF_STP) {
2898                         switch (bif->bif_state) {
2899                         case BSTP_IFSTATE_BLOCKING:
2900                                 if (bif->bif_priority > alt_priority) {
2901                                         alt_priority = bif->bif_priority;
2902                                         alt_if = bif->bif_ifp;
2903                                 }
2904                                 /* fall through */
2905                         case BSTP_IFSTATE_L1BLOCKING:
2906                         case BSTP_IFSTATE_DISABLED:
2907                                 continue;
2908                         default:
2909                                 /* listening, learning, bonded, forwarding */
2910                                 break;
2911                         }
2912                 }
2913
2914                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2915                     (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2916                         continue;
2917
2918                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2919                         continue;
2920
2921                 if (TAILQ_NEXT(bif, bif_next) == NULL) {
2922                         mc = m;
2923                         used = 1;
2924                 } else {
2925                         mc = m_copypacket(m, MB_DONTWAIT);
2926                         if (mc == NULL) {
2927                                 IFNET_STAT_INC(bifp, oerrors, 1);
2928                                 continue;
2929                         }
2930                 }
2931                 found = 1;
2932                 bridge_enqueue(dst_if, mc);
2933         }
2934
2935         if (found == 0 && alt_if) {
2936                 KKASSERT(used == 0);
2937                 mc = m;
2938                 used = 1;
2939                 bridge_enqueue(alt_if, mc);
2940         }
2941
2942         if (used == 0)
2943                 m_freem(m);
2944 }
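
/*
 * The used/found bookkeeping above (and in bridge_broadcast() below)
 * lets the last member in the list take the original mbuf instead of
 * a copy, and makes sure the original is freed if it was never handed
 * off to anyone.
 */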
2945
2946 /*
2947  * bridge_broadcast:
2948  *
2949  *      Send a frame to all interfaces that are members of
2950  *      the bridge, except for the one on which the packet
2951  *      arrived.
2952  */
2953 static void
2954 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2955                  struct mbuf *m)
2956 {
2957         struct bridge_iflist *bif, *nbif;
2958         struct ether_header *eh;
2959         struct mbuf *mc;
2960         struct ifnet *dst_if, *alt_if, *bifp;
2961         int used;
2962         int found;
2963         int alt_priority;
2964         int from_us;
2965
2966         mbuftrackid(m, 69);
2967         bifp = sc->sc_ifp;
2968         ASSERT_IFNET_NOT_SERIALIZED_ALL(bifp);
2969
2970         eh = mtod(m, struct ether_header *);
2971         from_us = bridge_from_us(sc, eh);
2972
2973         if (inet_pfil_hook.ph_hashooks > 0
2974 #ifdef INET6
2975             || inet6_pfil_hook.ph_hashooks > 0
2976 #endif
2977             ) {
2978                 if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2979                         return;
2980                 if (m == NULL)
2981                         return;
2982
2983                 /* Filter on the bridge interface before broadcasting */
2984                 if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2985                         return;
2986                 if (m == NULL)
2987                         return;
2988         }
2989
2990         alt_if = NULL;
2991         alt_priority = 0;
2992         found = 0;
2993         used = 0;
2994
2995         TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2996                 dst_if = bif->bif_ifp;
2997
2998                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2999                         continue;
3000
3001                 /*
3002                  * Don't bounce the packet out the same interface it came
3003                  * in on.  We have to test MAC addresses because a packet
3004                  * can come in on a bonded interface and we don't want it
3005                  * to be echoed out the forwarding interface for the same
3006                  * bonding set.
3007                  */
3008                 if (src_if && memcmp(IF_LLADDR(src_if), IF_LLADDR(dst_if),
3009                                      ETHER_ADDR_LEN) == 0) {
3010                         continue;
3011                 }
3012
3013                 /*
3014                  * Generally speaking we only broadcast through forwarding
3015                  * interfaces.  If no interfaces are available we select
3016                  * a BONDED, BLOCKING, or LEARNING interface to forward
3017                  * through.
3018                  */
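                /*
                 * NOTE: The priority biases used below (+512 for BONDED,
                 *       +256 for BLOCKING) make the STP state dominate the
                 *       fallback selection: member priorities are 8-bit
                 *       values, so any BONDED candidate outranks any
                 *       BLOCKING one, which in turn outranks any LEARNING
                 *       one; bif_priority only breaks ties within a state.
                 */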
3019                 if (bif->bif_flags & IFBIF_STP) {
3020                         switch (bif->bif_state) {
3021                         case BSTP_IFSTATE_BONDED:
3022                                 if (bif->bif_priority + 512 > alt_priority) {
3023                                         alt_priority = bif->bif_priority + 512;
3024                                         alt_if = bif->bif_ifp;
3025                                 }
3026                                 continue;
3027                         case BSTP_IFSTATE_BLOCKING:
3028                                 if (bif->bif_priority + 256 > alt_priority) {
3029                                         alt_priority = bif->bif_priority + 256;
3030                                         alt_if = bif->bif_ifp;
3031                                 }
3032                                 continue;
3033                         case BSTP_IFSTATE_LEARNING:
3034                                 if (bif->bif_priority > alt_priority) {
3035                                         alt_priority = bif->bif_priority;
3036                                         alt_if = bif->bif_ifp;
3037                                 }
3038                                 continue;
3039                         case BSTP_IFSTATE_L1BLOCKING:
3040                         case BSTP_IFSTATE_DISABLED:
3041                         case BSTP_IFSTATE_LISTENING:
3042                                 continue;
3043                         default:
3044                                 /* forwarding */
3045                                 break;
3046                         }
3047                 }
3048
3049                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
3050                     (m->m_flags & (M_BCAST|M_MCAST)) == 0) {
3051                         continue;
3052                 }
3053
3054                 if (TAILQ_NEXT(bif, bif_next) == NULL) {
3055                         mc = m;
3056                         used = 1;
3057                 } else {
3058                         mc = m_copypacket(m, MB_DONTWAIT);
3059                         if (mc == NULL) {
3060                                 IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
3061                                 continue;
3062                         }
3063                 }
3064                 found = 1;
3065
3066                 /*
3067                  * Filter on the output interface.  Pass a NULL bridge
3068                  * interface pointer so we do not redundantly filter on
3069                  * the bridge for each interface we broadcast on.
3070                  */
3071                 if (inet_pfil_hook.ph_hashooks > 0
3072 #ifdef INET6
3073                     || inet6_pfil_hook.ph_hashooks > 0
3074 #endif
3075                     ) {
3076                         if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3077                                 continue;
3078                         if (mc == NULL)
3079                                 continue;
3080                 }
3081                 bridge_handoff(sc, dst_if, mc, from_us);
3082
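                /*
                 * bridge_handoff() (and the pfil hooks above) may block, so
                 * the member we pre-fetched as 'nbif' can be pulled off this
                 * cpu's iflist while we were away.  If so, recompute the
                 * successor from 'bif', which must still be on the list.
                 */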
3083                 if (nbif != NULL && !nbif->bif_onlist) {
3084                         KKASSERT(bif->bif_onlist);
3085                         nbif = TAILQ_NEXT(bif, bif_next);
3086                 }
3087         }
3088
3089         if (found == 0 && alt_if) {
3090                 KKASSERT(used == 0);
3091                 mc = m;
3092                 used = 1;
3093                 bridge_enqueue(alt_if, mc);
3094         }
3095
3096         if (used == 0)
3097                 m_freem(m);
3098 }
3099
3100 /*
3101  * bridge_span:
3102  *
3103  *      Duplicate a packet out one or more interfaces that are in span
3104  *      mode; the original mbuf is left unmodified.
3105  */
3106 static void
3107 bridge_span(struct bridge_softc *sc, struct mbuf *m)
3108 {
3109         struct bridge_iflist *bif;
3110         struct ifnet *dst_if, *bifp;
3111         struct mbuf *mc;
3112
3113         mbuftrackid(m, 70);
3114         bifp = sc->sc_ifp;
3115         ifnet_serialize_all(bifp);
3116
3117         TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3118                 dst_if = bif->bif_ifp;
3119
3120                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
3121                         continue;
3122
3123                 mc = m_copypacket(m, MB_DONTWAIT);
3124                 if (mc == NULL) {
3125                         IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
3126                         continue;
3127                 }
3128                 bridge_enqueue(dst_if, mc);
3129         }
3130
3131         ifnet_deserialize_all(bifp);
3132 }
3133
3134 static void
3135 bridge_rtmsg_sync_handler(netmsg_t msg)
3136 {
3137         ifnet_forwardmsg(&msg->lmsg, mycpuid + 1);
3138 }
3139
3140 static void
3141 bridge_rtmsg_sync(struct bridge_softc *sc)
3142 {
3143         struct netmsg_base msg;
3144
3145         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3146
3147         netmsg_init(&msg, NULL, &curthread->td_msgport,
3148                     0, bridge_rtmsg_sync_handler);
3149         ifnet_domsg(&msg.lmsg, 0);
3150 }
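
/*
 * NOTE: The two functions above illustrate the chained per-cpu message
 *       pattern used throughout this file: the message is dispatched to
 *       cpu0's ifnet message port, each handler forwards it to the next
 *       cpu with ifnet_forwardmsg(), and forwarding past the last cpu is
 *       expected to reply the message, unblocking the ifnet_domsg() caller.
 */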
3151
3152 static __inline void
3153 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
3154                      int setflags, uint8_t flags, uint32_t timeo)
3155 {
3156         if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3157             bri->bri_ifp != dst_if)
3158                 bri->bri_ifp = dst_if;
3159         if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3160             bri->bri_expire != time_second + timeo)
3161                 bri->bri_expire = time_second + timeo;
3162         if (setflags)
3163                 bri->bri_flags = flags;
3164 }
3165
3166 static int
3167 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
3168                        struct ifnet *dst_if, int setflags, uint8_t flags,
3169                        struct bridge_rtinfo **bri0)
3170 {
3171         struct bridge_rtnode *brt;
3172         struct bridge_rtinfo *bri;
3173
3174         if (mycpuid == 0) {
3175                 brt = bridge_rtnode_lookup(sc, dst);
3176                 if (brt != NULL) {
3177                         /*
3178                          * An rtnode for 'dst' already exists.  We inform
3179                          * the caller of this by leaving bri0 as NULL.  The
3180                          * caller will terminate the installation upon
3181                          * getting a NULL bri0.  However, we still need to
3182                          * update the rtinfo.
3183                          */
3184                         KKASSERT(*bri0 == NULL);
3185
3186                         /* Update rtinfo */
3187                         bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
3188                                              flags, sc->sc_brttimeout);
3189                         return 0;
3190                 }
3191
3192                 /*
3193                  * We only need to check brtcnt on CPU0; if the limit
3194                  * would be exceeded, ENOSPC is returned.  The caller
3195                  * knows this and will terminate the installation.
3196                  */
3197                 if (sc->sc_brtcnt >= sc->sc_brtmax)
3198                         return ENOSPC;
3199
3200                 KKASSERT(*bri0 == NULL);
3201                 bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
3202                                   M_WAITOK | M_ZERO);
3203                 *bri0 = bri;
3204
3205                 /* Setup rtinfo */
3206                 bri->bri_flags = IFBAF_DYNAMIC;
3207                 bridge_rtinfo_update(bri, dst_if, setflags, flags,
3208                                      sc->sc_brttimeout);
3209         } else {
3210                 bri = *bri0;
3211                 KKASSERT(bri != NULL);
3212         }
3213
3214         brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
3215                       M_WAITOK | M_ZERO);
3216         memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3217         brt->brt_info = bri;
3218
3219         bridge_rtnode_insert(sc, brt);
3220         return 0;
3221 }
3222
3223 static void
3224 bridge_rtinstall_handler(netmsg_t msg)
3225 {
3226         struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)msg;
3227         int error;
3228
3229         error = bridge_rtinstall_oncpu(brmsg->br_softc,
3230                                        brmsg->br_dst, brmsg->br_dst_if,
3231                                        brmsg->br_setflags, brmsg->br_flags,
3232                                        &brmsg->br_rtinfo);
3233         if (error) {
3234                 KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
3235                 lwkt_replymsg(&brmsg->base.lmsg, error);
3236                 return;
3237         } else if (brmsg->br_rtinfo == NULL) {
3238                 /* rtnode already exists for 'dst' */
3239                 KKASSERT(mycpuid == 0);
3240                 lwkt_replymsg(&brmsg->base.lmsg, 0);
3241                 return;
3242         }
3243         ifnet_forwardmsg(&brmsg->base.lmsg, mycpuid + 1);
3244 }
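
/*
 * NOTE: Installation protocol: bridge_rtinstall_oncpu() runs on each cpu in
 *       turn.  On cpu0 it either finds an existing rtnode for 'dst' (and
 *       only updates the shared rtinfo, leaving *bri0 NULL so the chain is
 *       replied early), or allocates the single shared bridge_rtinfo.
 *       Every cpu then allocates its own bridge_rtnode pointing at that
 *       shared rtinfo and inserts it into its local hash table and list.
 */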
3245
3246 /*
3247  * bridge_rtupdate:
3248  *
3249  *      Add/Update a bridge routing entry.
3250  */
3251 static int
3252 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3253                 struct ifnet *dst_if, uint8_t flags)
3254 {
3255         struct bridge_rtnode *brt;
3256
3257         /*
3258          * A route for this destination might already exist.  If so,
3259          * update it, otherwise create a new one.
3260          */
3261         if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
3262                 struct netmsg_brsaddr *brmsg;
3263
3264                 if (sc->sc_brtcnt >= sc->sc_brtmax)
3265                         return ENOSPC;
3266
3267                 brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
3268                 if (brmsg == NULL)
3269                         return ENOMEM;
3270
3271                 netmsg_init(&brmsg->base, NULL, &netisr_afree_rport,
3272                             0, bridge_rtinstall_handler);
3273                 memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
3274                 brmsg->br_dst_if = dst_if;
3275                 brmsg->br_flags = flags;
3276                 brmsg->br_setflags = 0;
3277                 brmsg->br_softc = sc;
3278                 brmsg->br_rtinfo = NULL;
3279
3280                 ifnet_sendmsg(&brmsg->base.lmsg, 0);
3281                 return 0;
3282         }
3283         bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
3284                              sc->sc_brttimeout);
3285         return 0;
3286 }
3287
3288 static int
3289 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
3290                struct ifnet *dst_if, uint8_t flags)
3291 {
3292         struct netmsg_brsaddr brmsg;
3293
3294         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3295
3296         netmsg_init(&brmsg.base, NULL, &curthread->td_msgport,
3297                     0, bridge_rtinstall_handler);
3298         memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
3299         brmsg.br_dst_if = dst_if;
3300         brmsg.br_flags = flags;
3301         brmsg.br_setflags = 1;
3302         brmsg.br_softc = sc;
3303         brmsg.br_rtinfo = NULL;
3304
3305         return ifnet_domsg(&brmsg.base.lmsg, 0);
3306 }
3307
3308 /*
3309  * bridge_rtlookup:
3310  *
3311  *      Look up the destination interface for an address.
3312  */
3313 static struct ifnet *
3314 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
3315 {
3316         struct bridge_rtnode *brt;
3317
3318         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
3319                 return NULL;
3320         return brt->brt_info->bri_ifp;
3321 }
3322
3323 static void
3324 bridge_rtreap_handler(netmsg_t msg)
3325 {
3326         struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
3327         struct bridge_rtnode *brt, *nbrt;
3328
3329         LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
3330                 if (brt->brt_info->bri_dead)
3331                         bridge_rtnode_destroy(sc, brt);
3332         }
3333         ifnet_forwardmsg(&msg->lmsg, mycpuid + 1);
3334 }
3335
3336 static void
3337 bridge_rtreap(struct bridge_softc *sc)
3338 {
3339         struct netmsg_base msg;
3340
3341         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3342
3343         netmsg_init(&msg, NULL, &curthread->td_msgport,
3344                     0, bridge_rtreap_handler);
3345         msg.lmsg.u.ms_resultp = sc;
3346
3347         ifnet_domsg(&msg.lmsg, 0);
3348 }
3349
3350 static void
3351 bridge_rtreap_async(struct bridge_softc *sc)
3352 {
3353         struct netmsg_base *msg;
3354
3355         msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK);
3356
3357         netmsg_init(msg, NULL, &netisr_afree_rport,
3358                     0, bridge_rtreap_handler);
3359         msg->lmsg.u.ms_resultp = sc;
3360
3361         ifnet_sendmsg(&msg->lmsg, 0);
3362 }
3363
3364 /*
3365  * bridge_rttrim:
3366  *
3367  *      Trim the routing table so that the number of routing
3368  *      entries is less than or equal to the configured
3369  *      maximum.
3370  */
3371 static void
3372 bridge_rttrim(struct bridge_softc *sc)
3373 {
3374         struct bridge_rtnode *brt;
3375         int dead;
3376
3377         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3378
3379         /* Make sure we actually need to do this. */
3380         if (sc->sc_brtcnt <= sc->sc_brtmax)
3381                 return;
3382
3383         /*
3384          * Find out how many rtnodes are dead
3385          */
3386         dead = bridge_rtage_finddead(sc);
3387         KKASSERT(dead <= sc->sc_brtcnt);
3388
3389         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
3390                 /* Enough dead rtnodes were found */
3391                 bridge_rtreap(sc);
3392                 return;
3393         }
3394
3395         /*
3396          * Kill some dynamic rtnodes to meet the brtmax
3397          */
3398         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3399                 struct bridge_rtinfo *bri = brt->brt_info;
3400
3401                 if (bri->bri_dead) {
3402                         /*
3403                          * We have counted this rtnode in
3404                          * bridge_rtage_finddead()
3405                          */
3406                         continue;
3407                 }
3408
3409                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3410                         bri->bri_dead = 1;
3411                         ++dead;
3412                         KKASSERT(dead <= sc->sc_brtcnt);
3413
3414                         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
3415                                 /* Enough rtnodes are collected */
3416                                 break;
3417                         }
3418                 }
3419         }
3420         if (dead)
3421                 bridge_rtreap(sc);
3422 }
3423
3424 /*
3425  * bridge_timer:
3426  *
3427  *      Aging timer for the bridge.
3428  */
3429 static void
3430 bridge_timer(void *arg)
3431 {
3432         struct bridge_softc *sc = arg;
3433         struct netmsg_base *msg;
3434
3435         KKASSERT(mycpuid == BRIDGE_CFGCPU);
3436
3437         crit_enter();
3438
3439         if (callout_pending(&sc->sc_brcallout) ||
3440             !callout_active(&sc->sc_brcallout)) {
3441                 crit_exit();
3442                 return;
3443         }
3444         callout_deactivate(&sc->sc_brcallout);
3445
3446         msg = &sc->sc_brtimemsg;
3447         KKASSERT(msg->lmsg.ms_flags & MSGF_DONE);
3448         lwkt_sendmsg(BRIDGE_CFGPORT, &msg->lmsg);
3449
3450         crit_exit();
3451 }
3452
3453 static void
3454 bridge_timer_handler(netmsg_t msg)
3455 {
3456         struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
3457
3458         KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
3459
3460         crit_enter();
3461         /* Reply ASAP */
3462         lwkt_replymsg(&msg->lmsg, 0);
3463         crit_exit();
3464
3465         bridge_rtage(sc);
3466         if (sc->sc_ifp->if_flags & IFF_RUNNING) {
3467                 callout_reset(&sc->sc_brcallout,
3468                     bridge_rtable_prune_period * hz, bridge_timer, sc);
3469         }
3470 }
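
/*
 * NOTE: bridge_timer() runs from the callout on BRIDGE_CFGCPU and only posts
 *       the embedded sc_brtimemsg to BRIDGE_CFGPORT; the aging work itself is
 *       done in bridge_timer_handler() above.  The handler replies the
 *       message before calling bridge_rtage(), which keeps sc_brtimemsg DONE
 *       (see the KKASSERT in bridge_timer()) well before the rearmed callout
 *       next fires.
 */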
3471
3472 static int
3473 bridge_rtage_finddead(struct bridge_softc *sc)
3474 {
3475         struct bridge_rtnode *brt;
3476         int dead = 0;
3477
3478         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3479                 struct bridge_rtinfo *bri = brt->brt_info;
3480
3481                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3482                     time_second >= bri->bri_expire) {
3483                         bri->bri_dead = 1;
3484                         ++dead;
3485                         KKASSERT(dead <= sc->sc_brtcnt);
3486                 }
3487         }
3488         return dead;
3489 }
3490
3491 /*
3492  * bridge_rtage:
3493  *
3494  *      Perform an aging cycle.
3495  */
3496 static void
3497 bridge_rtage(struct bridge_softc *sc)
3498 {
3499         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3500
3501         if (bridge_rtage_finddead(sc))
3502                 bridge_rtreap(sc);
3503 }
3504
3505 /*
3506  * bridge_rtflush:
3507  *
3508  *      Remove all dynamic addresses from the bridge.
3509  */
3510 static void
3511 bridge_rtflush(struct bridge_softc *sc, int bf)
3512 {
3513         struct bridge_rtnode *brt;
3514         int reap;
3515
3516         reap = 0;
3517         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3518                 struct bridge_rtinfo *bri = brt->brt_info;
3519
3520                 if ((bf & IFBF_FLUSHALL) ||
3521                     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3522                         bri->bri_dead = 1;
3523                         reap = 1;
3524                 }
3525         }
3526         if (reap) {
3527                 if (bf & IFBF_FLUSHSYNC)
3528                         bridge_rtreap(sc);
3529                 else
3530                         bridge_rtreap_async(sc);
3531         }
3532 }
3533
3534 /*
3535  * bridge_rtdaddr:
3536  *
3537  *      Remove an address from the table.
3538  */
3539 static int
3540 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
3541 {
3542         struct bridge_rtnode *brt;
3543
3544         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3545
3546         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
3547                 return (ENOENT);
3548
3549         /* TODO: add a cheaper delete operation */
3550         brt->brt_info->bri_dead = 1;
3551         bridge_rtreap(sc);
3552         return (0);
3553 }
3554
3555 /*
3556  * bridge_rtdelete:
3557  *
3558  *      Delete routes to a specific member interface.
3559  */
3560 void
3561 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
3562 {
3563         struct bridge_rtnode *brt;
3564         int reap;
3565
3566         reap = 0;
3567         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3568                 struct bridge_rtinfo *bri = brt->brt_info;
3569
3570                 if (bri->bri_ifp == ifp &&
3571                     ((bf & IFBF_FLUSHALL) ||
3572                      (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
3573                         bri->bri_dead = 1;
3574                         reap = 1;
3575                 }
3576         }
3577         if (reap) {
3578                 if (bf & IFBF_FLUSHSYNC)
3579                         bridge_rtreap(sc);
3580                 else
3581                         bridge_rtreap_async(sc);
3582         }
3583 }
3584
3585 /*
3586  * bridge_rtable_init:
3587  *
3588  *      Initialize the route table for this bridge.
3589  */
3590 static void
3591 bridge_rtable_init(struct bridge_softc *sc)
3592 {
3593         int cpu;
3594
3595         /*
3596          * Initialize per-cpu hash tables
3597          */
3598         sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
3599                                  M_DEVBUF, M_WAITOK);
3600         for (cpu = 0; cpu < ncpus; ++cpu) {
3601                 int i;
3602
3603                 sc->sc_rthashs[cpu] =
3604                 kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
3605                         M_DEVBUF, M_WAITOK);
3606
3607                 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3608                         LIST_INIT(&sc->sc_rthashs[cpu][i]);
3609         }
3610         sc->sc_rthash_key = karc4random();
3611
3612         /*
3613          * Initialize per-cpu lists
3614          */
3615         sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
3616                                  M_DEVBUF, M_WAITOK);
3617         for (cpu = 0; cpu < ncpus; ++cpu)
3618                 LIST_INIT(&sc->sc_rtlists[cpu]);
3619 }
3620
3621 /*
3622  * bridge_rtable_fini:
3623  *
3624  *      Deconstruct the route table for this bridge.
3625  */
3626 static void
3627 bridge_rtable_fini(struct bridge_softc *sc)
3628 {
3629         int cpu;
3630
3631         /*
3632          * Free per-cpu hash tables
3633          */
3634         for (cpu = 0; cpu < ncpus; ++cpu)
3635                 kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3636         kfree(sc->sc_rthashs, M_DEVBUF);
3637
3638         /*
3639          * Free per-cpu lists
3640          */
3641         kfree(sc->sc_rtlists, M_DEVBUF);
3642 }
3643
3644 /*
3645  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3646  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
3647  */
3648 #define mix(a, b, c)                                                    \
3649 do {                                                                    \
3650         a -= b; a -= c; a ^= (c >> 13);                                 \
3651         b -= c; b -= a; b ^= (a << 8);                                  \
3652         c -= a; c -= b; c ^= (b >> 13);                                 \
3653         a -= b; a -= c; a ^= (c >> 12);                                 \
3654         b -= c; b -= a; b ^= (a << 16);                                 \
3655         c -= a; c -= b; c ^= (b >> 5);                                  \
3656         a -= b; a -= c; a ^= (c >> 3);                                  \
3657         b -= c; b -= a; b ^= (a << 10);                                 \
3658         c -= a; c -= b; c ^= (b >> 15);                                 \
3659 } while (/*CONSTCOND*/0)
3660
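/*
 * bridge_rthash:
 *
 *      Hash an Ethernet address into the per-cpu route hash table.  The
 *      first four octets of the address are mixed into 'a', the last two
 *      into 'b', and the per-bridge random key sc_rthash_key seeds 'c',
 *      presumably so bucket placement cannot be predicted from the MAC
 *      address alone.  A bucket is then reached as, e.g. (illustrative):
 *
 *              &sc->sc_rthashs[mycpuid][bridge_rthash(sc, addr)]
 */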
3661 static __inline uint32_t
3662 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3663 {
3664         uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3665
3666         b += addr[5] << 8;
3667         b += addr[4];
3668         a += addr[3] << 24;
3669         a += addr[2] << 16;
3670         a += addr[1] << 8;
3671         a += addr[0];
3672
3673         mix(a, b, c);
3674
3675         return (c & BRIDGE_RTHASH_MASK);
3676 }
3677
3678 #undef mix
3679
3680 static int
3681 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3682 {
3683         int i, d;
3684
3685         for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3686                 d = ((int)a[i]) - ((int)b[i]);
3687         }
3688
3689         return (d);
3690 }
3691
3692 /*
3693  * bridge_rtnode_lookup:
3694  *
3695  *      Look up a bridge route node for the specified destination.
3696  */
3697 static struct bridge_rtnode *
3698 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3699 {
3700         struct bridge_rtnode *brt;
3701         uint32_t hash;
3702         int dir;
3703
3704         hash = bridge_rthash(sc, addr);
3705         LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3706                 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3707                 if (dir == 0)
3708                         return (brt);
3709                 if (dir > 0)
3710                         return (NULL);
3711         }
3712
3713         return (NULL);
3714 }
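
/*
 * NOTE: The early "dir > 0" termination above relies on
 *       bridge_rtnode_insert() below keeping each hash chain sorted with
 *       larger addresses first; once the search key compares greater than
 *       the current entry it cannot appear later in the chain.
 */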
3715
3716 /*
3717  * bridge_rtnode_insert:
3718  *
3719  *      Insert the specified bridge node into the route table.
3720  *      The caller must make sure that the rtnode does not already exist.
3721  */
3722 static void
3723 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3724 {
3725         struct bridge_rtnode *lbrt;
3726         uint32_t hash;
3727         int dir;
3728
3729         hash = bridge_rthash(sc, brt->brt_addr);
3730
3731         lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3732         if (lbrt == NULL) {
3733                 LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash],
3734                                   brt, brt_hash);
3735                 goto out;
3736         }
3737
3738         do {
3739                 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3740                 KASSERT(dir != 0, ("rtnode already exists"));
3741
3742                 if (dir > 0) {
3743                         LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3744                         goto out;
3745                 }
3746                 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3747                         LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3748                         goto out;
3749                 }
3750                 lbrt = LIST_NEXT(lbrt, brt_hash);
3751         } while (lbrt != NULL);
3752
3753         panic("no suitable position found for rtnode");
3754 out:
3755         LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3756         if (mycpuid == 0) {
3757                 /*
3758                  * Update the brtcnt.
3759                  * We only need to do it once and we do it on CPU0.
3760                  */
3761                 sc->sc_brtcnt++;
3762         }
3763 }
3764
3765 /*
3766  * bridge_rtnode_destroy:
3767  *
3768  *      Destroy a bridge rtnode.
3769  */
3770 static void
3771 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3772 {
3773         LIST_REMOVE(brt, brt_hash);
3774         LIST_REMOVE(brt, brt_list);
3775
3776         if (mycpuid + 1 == ncpus) {
3777                 /* Free rtinfo associated with rtnode on the last cpu */
3778                 kfree(brt->brt_info, M_DEVBUF);
3779         }
3780         kfree(brt, M_DEVBUF);
3781
3782         if (mycpuid == 0) {
3783                 /* Update brtcnt only on CPU0 */
3784                 sc->sc_brtcnt--;
3785         }
3786 }
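
/*
 * NOTE: Like installation, destruction is replicated: each cpu frees its
 *       private bridge_rtnode, the shared bridge_rtinfo is freed only on
 *       the last cpu, and sc_brtcnt is adjusted only on cpu0 so the count
 *       is maintained exactly once.
 */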
3787
3788 static __inline int
3789 bridge_post_pfil(struct mbuf *m)
3790 {
3791         if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3792                 return EOPNOTSUPP;
3793
3794         /* Not yet */
3795         if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3796                 return EOPNOTSUPP;
3797
3798         return 0;
3799 }
3800
3801 /*
3802  * Send bridge packets through pfil if they are one of the types pfil can deal
3803  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3804  * question.)  If bifp or ifp is NULL then packet filtering is skipped for
3805  * that interface.
3806  */
3807 static int
3808 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3809 {
3810         int snap, error, i, hlen;
3811         struct ether_header *eh1, eh2;
3812         struct ip *ip;
3813         struct llc llc1;
3814         u_int16_t ether_type;
3815
3816         snap = 0;
3817         error = -1;     /* Default error unless explicitly set to 0 below */
3818
3819         if (pfil_bridge == 0 && pfil_member == 0)
3820                 return (0); /* filtering is disabled */
3821
3822         i = min((*mp)->m_pkthdr.len, max_protohdr);
3823         if ((*mp)->m_len < i) {
3824                 *mp = m_pullup(*mp, i);
3825                 if (*mp == NULL) {
3826                         kprintf("%s: m_pullup failed\n", __func__);
3827                         return (-1);
3828                 }
3829         }
3830
3831         eh1 = mtod(*mp, struct ether_header *);
3832         ether_type = ntohs(eh1->ether_type);
3833
3834         /*
3835          * Check for SNAP/LLC.
3836          */
3837         if (ether_type < ETHERMTU) {
3838                 struct llc *llc2 = (struct llc *)(eh1 + 1);
3839
3840                 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3841                     llc2->llc_dsap == LLC_SNAP_LSAP &&
3842                     llc2->llc_ssap == LLC_SNAP_LSAP &&
3843                     llc2->llc_control == LLC_UI) {
3844                         ether_type = htons(llc2->llc_un.type_snap.ether_type);
3845                         snap = 1;
3846                 }
3847         }
3848
3849         /*
3850          * If we're trying to filter bridge traffic, don't look at anything
3851          * other than IP and ARP traffic.  If the filter doesn't understand
3852          * IPv6, don't allow IPv6 through the bridge either.  This is lame
3853          * since if we really wanted, say, an AppleTalk filter, we are hosed,
3854          * but of course we don't have an AppleTalk filter to begin with.
3855          * (Note that since pfil doesn't understand ARP it will pass *ALL*
3856          * ARP traffic.)
3857          */
3858         switch (ether_type) {
3859         case ETHERTYPE_ARP:
3860         case ETHERTYPE_REVARP:
3861                 return (0); /* Automatically pass */
3862
3863         case ETHERTYPE_IP:
3864 #ifdef INET6
3865         case ETHERTYPE_IPV6:
3866 #endif /* INET6 */
3867                 break;
3868
3869         default:
3870                 /*
3871                  * Check to see if the user wants to pass non-IP
3872                  * packets; these will not be checked by pfil(9) and
3873                  * would be passed unconditionally, so the default is to drop.
3874                  */
3875                 if (pfil_onlyip)
3876                         goto bad;
3877         }
3878
3879         /* Strip off the Ethernet header and keep a copy. */
3880         m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3881         m_adj(*mp, ETHER_HDR_LEN);
3882
3883         /* Strip off snap header, if present */
3884         if (snap) {
3885                 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3886                 m_adj(*mp, sizeof(struct llc));
3887         }
3888
3889         /*
3890          * Check the IP header for alignment and errors
3891          */
3892         if (dir == PFIL_IN) {
3893                 switch (ether_type) {
3894                 case ETHERTYPE_IP:
3895                         error = bridge_ip_checkbasic(mp);
3896                         break;
3897 #ifdef INET6
3898                 case ETHERTYPE_IPV6:
3899                         error = bridge_ip6_checkbasic(mp);
3900                         break;
3901 #endif /* INET6 */
3902                 default:
3903                         error = 0;
3904                 }
3905                 if (error)
3906                         goto bad;
3907         }
3908
3909         error = 0;
3910
3911         /*
3912          * Run the packet through pfil
3913          */
3914         switch (ether_type) {
3915         case ETHERTYPE_IP:
3916                 /*
3917                  * Before calling the firewall, swap fields the same way
3918                  * the IP layer does.  Here we assume the header is contiguous.
3919                  */
3920                 ip = mtod(*mp, struct ip *);
3921
3922                 ip->ip_len = ntohs(ip->ip_len);
3923                 ip->ip_off = ntohs(ip->ip_off);
3924
3925                 /*
3926                  * Run pfil on the member interface and the bridge, both can
3927                  * be skipped by clearing pfil_member or pfil_bridge.
3928                  *
3929                  * Keep the order:
3930                  *   in_if -> bridge_if -> out_if
3931                  */
3932                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3933                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3934                         if (*mp == NULL || error != 0) /* filter may consume */
3935                                 break;
3936                         error = bridge_post_pfil(*mp);
3937                         if (error)
3938                                 break;
3939                 }
3940
3941                 if (pfil_member && ifp != NULL) {
3942                         error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3943                         if (*mp == NULL || error != 0) /* filter may consume */
3944                                 break;
3945                         error = bridge_post_pfil(*mp);
3946                         if (error)
3947                                 break;
3948                 }
3949
3950                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3951                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3952                         if (*mp == NULL || error != 0) /* filter may consume */
3953                                 break;
3954                         error = bridge_post_pfil(*mp);
3955                         if (error)
3956                                 break;
3957                 }
3958
3959                 /* check if we need to fragment the packet */
3960                 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3961                         i = (*mp)->m_pkthdr.len;
3962                         if (i > ifp->if_mtu) {
3963                                 error = bridge_fragment(ifp, *mp, &eh2, snap,
3964                                             &llc1);
3965                                 return (error);
3966                         }
3967                 }
3968
3969                 /* Recalculate the ip checksum and restore byte ordering */
3970                 ip = mtod(*mp, struct ip *);
3971                 hlen = ip->ip_hl << 2;
3972                 if (hlen < sizeof(struct ip))
3973                         goto bad;
3974                 if (hlen > (*mp)->m_len) {
3975                         if ((*mp = m_pullup(*mp, hlen)) == NULL)
3976                                 goto bad;
3977                         ip = mtod(*mp, struct ip *);
3978                         if (ip == NULL)
3979                                 goto bad;
3980                 }
3981                 ip->ip_len = htons(ip->ip_len);
3982                 ip->ip_off = htons(ip->ip_off);
3983                 ip->ip_sum = 0;
3984                 if (hlen == sizeof(struct ip))
3985                         ip->ip_sum = in_cksum_hdr(ip);
3986                 else
3987                         ip->ip_sum = in_cksum(*mp, hlen);
3988
3989                 break;
3990 #ifdef INET6
3991         case ETHERTYPE_IPV6:
3992                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3993                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3994                                         dir);
3995
3996                 if (*mp == NULL || error != 0) /* filter may consume */
3997                         break;
3998
3999                 if (pfil_member && ifp != NULL)
4000                         error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
4001                                         dir);
4002
4003                 if (*mp == NULL || error != 0) /* filter may consume */
4004                         break;
4005
4006                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4007                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4008                                         dir);
4009                 break;
4010 #endif
4011         default:
4012                 error = 0;
4013                 break;
4014         }
4015
4016         if (*mp == NULL)
4017                 return (error);
4018         if (error != 0)
4019                 goto bad;
4020
4021         error = -1;
4022
4023         /*
4024          * Finally, put everything back the way it was and return
4025          */
4026         if (snap) {
4027                 M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
4028                 if (*mp == NULL)
4029                         return (error);
4030                 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
4031         }
4032
4033         M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
4034         if (*mp == NULL)
4035                 return (error);
4036         bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4037
4038         return (0);
4039
4040 bad:
4041         m_freem(*mp);
4042         *mp = NULL;
4043         return (error);
4044 }
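
/*
 * NOTE: bridge_pfil() return convention: a return of 0 with *mp still
 *       non-NULL means the (possibly modified) packet should continue; a
 *       non-zero return or *mp == NULL means the packet was consumed or
 *       freed and must not be touched again, which is why callers check
 *       both, as in bridge_broadcast() above.
 */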
4045
4046 /*
4047  * Perform basic checks on the header size since
4048  * pfil assumes ip_input has already processed
4049  * the header for it.  Cut-and-pasted from ip_input.c.
4050  * Given how simple the IPv6 version is,
4051  * does the IPv4 version really need to be
4052  * this complicated?
4053  *
4054  * XXX Should we update ipstat here, or not?
4055  * XXX Right now we update ipstat but not
4056  * XXX csum_counter.
4057  */
4058 static int
4059 bridge_ip_checkbasic(struct mbuf **mp)
4060 {
4061         struct mbuf *m = *mp;
4062         struct ip *ip;
4063         int len, hlen;
4064         u_short sum;
4065
4066         if (*mp == NULL)
4067                 return (-1);
4068 #if 0 /* notyet */
4069         if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4070                 if ((m = m_copyup(m, sizeof(struct ip),
4071                         (max_linkhdr + 3) & ~3)) == NULL) {
4072                         /* XXXJRT new stat, please */
4073                         ipstat.ips_toosmall++;
4074                         goto bad;
4075                 }
4076         } else
4077 #endif
4078 #ifndef __predict_false
4079 #define __predict_false(x) x
4080 #endif
4081          if (__predict_false(m->m_len < sizeof (struct ip))) {
4082                 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
4083                         ipstat.ips_toosmall++;
4084                         goto bad;
4085                 }
4086         }
4087         ip = mtod(m, struct ip *);
4088         if (ip == NULL) goto bad;
4089
4090         if (ip->ip_v != IPVERSION) {
4091                 ipstat.ips_badvers++;
4092                 goto bad;
4093         }
4094         hlen = ip->ip_hl << 2;
4095         if (hlen < sizeof(struct ip)) { /* minimum header length */
4096                 ipstat.ips_badhlen++;
4097                 goto bad;
4098         }
4099         if (hlen > m->m_len) {
4100                 if ((m = m_pullup(m, hlen)) == NULL) {
4101                         ipstat.ips_badhlen++;
4102                         goto bad;
4103                 }
4104                 ip = mtod(m, struct ip *);
4105                 if (ip == NULL) goto bad;
4106         }
4107
4108         if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4109                 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4110         } else {
4111                 if (hlen == sizeof(struct ip)) {
4112                         sum = in_cksum_hdr(ip);
4113                 } else {
4114                         sum = in_cksum(m, hlen);
4115                 }
4116         }
4117         if (sum) {
4118                 ipstat.ips_badsum++;
4119                 goto bad;
4120         }
4121
4122         /* Retrieve the packet length. */
4123         len = ntohs(ip->ip_len);
4124
4125         /*
4126          * Check for additional length bogosity
4127          */
4128         if (len < hlen) {
4129                 ipstat.ips_badlen++;
4130                 goto bad;
4131         }
4132
4133         /*
4134          * Check that the amount of data in the buffers
4135          * is at least as much as the IP header would have us expect.
4136          * Drop the packet if it is shorter than we expect.
4137          */
4138         if (m->m_pkthdr.len < len) {
4139                 ipstat.ips_tooshort++;
4140                 goto bad;
4141         }
4142
4143         /* Checks out, proceed */
4144         *mp = m;
4145         return (0);
4146
4147 bad:
4148         *mp = m;
4149         return (-1);
4150 }
4151
4152 #ifdef INET6
4153 /*
4154  * Same as above, but for IPv6.
4155  * Cut-and-pasted from ip6_input.c.
4156  * XXX Should we update ip6stat, or not?
4157  */
4158 static int
4159 bridge_ip6_checkbasic(struct mbuf **mp)
4160 {
4161         struct mbuf *m = *mp;
4162         struct ip6_hdr *ip6;
4163
4164         /*
4165          * If the IPv6 header is not aligned, slurp it up into a new
4166          * mbuf with space for link headers, in the event we forward
4167          * it.  Otherwise, if it is aligned, make sure the entire base
4168          * IPv6 header is in the first mbuf of the chain.
4169          */
4170 #if 0 /* notyet */
4171         if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4172                 struct ifnet *inifp = m->m_pkthdr.rcvif;
4173                 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
4174                             (max_linkhdr + 3) & ~3)) == NULL) {
4175                         /* XXXJRT new stat, please */
4176                         ip6stat.ip6s_toosmall++;
4177                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4178                         goto bad;
4179                 }
4180         } else
4181 #endif
4182         if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
4183                 struct ifnet *inifp = m->m_pkthdr.rcvif;
4184                 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
4185                         ip6stat.ip6s_toosmall++;
4186                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4187                         goto bad;
4188                 }
4189         }
4190
4191         ip6 = mtod(m, struct ip6_hdr *);
4192
4193         if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
4194                 ip6stat.ip6s_badvers++;
4195                 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
4196                 goto bad;
4197         }
4198
4199         /* Checks out, proceed */
4200         *mp = m;
4201         return (0);
4202
4203 bad:
4204         *mp = m;
4205         return (-1);
4206 }
4207 #endif /* INET6 */
4208
4209 /*
4210  * bridge_fragment:
4211  *
4212  *      Return a fragmented mbuf chain.
4213  */
4214 static int
4215 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
4216     int snap, struct llc *llc)
4217 {
4218         struct mbuf *m0;
4219         struct ip *ip;
4220         int error = -1;
4221
4222         if (m->m_len < sizeof(struct ip) &&
4223             (m = m_pullup(m, sizeof(struct ip))) == NULL)
4224                 goto out;
4225         ip = mtod(m, struct ip *);
4226
4227         error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
4228                     CSUM_DELAY_IP);
4229         if (error)
4230                 goto out;
4231
4232         /* walk the chain and re-add the Ethernet header */
4233         for (m0 = m; m0; m0 = m0->m_nextpkt) {
4234                 if (error == 0) {
4235                         if (snap) {
4236                                 M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
4237                                 if (m0 == NULL) {
4238                                         error = ENOBUFS;
4239                                         continue;
4240                                 }
4241                                 bcopy(llc, mtod(m0, caddr_t),
4242                                     sizeof(struct llc));
4243                         }
4244                         M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
4245                         if (m0 == NULL) {
4246                                 error = ENOBUFS;
4247                                 continue;
4248                         }
4249                         bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
4250                 } else 
4251                         m_freem(m);
4252         }
4253
4254         if (error == 0)
4255                 ipstat.ips_fragmented++;
4256
4257         return (error);
4258
4259 out:
4260         if (m != NULL)
4261                 m_freem(m);
4262         return (error);
4263 }
4264
4265 static void
4266 bridge_enqueue_handler(netmsg_t msg)
4267 {
4268         struct netmsg_packet *nmp;
4269         struct ifnet *dst_ifp;
4270         struct mbuf *m;
4271
4272         nmp = &msg->packet;
4273         m = nmp->nm_packet;
4274         dst_ifp = nmp->base.lmsg.u.ms_resultp;
4275         mbuftrackid(m, 71);
4276
4277         bridge_handoff(dst_ifp->if_bridge, dst_ifp, m, 1);
4278 }
4279
4280 static void
4281 bridge_handoff(struct bridge_softc *sc, struct ifnet *dst_ifp,
4282                struct mbuf *m, int from_us)
4283 {
4284         struct mbuf *m0;
4285         struct ifnet *bifp;
4286
4287         bifp = sc->sc_ifp;
4288         mbuftrackid(m, 72);
4289
4290         /* We may be sending fragments, so traverse the mbuf packet chain */
4291         for (; m; m = m0) {
4292                 struct altq_pktattr pktattr;
4293
4294                 m0 = m->m_nextpkt;
4295                 m->m_nextpkt = NULL;
4296
4297                 /*
4298                  * If being sent from our host, override ether_shost
4299                  * with the bridge MAC.  This is mandatory for ARP
4300                  * so things don't get confused.  In particular we
4301                  * don't want ARPs to get associated with link interfaces
4302                  * under the bridge, which might or might not stay valid.
4303                  *
4304                  * Also override ether_shost when relaying a packet out
4305                  * the same interface it came in on, due to multi-homed
4306                  * addresses & default routes; otherwise switches will
4307                  * get very confused.
4308                  *
4309                  * Otherwise, in transparent mode (LINK0), the recorded shost is restored.
4310                  */
4311                 if (from_us || m->m_pkthdr.rcvif == dst_ifp) {
4312                         m_copyback(m,
4313                                    offsetof(struct ether_header, ether_shost),
4314                                    ETHER_ADDR_LEN, IF_LLADDR(sc->sc_ifp));
4315                 } else if ((bifp->if_flags & IFF_LINK0) &&
4316                            (m->m_pkthdr.fw_flags & BRIDGE_MBUF_TAGGED)) {
4317                         m_copyback(m,
4318                                    offsetof(struct ether_header, ether_shost),
4319                                    ETHER_ADDR_LEN,
4320                                    m->m_pkthdr.br.ether.ether_shost);
4321                 } /* else retain shost */
4322
4323                 if (ifq_is_enabled(&dst_ifp->if_snd))
4324                         altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
4325
4326                 ifq_dispatch(dst_ifp, m, &pktattr);
4327         }
4328 }
4329
4330 static void
4331 bridge_control_dispatch(netmsg_t msg)
4332 {
4333         struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)msg;
4334         struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
4335         int error;
4336
4337         ifnet_serialize_all(bifp);
4338         error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
4339         ifnet_deserialize_all(bifp);
4340
4341         lwkt_replymsg(&bc_msg->base.lmsg, error);
4342 }
4343
4344 static int
4345 bridge_control(struct bridge_softc *sc, u_long cmd,
4346                bridge_ctl_t bc_func, void *bc_arg)
4347 {
4348         struct ifnet *bifp = sc->sc_ifp;
4349         struct netmsg_brctl bc_msg;
4350         int error;
4351
4352         ASSERT_IFNET_SERIALIZED_ALL(bifp);
4353
4354         bzero(&bc_msg, sizeof(bc_msg));
4355
4356         netmsg_init(&bc_msg.base, NULL, &curthread->td_msgport,
4357                     0, bridge_control_dispatch);
4358         bc_msg.bc_func = bc_func;
4359         bc_msg.bc_sc = sc;
4360         bc_msg.bc_arg = bc_arg;
4361
4362         ifnet_deserialize_all(bifp);
4363         error = lwkt_domsg(BRIDGE_CFGPORT, &bc_msg.base.lmsg, 0);
4364         ifnet_serialize_all(bifp);
4365         return error;
4366 }
4367
4368 static void
4369 bridge_add_bif_handler(netmsg_t msg)
4370 {
4371         struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)msg;
4372         struct bridge_softc *sc;
4373         struct bridge_iflist *bif;
4374
4375         sc = amsg->br_softc;
4376
4377         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
4378         bif->bif_ifp = amsg->br_bif_ifp;
4379         bif->bif_onlist = 1;
4380         bif->bif_info = amsg->br_bif_info;
4381
4382         /*
4383          * Initial default flags; flag updates are propagated through bif_info.
4384          */
4385         bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
4386
4387         TAILQ_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
4388
4389         ifnet_forwardmsg(&amsg->base.lmsg, mycpuid + 1);
4390 }
4391
4392 static void
4393 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
4394                struct ifnet *ifp)
4395 {
4396         struct netmsg_braddbif amsg;
4397
4398         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
4399
4400         netmsg_init(&amsg.base, NULL, &curthread->td_msgport,
4401                     0, bridge_add_bif_handler);
4402         amsg.br_softc = sc;
4403         amsg.br_bif_info = bif_info;
4404         amsg.br_bif_ifp = ifp;
4405
4406         ifnet_domsg(&amsg.base.lmsg, 0);
4407 }
4408
4409 static void
4410 bridge_del_bif_handler(netmsg_t msg)
4411 {
4412         struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)msg;
4413         struct bridge_softc *sc;
4414         struct bridge_iflist *bif;
4415
4416         sc = dmsg->br_softc;
4417
4418         /*
4419          * Locate the bif associated with the br_bif_info
4420          * on the current CPU
4421          */
4422         bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
4423         KKASSERT(bif != NULL && bif->bif_onlist);
4424
4425         /* Remove the bif from the current CPU's iflist */
4426         bif->bif_onlist = 0;
4427         TAILQ_REMOVE(&sc->sc_iflists[mycpuid], bif, bif_next);
4428
4429         /* Save the removed bif for later freeing */
4430         TAILQ_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
4431
4432         ifnet_forwardmsg(&dmsg->base.lmsg, mycpuid + 1);
4433 }
4434
4435 static void
4436 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
4437                struct bridge_iflist_head *saved_bifs)
4438 {
4439         struct netmsg_brdelbif dmsg;
4440
4441         ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
4442
4443         netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
4444                     0, bridge_del_bif_handler);
4445         dmsg.br_softc = sc;
4446         dmsg.br_bif_info = bif_info;
4447         dmsg.br_bif_list = saved_bifs;
4448
4449         ifnet_domsg(&dmsg.base.lmsg, 0);
4450 }