1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed for the NetBSD Project by
18  *      Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
70  */
71
72 /*
73  * Network interface bridge support.
74  *
75  * TODO:
76  *
77  *      - Currently only supports Ethernet-like interfaces (Ethernet,
78  *        802.11, VLANs on Ethernet, etc.).  Figure out a nice way
79  *        to bridge other types of interfaces (FDDI-FDDI, and maybe
80  *        consider heterogeneous bridges).
81  *
82  *
83  * The bridge's route information is duplicated on each CPU:
84  *
85  *      CPU0          CPU1          CPU2          CPU3
86  * +-----------+ +-----------+ +-----------+ +-----------+
87  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
88  * |           | |           | |           | |           |
89  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90  * +-----------+ +-----------+ +-----------+ +-----------+
91  *       |         |                     |         |
92  *       |         |                     |         |
93  *       |         |     +----------+    |         |
94  *       |         |     |  rtinfo  |    |         |
95  *       |         +---->|          |<---+         |
96  *       |               |  flags   |              |
97  *       +-------------->|  timeout |<-------------+
98  *                       |  dst_ifp |
99  *                       +----------+
100  *
101  * We choose to put timeout and dst_ifp into the shared part, so updating
102  * them is cheaper than using message forwarding.  Also there is no need
103  * to use a spinlock to protect the updates: timeout and dst_ifp are not
104  * related, and the order in which the fields are updated does not matter.
105  * The cache pollution caused by the shared part should not be heavy: in a
106  * stable setup, dst_ifp will probably not change during the rtnode's
107  * lifetime, while timeout is refreshed once per second; most of the time,
108  * timeout and dst_ifp are only read.
109  *
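 *
 * As a rough sketch only (the real definitions live in
 * net/bridge/if_bridgevar.h; the field names below are the ones this
 * file itself uses), the per-cpu/shared split looks like:
 *
 *      struct bridge_rtinfo {                  -- shared part
 *              struct ifnet    *bri_ifp;       -- dst_ifp
 *              unsigned long   bri_expire;     -- timeout
 *              uint8_t         bri_flags;
 *              ...
 *      };
 *
 *      struct bridge_rtnode {                  -- per-cpu part
 *              uint8_t         brt_addr[ETHER_ADDR_LEN];  -- dst eaddr
 *              struct bridge_rtinfo *brt_info; -- points at the shared part
 *              ...                             -- hash/list linkage
 *      };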
110  *
111  * Bridge route information installation on bridge_input path:
112  *
113  *      CPU0           CPU1         CPU2          CPU3
114  *
115  *                               tcp_thread2
116  *                                    |
117  *                                alloc nmsg
118  *                    snd nmsg        |
119  *                    w/o rtinfo      |
120  *      ifnet0<-----------------------+
121  *        |                           :
122  *    lookup dst                      :
123  *   rtnode exists?(Y)free nmsg       :
124  *        |(N)                        :
125  *        |
126  *  alloc rtinfo
127  *  alloc rtnode
128  * install rtnode
129  *        |
130  *        +---------->ifnet1
131  *        : fwd nmsg    |
132  *        : w/ rtinfo   |
133  *        :             |
134  *        :             |
135  *                 alloc rtnode
136  *               (w/ nmsg's rtinfo)
137  *                install rtnode
138  *                      |
139  *                      +---------->ifnet2
140  *                      : fwd nmsg    |
141  *                      : w/ rtinfo   |
142  *                      :             |
143  *                      :         same as ifnet1
144  *                                    |
145  *                                    +---------->ifnet3
146  *                                    : fwd nmsg    |
147  *                                    : w/ rtinfo   |
148  *                                    :             |
149  *                                    :         same as ifnet1
150  *                                               free nmsg
151  *                                                  :
152  *                                                  :
153  *
154  * The netmsgs forwarded between the protocol threads and the ifnet
155  * threads are allocated with (M_WAITOK|M_NULLOK), so the allocation
156  * will not fail in most cases (route information is too precious not
157  * to be installed :).  Since multiple threads may try to install route
158  * information for the same dst eaddr, we look up the route information
159  * in ifnet0.  However, this lookup only needs to be done on ifnet0,
160  * which is the starting point of the installation process.
161  *
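 *
 * Purely as an illustration of the flow above (not the exact code),
 * each per-cpu handler behaves roughly like:
 *
 *      bridge_rtinstall_handler(nmsg):
 *              bridge_rtinstall_oncpu(sc, dst eaddr, dst_ifp, ...,
 *                                     &rtinfo);   -- alloc/install rtnode
 *              if (this is the last cpu)
 *                      free or reply nmsg;
 *              else
 *                      forward nmsg (now carrying rtinfo) to the next
 *                      cpu's ifnet msgport;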
162  *
163  * Bridge route information deleting/flushing:
164  *
165  *  CPU0            CPU1             CPU2             CPU3
166  *
167  * netisr0
168  *   |
169  * find suitable rtnodes,
170  * mark their rtinfo dead
171  *   |
172  *   | domsg <------------------------------------------+
173  *   |                                                  | replymsg
174  *   |                                                  |
175  *   V     fwdmsg           fwdmsg           fwdmsg     |
176  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
178  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
179  *                                                    free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
183  *
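 *
 * Again only as an illustration of the diagram above (not the exact
 * code), the reap message is handled roughly like:
 *
 *      bridge_rtreap_handler(nmsg):
 *              delete this cpu's rtnodes whose rtinfo is marked dead;
 *              if (this is the last cpu)
 *                      also free the dead rtinfos and reply nmsg,
 *                      which unblocks netisr0's domsg;
 *              else
 *                      forward nmsg to the next cpu's ifnet msgport;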
184  *
185  * Bridge route information adding/deleting/flushing:
186  * Since all operations are serialized by the fixed message flow between
187  * the ifnet threads, it is not possible to create corrupted per-cpu
188  * route information.
189  *
190  *
191  *
192  * Percpu member interface list iteration with blocking operations:
193  * Since a bridge can only delete one member interface at a time, and
194  * the deleted member interface is not freed until netmsg_service_sync()
195  * has run, the following scheme is used to make sure that even if a
196  * member interface is ripped from the percpu list during a blocking
197  * operation, the iteration can still keep going:
198  *
199  * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200  *     blocking operation;
201  *     blocking operation;
202  *     ...
203  *     ...
204  *     if (nbif != NULL && !nbif->bif_onlist) {
205  *         KKASSERT(bif->bif_onlist);
206  *         nbif = LIST_NEXT(bif, bif_next);
207  *     }
208  * }
209  *
210  * As mentioned above, only one member interface can be unlinked from the
211  * percpu member interface list at a time, so either bif or nbif may be
212  * off the list, but _not_ both.  To keep the iteration going we only care
213  * about nbif, not bif.  Since a removed member interface is only freed
214  * after we finish our work, it is safe to access any field of an unlinked
215  * bif (here bif_onlist).  If nbif is no longer on the list, then bif must
216  * still be on it, so we set nbif to the element after bif and keep going.
217  */
218
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
221
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h>  /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/lock.h>
237 #include <sys/thread.h>
238 #include <sys/thread2.h>
239 #include <sys/mpipe.h>
240
241 #include <net/bpf.h>
242 #include <net/if.h>
243 #include <net/if_dl.h>
244 #include <net/if_types.h>
245 #include <net/if_var.h>
246 #include <net/pfil.h>
247 #include <net/ifq_var.h>
248 #include <net/if_clone.h>
249
250 #include <netinet/in.h> /* for struct arpcom */
251 #include <netinet/in_systm.h>
252 #include <netinet/in_var.h>
253 #include <netinet/ip.h>
254 #include <netinet/ip_var.h>
255 #ifdef INET6
256 #include <netinet/ip6.h>
257 #include <netinet6/ip6_var.h>
258 #endif
259 #include <netinet/if_ether.h> /* for struct arpcom */
260 #include <net/bridge/if_bridgevar.h>
261 #include <net/if_llc.h>
262 #include <net/netmsg2.h>
263
264 #include <net/route.h>
265 #include <sys/in_cksum.h>
266
267 /*
268  * Size of the route hash table.  Must be a power of two.
269  */
270 #ifndef BRIDGE_RTHASH_SIZE
271 #define BRIDGE_RTHASH_SIZE              1024
272 #endif
273
274 #define BRIDGE_RTHASH_MASK              (BRIDGE_RTHASH_SIZE - 1)
275
276 /*
277  * Maximum number of addresses to cache.
278  */
279 #ifndef BRIDGE_RTABLE_MAX
280 #define BRIDGE_RTABLE_MAX               100
281 #endif
282
283 /*
284  * Spanning tree defaults.
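 * Note: the timer values below are kept in units of 1/256 of a second,
 * which is why the defaults are multiplied by 256 and why the ioctl
 * handlers convert to and from whole seconds with << 8 / >> 8 shifts.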
285  */
286 #define BSTP_DEFAULT_MAX_AGE            (20 * 256)
287 #define BSTP_DEFAULT_HELLO_TIME         (2 * 256)
288 #define BSTP_DEFAULT_FORWARD_DELAY      (15 * 256)
289 #define BSTP_DEFAULT_HOLD_TIME          (1 * 256)
290 #define BSTP_DEFAULT_BRIDGE_PRIORITY    0x8000
291 #define BSTP_DEFAULT_PORT_PRIORITY      0x80
292 #define BSTP_DEFAULT_PATH_COST          55
293
294 /*
295  * Timeout (in seconds) for entries learned dynamically.
296  */
297 #ifndef BRIDGE_RTABLE_TIMEOUT
298 #define BRIDGE_RTABLE_TIMEOUT           (20 * 60)       /* same as ARP */
299 #endif
300
301 /*
302  * Number of seconds between walks of the route list.
303  */
304 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
305 #define BRIDGE_RTABLE_PRUNE_PERIOD      (5 * 60)
306 #endif
307
308 /*
309  * List of capabilities to mask on the member interface.
310  */
311 #define BRIDGE_IFCAPS_MASK              IFCAP_TXCSUM
312
313 typedef int     (*bridge_ctl_t)(struct bridge_softc *, void *);
314
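/*
 * Netmsgs used to move bridge work into the right threads: netmsg_brctl
 * wraps a bridge_ctl_t handler call (dispatched via bridge_control()),
 * while the netmsg_brsaddr/braddbif/brdelbif/brsflags messages carry
 * address and member-interface updates around the per-cpu ifnet threads.
 */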
315 struct netmsg_brctl {
316         struct netmsg           bc_nmsg;
317         bridge_ctl_t            bc_func;
318         struct bridge_softc     *bc_sc;
319         void                    *bc_arg;
320 };
321
322 struct netmsg_brsaddr {
323         struct netmsg           br_nmsg;
324         struct bridge_softc     *br_softc;
325         struct ifnet            *br_dst_if;
326         struct bridge_rtinfo    *br_rtinfo;
327         int                     br_setflags;
328         uint8_t                 br_dst[ETHER_ADDR_LEN];
329         uint8_t                 br_flags;
330 };
331
332 struct netmsg_braddbif {
333         struct netmsg           br_nmsg;
334         struct bridge_softc     *br_softc;
335         struct bridge_ifinfo    *br_bif_info;
336         struct ifnet            *br_bif_ifp;
337 };
338
339 struct netmsg_brdelbif {
340         struct netmsg           br_nmsg;
341         struct bridge_softc     *br_softc;
342         struct bridge_ifinfo    *br_bif_info;
343         struct bridge_iflist_head *br_bif_list;
344 };
345
346 struct netmsg_brsflags {
347         struct netmsg           br_nmsg;
348         struct bridge_softc     *br_softc;
349         struct bridge_ifinfo    *br_bif_info;
350         uint32_t                br_bif_flags;
351 };
352
353 eventhandler_tag        bridge_detach_cookie = NULL;
354
355 extern  struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
356 extern  int (*bridge_output_p)(struct ifnet *, struct mbuf *);
357 extern  void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
358
359 static int      bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
360
361 static int      bridge_clone_create(struct if_clone *, int);
362 static void     bridge_clone_destroy(struct ifnet *);
363
364 static int      bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
365 static void     bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
366 static void     bridge_ifdetach(void *, struct ifnet *);
367 static void     bridge_init(void *);
368 static void     bridge_stop(struct ifnet *);
369 static void     bridge_start(struct ifnet *);
370 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
371 static int      bridge_output(struct ifnet *, struct mbuf *);
372
373 static void     bridge_forward(struct bridge_softc *, struct mbuf *m);
374
375 static void     bridge_timer_handler(struct netmsg *);
376 static void     bridge_timer(void *);
377
378 static void     bridge_start_bcast(struct bridge_softc *, struct mbuf *);
379 static void     bridge_broadcast(struct bridge_softc *, struct ifnet *,
380                     struct mbuf *);
381 static void     bridge_span(struct bridge_softc *, struct mbuf *);
382
383 static int      bridge_rtupdate(struct bridge_softc *, const uint8_t *,
384                     struct ifnet *, uint8_t);
385 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
386 static void     bridge_rtreap(struct bridge_softc *);
387 static void     bridge_rtreap_async(struct bridge_softc *);
388 static void     bridge_rttrim(struct bridge_softc *);
389 static int      bridge_rtage_finddead(struct bridge_softc *);
390 static void     bridge_rtage(struct bridge_softc *);
391 static void     bridge_rtflush(struct bridge_softc *, int);
392 static int      bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
393 static int      bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
394                     struct ifnet *, uint8_t);
395 static void     bridge_rtmsg_sync(struct bridge_softc *sc);
396 static void     bridge_rtreap_handler(struct netmsg *);
397 static void     bridge_rtinstall_handler(struct netmsg *);
398 static int      bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
399                     struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
400
401 static void     bridge_rtable_init(struct bridge_softc *);
402 static void     bridge_rtable_fini(struct bridge_softc *);
403
404 static int      bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
405 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
406                     const uint8_t *);
407 static void     bridge_rtnode_insert(struct bridge_softc *,
408                     struct bridge_rtnode *);
409 static void     bridge_rtnode_destroy(struct bridge_softc *,
410                     struct bridge_rtnode *);
411
412 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
413                     const char *name);
414 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
415                     struct ifnet *ifp);
416 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
417                     struct bridge_ifinfo *);
418 static void     bridge_delete_member(struct bridge_softc *,
419                     struct bridge_iflist *, int);
420 static void     bridge_delete_span(struct bridge_softc *,
421                     struct bridge_iflist *);
422
423 static int      bridge_control(struct bridge_softc *, u_long,
424                                bridge_ctl_t, void *);
425 static int      bridge_ioctl_init(struct bridge_softc *, void *);
426 static int      bridge_ioctl_stop(struct bridge_softc *, void *);
427 static int      bridge_ioctl_add(struct bridge_softc *, void *);
428 static int      bridge_ioctl_del(struct bridge_softc *, void *);
429 static int      bridge_ioctl_gifflags(struct bridge_softc *, void *);
430 static int      bridge_ioctl_sifflags(struct bridge_softc *, void *);
431 static int      bridge_ioctl_scache(struct bridge_softc *, void *);
432 static int      bridge_ioctl_gcache(struct bridge_softc *, void *);
433 static int      bridge_ioctl_gifs(struct bridge_softc *, void *);
434 static int      bridge_ioctl_rts(struct bridge_softc *, void *);
435 static int      bridge_ioctl_saddr(struct bridge_softc *, void *);
436 static int      bridge_ioctl_sto(struct bridge_softc *, void *);
437 static int      bridge_ioctl_gto(struct bridge_softc *, void *);
438 static int      bridge_ioctl_daddr(struct bridge_softc *, void *);
439 static int      bridge_ioctl_flush(struct bridge_softc *, void *);
440 static int      bridge_ioctl_gpri(struct bridge_softc *, void *);
441 static int      bridge_ioctl_spri(struct bridge_softc *, void *);
442 static int      bridge_ioctl_ght(struct bridge_softc *, void *);
443 static int      bridge_ioctl_sht(struct bridge_softc *, void *);
444 static int      bridge_ioctl_gfd(struct bridge_softc *, void *);
445 static int      bridge_ioctl_sfd(struct bridge_softc *, void *);
446 static int      bridge_ioctl_gma(struct bridge_softc *, void *);
447 static int      bridge_ioctl_sma(struct bridge_softc *, void *);
448 static int      bridge_ioctl_sifprio(struct bridge_softc *, void *);
449 static int      bridge_ioctl_sifcost(struct bridge_softc *, void *);
450 static int      bridge_ioctl_addspan(struct bridge_softc *, void *);
451 static int      bridge_ioctl_delspan(struct bridge_softc *, void *);
452 static int      bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
453                     int);
454 static int      bridge_ip_checkbasic(struct mbuf **mp);
455 #ifdef INET6
456 static int      bridge_ip6_checkbasic(struct mbuf **mp);
457 #endif /* INET6 */
458 static int      bridge_fragment(struct ifnet *, struct mbuf *,
459                     struct ether_header *, int, struct llc *);
460 static void     bridge_enqueue_handler(struct netmsg *);
461 static void     bridge_handoff(struct ifnet *, struct mbuf *);
462
463 static void     bridge_del_bif_handler(struct netmsg *);
464 static void     bridge_add_bif_handler(struct netmsg *);
465 static void     bridge_set_bifflags_handler(struct netmsg *);
466 static void     bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
467                     struct bridge_iflist_head *);
468 static void     bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
469                     struct ifnet *);
470 static void     bridge_set_bifflags(struct bridge_softc *,
471                     struct bridge_ifinfo *, uint32_t);
472
473 SYSCTL_DECL(_net_link);
474 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
475
476 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
477 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
478 static int pfil_member = 1; /* run pfil hooks on the member interface */
479 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
480     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
481 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
482     &pfil_bridge, 0, "Packet filter on the bridge interface");
483 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
484     &pfil_member, 0, "Packet filter on the member interface");
485
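/*
 * Scratch space for a bridge ioctl: the fixed-size argument is copied
 * from userland into bca_u before the bc_func handler runs.  A handler
 * that produces a variable-length result (e.g. bridge_ioctl_gifs())
 * leaves a kernel buffer in bca_kptr, its size in bca_len and the
 * corresponding user pointer in bca_uptr; bridge_ioctl() then copies
 * that buffer out and frees it.
 */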
486 struct bridge_control_arg {
487         union {
488                 struct ifbreq ifbreq;
489                 struct ifbifconf ifbifconf;
490                 struct ifbareq ifbareq;
491                 struct ifbaconf ifbaconf;
492                 struct ifbrparam ifbrparam;
493         } bca_u;
494         int     bca_len;
495         void    *bca_uptr;
496         void    *bca_kptr;
497 };
498
499 struct bridge_control {
500         bridge_ctl_t    bc_func;
501         int             bc_argsize;
502         int             bc_flags;
503 };
504
505 #define BC_F_COPYIN             0x01    /* copy arguments in */
506 #define BC_F_COPYOUT            0x02    /* copy arguments out */
507 #define BC_F_SUSER              0x04    /* do super-user check */
508
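/*
 * This table is indexed by the ifd_cmd value passed in through
 * SIOCGDRVSPEC/SIOCSDRVSPEC (see bridge_ioctl()), so the order of the
 * entries is assumed to match the BRDG* command numbering in
 * net/bridge/if_bridgevar.h.
 */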
509 const struct bridge_control bridge_control_table[] = {
510         { bridge_ioctl_add,             sizeof(struct ifbreq),
511           BC_F_COPYIN|BC_F_SUSER },
512         { bridge_ioctl_del,             sizeof(struct ifbreq),
513           BC_F_COPYIN|BC_F_SUSER },
514
515         { bridge_ioctl_gifflags,        sizeof(struct ifbreq),
516           BC_F_COPYIN|BC_F_COPYOUT },
517         { bridge_ioctl_sifflags,        sizeof(struct ifbreq),
518           BC_F_COPYIN|BC_F_SUSER },
519
520         { bridge_ioctl_scache,          sizeof(struct ifbrparam),
521           BC_F_COPYIN|BC_F_SUSER },
522         { bridge_ioctl_gcache,          sizeof(struct ifbrparam),
523           BC_F_COPYOUT },
524
525         { bridge_ioctl_gifs,            sizeof(struct ifbifconf),
526           BC_F_COPYIN|BC_F_COPYOUT },
527         { bridge_ioctl_rts,             sizeof(struct ifbaconf),
528           BC_F_COPYIN|BC_F_COPYOUT },
529
530         { bridge_ioctl_saddr,           sizeof(struct ifbareq),
531           BC_F_COPYIN|BC_F_SUSER },
532
533         { bridge_ioctl_sto,             sizeof(struct ifbrparam),
534           BC_F_COPYIN|BC_F_SUSER },
535         { bridge_ioctl_gto,             sizeof(struct ifbrparam),
536           BC_F_COPYOUT },
537
538         { bridge_ioctl_daddr,           sizeof(struct ifbareq),
539           BC_F_COPYIN|BC_F_SUSER },
540
541         { bridge_ioctl_flush,           sizeof(struct ifbreq),
542           BC_F_COPYIN|BC_F_SUSER },
543
544         { bridge_ioctl_gpri,            sizeof(struct ifbrparam),
545           BC_F_COPYOUT },
546         { bridge_ioctl_spri,            sizeof(struct ifbrparam),
547           BC_F_COPYIN|BC_F_SUSER },
548
549         { bridge_ioctl_ght,             sizeof(struct ifbrparam),
550           BC_F_COPYOUT },
551         { bridge_ioctl_sht,             sizeof(struct ifbrparam),
552           BC_F_COPYIN|BC_F_SUSER },
553
554         { bridge_ioctl_gfd,             sizeof(struct ifbrparam),
555           BC_F_COPYOUT },
556         { bridge_ioctl_sfd,             sizeof(struct ifbrparam),
557           BC_F_COPYIN|BC_F_SUSER },
558
559         { bridge_ioctl_gma,             sizeof(struct ifbrparam),
560           BC_F_COPYOUT },
561         { bridge_ioctl_sma,             sizeof(struct ifbrparam),
562           BC_F_COPYIN|BC_F_SUSER },
563
564         { bridge_ioctl_sifprio,         sizeof(struct ifbreq),
565           BC_F_COPYIN|BC_F_SUSER },
566
567         { bridge_ioctl_sifcost,         sizeof(struct ifbreq),
568           BC_F_COPYIN|BC_F_SUSER },
569
570         { bridge_ioctl_addspan,         sizeof(struct ifbreq),
571           BC_F_COPYIN|BC_F_SUSER },
572         { bridge_ioctl_delspan,         sizeof(struct ifbreq),
573           BC_F_COPYIN|BC_F_SUSER },
574 };
575 static const int bridge_control_table_size =
576     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
577
578 LIST_HEAD(, bridge_softc) bridge_list;
579
580 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
581                                 bridge_clone_create,
582                                 bridge_clone_destroy, 0, IF_MAXUNIT);
583
584 static int
585 bridge_modevent(module_t mod, int type, void *data)
586 {
587         switch (type) {
588         case MOD_LOAD:
589                 LIST_INIT(&bridge_list);
590                 if_clone_attach(&bridge_cloner);
591                 bridge_input_p = bridge_input;
592                 bridge_output_p = bridge_output;
593                 bridge_detach_cookie = EVENTHANDLER_REGISTER(
594                     ifnet_detach_event, bridge_ifdetach, NULL,
595                     EVENTHANDLER_PRI_ANY);
596 #if notyet
597                 bstp_linkstate_p = bstp_linkstate;
598 #endif
599                 break;
600         case MOD_UNLOAD:
601                 if (!LIST_EMPTY(&bridge_list))
602                         return (EBUSY);
603                 EVENTHANDLER_DEREGISTER(ifnet_detach_event,
604                     bridge_detach_cookie);
605                 if_clone_detach(&bridge_cloner);
606                 bridge_input_p = NULL;
607                 bridge_output_p = NULL;
608 #if notyet
609                 bstp_linkstate_p = NULL;
610 #endif
611                 break;
612         default:
613                 return (EOPNOTSUPP);
614         }
615         return (0);
616 }
617
618 static moduledata_t bridge_mod = {
619         "if_bridge",
620         bridge_modevent,
621         0
622 };
623
624 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
625
626
627 /*
628  * bridge_clone_create:
629  *
630  *      Create a new bridge instance.
631  */
632 static int
633 bridge_clone_create(struct if_clone *ifc, int unit)
634 {
635         struct bridge_softc *sc;
636         struct ifnet *ifp;
637         u_char eaddr[6];
638         int cpu, rnd;
639
640         sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
641         ifp = sc->sc_ifp = &sc->sc_if;
642
643         sc->sc_brtmax = BRIDGE_RTABLE_MAX;
644         sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
645         sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
646         sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
647         sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
648         sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
649         sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
650
651         /* Initialize our routing table. */
652         bridge_rtable_init(sc);
653
654         callout_init(&sc->sc_brcallout);
655         netmsg_init(&sc->sc_brtimemsg, &netisr_adone_rport,
656                     MSGF_DROPABLE, bridge_timer_handler);
657         sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;
658
659         callout_init(&sc->sc_bstpcallout);
660         netmsg_init(&sc->sc_bstptimemsg, &netisr_adone_rport,
661                     MSGF_DROPABLE, bstp_tick_handler);
662         sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;
663
664         /* Initialize per-cpu member iface lists */
665         sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
666                                  M_DEVBUF, M_WAITOK);
667         for (cpu = 0; cpu < ncpus; ++cpu)
668                 LIST_INIT(&sc->sc_iflists[cpu]);
669
670         LIST_INIT(&sc->sc_spanlist);
671
672         ifp->if_softc = sc;
673         if_initname(ifp, ifc->ifc_name, unit);
674         ifp->if_mtu = ETHERMTU;
675         ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
676         ifp->if_ioctl = bridge_ioctl;
677         ifp->if_start = bridge_start;
678         ifp->if_init = bridge_init;
679         ifp->if_type = IFT_BRIDGE;
680         ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
681         ifp->if_snd.ifq_maxlen = ifqmaxlen;
682         ifq_set_ready(&ifp->if_snd);
683         ifp->if_hdrlen = ETHER_HDR_LEN;
684
685         /*
686          * Generate a random ethernet address and mark it as a locally
687          * administered, unicast address.
688          */
689         rnd = karc4random();
690         bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
691         rnd = karc4random();
692         bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
693
694         eaddr[0] &= ~1; /* clear multicast bit */
695         eaddr[0] |= 2;  /* set the LAA bit */
696
697         ether_ifattach(ifp, eaddr, NULL);
698         /* Now undo some of the damage... */
699         ifp->if_baudrate = 0;
700         ifp->if_type = IFT_BRIDGE;
701
702         crit_enter();   /* XXX MP */
703         LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
704         crit_exit();
705
706         return (0);
707 }
708
709 static void
710 bridge_delete_dispatch(struct netmsg *nmsg)
711 {
712         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
713         struct bridge_softc *sc = lmsg->u.ms_resultp;
714         struct ifnet *bifp = sc->sc_ifp;
715         struct bridge_iflist *bif;
716
717         lwkt_serialize_enter(bifp->if_serializer);
718
719         while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
720                 bridge_delete_member(sc, bif, 0);
721
722         while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
723                 bridge_delete_span(sc, bif);
724
725         lwkt_serialize_exit(bifp->if_serializer);
726
727         lwkt_replymsg(lmsg, 0);
728 }
729
730 /*
731  * bridge_clone_destroy:
732  *
733  *      Destroy a bridge instance.
734  */
735 static void
736 bridge_clone_destroy(struct ifnet *ifp)
737 {
738         struct bridge_softc *sc = ifp->if_softc;
739         struct lwkt_msg *lmsg;
740         struct netmsg nmsg;
741
742         lwkt_serialize_enter(ifp->if_serializer);
743
744         bridge_stop(ifp);
745         ifp->if_flags &= ~IFF_UP;
746
747         lwkt_serialize_exit(ifp->if_serializer);
748
749         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_delete_dispatch);
750         lmsg = &nmsg.nm_lmsg;
751         lmsg->u.ms_resultp = sc;
752         lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
753
754         crit_enter();   /* XXX MP */
755         LIST_REMOVE(sc, sc_list);
756         crit_exit();
757
758         ether_ifdetach(ifp);
759
760         /* Tear down the routing table. */
761         bridge_rtable_fini(sc);
762
763         /* Free per-cpu member iface lists */
764         kfree(sc->sc_iflists, M_DEVBUF);
765
766         kfree(sc, M_DEVBUF);
767 }
768
769 /*
770  * bridge_ioctl:
771  *
772  *      Handle a control request from the operator.
773  */
774 static int
775 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
776 {
777         struct bridge_softc *sc = ifp->if_softc;
778         struct bridge_control_arg args;
779         struct ifdrv *ifd = (struct ifdrv *) data;
780         const struct bridge_control *bc;
781         int error = 0;
782
783         ASSERT_SERIALIZED(ifp->if_serializer);
784
785         switch (cmd) {
786         case SIOCADDMULTI:
787         case SIOCDELMULTI:
788                 break;
789
790         case SIOCGDRVSPEC:
791         case SIOCSDRVSPEC:
792                 if (ifd->ifd_cmd >= bridge_control_table_size) {
793                         error = EINVAL;
794                         break;
795                 }
796                 bc = &bridge_control_table[ifd->ifd_cmd];
797
798                 if (cmd == SIOCGDRVSPEC &&
799                     (bc->bc_flags & BC_F_COPYOUT) == 0) {
800                         error = EINVAL;
801                         break;
802                 } else if (cmd == SIOCSDRVSPEC &&
803                            (bc->bc_flags & BC_F_COPYOUT)) {
804                         error = EINVAL;
805                         break;
806                 }
807
808                 if (bc->bc_flags & BC_F_SUSER) {
809                         error = suser_cred(cr, NULL_CRED_OKAY);
810                         if (error)
811                                 break;
812                 }
813
814                 if (ifd->ifd_len != bc->bc_argsize ||
815                     ifd->ifd_len > sizeof(args.bca_u)) {
816                         error = EINVAL;
817                         break;
818                 }
819
820                 memset(&args, 0, sizeof(args));
821                 if (bc->bc_flags & BC_F_COPYIN) {
822                         error = copyin(ifd->ifd_data, &args.bca_u,
823                                        ifd->ifd_len);
824                         if (error)
825                                 break;
826                 }
827
828                 error = bridge_control(sc, cmd, bc->bc_func, &args);
829                 if (error) {
830                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
831                         break;
832                 }
833
834                 if (bc->bc_flags & BC_F_COPYOUT) {
835                         error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
836                         if (args.bca_len != 0) {
837                                 KKASSERT(args.bca_kptr != NULL);
838                                 if (!error) {
839                                         error = copyout(args.bca_kptr,
840                                                 args.bca_uptr, args.bca_len);
841                                 }
842                                 kfree(args.bca_kptr, M_TEMP);
843                         } else {
844                                 KKASSERT(args.bca_kptr == NULL);
845                         }
846                 } else {
847                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
848                 }
849                 break;
850
851         case SIOCSIFFLAGS:
852                 if (!(ifp->if_flags & IFF_UP) &&
853                     (ifp->if_flags & IFF_RUNNING)) {
854                         /*
855                          * If interface is marked down and it is running,
856                          * then stop it.
857                          */
858                         bridge_stop(ifp);
859                 } else if ((ifp->if_flags & IFF_UP) &&
860                     !(ifp->if_flags & IFF_RUNNING)) {
861                         /*
862                          * If interface is marked up and it is stopped, then
863                          * start it.
864                          */
865                         ifp->if_init(sc);
866                 }
867                 break;
868
869         case SIOCSIFMTU:
870                 /* Do not allow the MTU to be changed on the bridge */
871                 error = EINVAL;
872                 break;
873
874         default:
875                 error = ether_ioctl(ifp, cmd, data);
876                 break;
877         }
878         return (error);
879 }
880
881 /*
882  * bridge_mutecaps:
883  *
884  *      Clear or restore unwanted capabilities on the member interface
885  */
886 static void
887 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
888 {
889         struct ifreq ifr;
890         int error;
891
892         if (ifp->if_ioctl == NULL)
893                 return;
894
895         bzero(&ifr, sizeof(ifr));
896         ifr.ifr_reqcap = ifp->if_capenable;
897
898         if (mute) {
899                 /* mask off and save capabilities */
900                 bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
901                 if (bif_info->bifi_mutecap != 0)
902                         ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
903         } else {
904                 /* restore muted capabilities */
905                 ifr.ifr_reqcap |= bif_info->bifi_mutecap;
906         }
907
908         if (bif_info->bifi_mutecap != 0) {
909                 lwkt_serialize_enter(ifp->if_serializer);
910                 error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
911                 lwkt_serialize_exit(ifp->if_serializer);
912         }
913 }
914
915 /*
916  * bridge_lookup_member:
917  *
918  *      Lookup a bridge member interface.
919  */
920 static struct bridge_iflist *
921 bridge_lookup_member(struct bridge_softc *sc, const char *name)
922 {
923         struct bridge_iflist *bif;
924
925         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
926                 if (strcmp(bif->bif_ifp->if_xname, name) == 0)
927                         return (bif);
928         }
929         return (NULL);
930 }
931
932 /*
933  * bridge_lookup_member_if:
934  *
935  *      Lookup a bridge member interface by ifnet*.
936  */
937 static struct bridge_iflist *
938 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
939 {
940         struct bridge_iflist *bif;
941
942         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
943                 if (bif->bif_ifp == member_ifp)
944                         return (bif);
945         }
946         return (NULL);
947 }
948
949 /*
950  * bridge_lookup_member_ifinfo:
951  *
952  *      Lookup a bridge member interface by bridge_ifinfo.
953  */
954 static struct bridge_iflist *
955 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
956                             struct bridge_ifinfo *bif_info)
957 {
958         struct bridge_iflist *bif;
959
960         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
961                 if (bif->bif_info == bif_info)
962                         return (bif);
963         }
964         return (NULL);
965 }
966
967 /*
968  * bridge_delete_member:
969  *
970  *      Delete the specified member interface.
971  */
972 static void
973 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
974     int gone)
975 {
976         struct ifnet *ifs = bif->bif_ifp;
977         struct ifnet *bifp = sc->sc_ifp;
978         struct bridge_ifinfo *bif_info = bif->bif_info;
979         struct bridge_iflist_head saved_bifs;
980
981         ASSERT_SERIALIZED(bifp->if_serializer);
982         KKASSERT(bif_info != NULL);
983
984         ifs->if_bridge = NULL;
985
986         /*
987          * Release bridge interface's serializer:
988          * - To avoid a possible deadlock.
989          * - Various sync operations will block the current thread.
990          */
991         lwkt_serialize_exit(bifp->if_serializer);
992
993         if (!gone) {
994                 switch (ifs->if_type) {
995                 case IFT_ETHER:
996                 case IFT_L2VLAN:
997                         /*
998                          * Take the interface out of promiscuous mode.
999                          */
1000                         ifpromisc(ifs, 0);
1001                         bridge_mutecaps(bif_info, ifs, 0);
1002                         break;
1003
1004                 case IFT_GIF:
1005                         break;
1006
1007                 default:
1008                         panic("bridge_delete_member: impossible");
1009                         break;
1010                 }
1011         }
1012
1013         /*
1014          * Remove the bifs from the percpu linked lists.
1015          *
1016          * Removed bifs are not freed immediately; instead,
1017          * they are saved in saved_bifs.  They will be freed
1018          * after we make sure that no one is accessing them,
1019          * i.e. after the following netmsg_service_sync().
1020          */
1021         LIST_INIT(&saved_bifs);
1022         bridge_del_bif(sc, bif_info, &saved_bifs);
1023
1024         /*
1025          * Make sure that all protocol threads:
1026          * o  see that 'ifs' if_bridge has been changed
1027          * o  know that the bif has been removed from the percpu linked list
1028          */
1029         netmsg_service_sync();
1030
1031         /*
1032          * Free the removed bifs
1033          */
1034         KKASSERT(!LIST_EMPTY(&saved_bifs));
1035         while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1036                 LIST_REMOVE(bif, bif_next);
1037                 kfree(bif, M_DEVBUF);
1038         }
1039
1040         /* See the comment in bridge_ioctl_stop() */
1041         bridge_rtmsg_sync(sc);
1042         bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1043
1044         lwkt_serialize_enter(bifp->if_serializer);
1045
1046         if (bifp->if_flags & IFF_RUNNING)
1047                 bstp_initialization(sc);
1048
1049         /*
1050          * Free the bif_info after bstp_initialization(), so that
1051          * bridge_softc.sc_root_port will not reference a dangling
1052          * pointer.
1053          */
1054         kfree(bif_info, M_DEVBUF);
1055 }
1056
1057 /*
1058  * bridge_delete_span:
1059  *
1060  *      Delete the specified span interface.
1061  */
1062 static void
1063 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1064 {
1065         KASSERT(bif->bif_ifp->if_bridge == NULL,
1066             ("%s: not a span interface", __func__));
1067
1068         LIST_REMOVE(bif, bif_next);
1069         kfree(bif, M_DEVBUF);
1070 }
1071
1072 static int
1073 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1074 {
1075         struct ifnet *ifp = sc->sc_ifp;
1076
1077         if (ifp->if_flags & IFF_RUNNING)
1078                 return 0;
1079
1080         callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1081             bridge_timer, sc);
1082
1083         ifp->if_flags |= IFF_RUNNING;
1084         bstp_initialization(sc);
1085         return 0;
1086 }
1087
1088 static int
1089 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1090 {
1091         struct ifnet *ifp = sc->sc_ifp;
1092         struct lwkt_msg *lmsg;
1093
1094         if ((ifp->if_flags & IFF_RUNNING) == 0)
1095                 return 0;
1096
1097         callout_stop(&sc->sc_brcallout);
1098
1099         crit_enter();
1100         lmsg = &sc->sc_brtimemsg.nm_lmsg;
1101         if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1102                 /* Pending to be processed; drop it */
1103                 lwkt_dropmsg(lmsg);
1104         }
1105         crit_exit();
1106
1107         bstp_stop(sc);
1108
1109         ifp->if_flags &= ~IFF_RUNNING;
1110
1111         lwkt_serialize_exit(ifp->if_serializer);
1112
1113         /* Let everyone know that we are stopped */
1114         netmsg_service_sync();
1115
1116         /*
1117          * Sync the ifnetX msgports in the order in which we forward
1118          * rtnode installation messages.  This makes sure that all
1119          * rtnode installation messages sent by bridge_rtupdate()
1120          * during the above netmsg_service_sync() are flushed.
1121          */
1122         bridge_rtmsg_sync(sc);
1123         bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1124
1125         lwkt_serialize_enter(ifp->if_serializer);
1126         return 0;
1127 }
1128
1129 static int
1130 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1131 {
1132         struct ifbreq *req = arg;
1133         struct bridge_iflist *bif;
1134         struct bridge_ifinfo *bif_info;
1135         struct ifnet *ifs, *bifp;
1136         int error = 0;
1137
1138         bifp = sc->sc_ifp;
1139         ASSERT_SERIALIZED(bifp->if_serializer);
1140
1141         ifs = ifunit(req->ifbr_ifsname);
1142         if (ifs == NULL)
1143                 return (ENOENT);
1144
1145         /* If it's in the span list, it can't be a member. */
1146         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1147                 if (ifs == bif->bif_ifp)
1148                         return (EBUSY);
1149
1150         /* Allow the first Ethernet member to define the MTU */
1151         if (ifs->if_type != IFT_GIF) {
1152                 if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1153                         bifp->if_mtu = ifs->if_mtu;
1154                 } else if (bifp->if_mtu != ifs->if_mtu) {
1155                         if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1156                         return (EINVAL);
1157                 }
1158         }
1159
1160         if (ifs->if_bridge == sc)
1161                 return (EEXIST);
1162
1163         if (ifs->if_bridge != NULL)
1164                 return (EBUSY);
1165
1166         bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1167         bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1168         bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1169         bif_info->bifi_ifp = ifs;
1170
1171         /*
1172          * Release bridge interface's serializer:
1173          * - To avoid a possible deadlock.
1174          * - Various sync operations will block the current thread.
1175          */
1176         lwkt_serialize_exit(bifp->if_serializer);
1177
1178         switch (ifs->if_type) {
1179         case IFT_ETHER:
1180         case IFT_L2VLAN:
1181                 /*
1182                  * Place the interface into promiscuous mode.
1183                  */
1184                 error = ifpromisc(ifs, 1);
1185                 if (error) {
1186                         lwkt_serialize_enter(bifp->if_serializer);
1187                         goto out;
1188                 }
1189                 bridge_mutecaps(bif_info, ifs, 1);
1190                 break;
1191
1192         case IFT_GIF: /* :^) */
1193                 break;
1194
1195         default:
1196                 error = EINVAL;
1197                 lwkt_serialize_enter(bifp->if_serializer);
1198                 goto out;
1199         }
1200
1201         /*
1202          * Add bifs to percpu linked lists
1203          */
1204         bridge_add_bif(sc, bif_info, ifs);
1205
1206         lwkt_serialize_enter(bifp->if_serializer);
1207
1208         if (bifp->if_flags & IFF_RUNNING)
1209                 bstp_initialization(sc);
1210         else
1211                 bstp_stop(sc);
1212
1213         /*
1214          * Everything has been set up, so let the member interface
1215          * deliver packets to this bridge on its input/output path.
1216          */
1217         ifs->if_bridge = sc;
1218 out:
1219         if (error) {
1220                 if (bif_info != NULL)
1221                         kfree(bif_info, M_DEVBUF);
1222         }
1223         return (error);
1224 }
1225
1226 static int
1227 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1228 {
1229         struct ifbreq *req = arg;
1230         struct bridge_iflist *bif;
1231
1232         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1233         if (bif == NULL)
1234                 return (ENOENT);
1235
1236         bridge_delete_member(sc, bif, 0);
1237
1238         return (0);
1239 }
1240
1241 static int
1242 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1243 {
1244         struct ifbreq *req = arg;
1245         struct bridge_iflist *bif;
1246
1247         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1248         if (bif == NULL)
1249                 return (ENOENT);
1250
1251         req->ifbr_ifsflags = bif->bif_flags;
1252         req->ifbr_state = bif->bif_state;
1253         req->ifbr_priority = bif->bif_priority;
1254         req->ifbr_path_cost = bif->bif_path_cost;
1255         req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1256
1257         return (0);
1258 }
1259
1260 static int
1261 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1262 {
1263         struct ifbreq *req = arg;
1264         struct bridge_iflist *bif;
1265         struct ifnet *bifp = sc->sc_ifp;
1266
1267         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1268         if (bif == NULL)
1269                 return (ENOENT);
1270
1271         if (req->ifbr_ifsflags & IFBIF_SPAN) {
1272                 /* SPAN is readonly */
1273                 return (EINVAL);
1274         }
1275
1276         if (req->ifbr_ifsflags & IFBIF_STP) {
1277                 switch (bif->bif_ifp->if_type) {
1278                 case IFT_ETHER:
1279                         /* These can do spanning tree. */
1280                         break;
1281
1282                 default:
1283                         /* Nothing else can. */
1284                         return (EINVAL);
1285                 }
1286         }
1287
1288         lwkt_serialize_exit(bifp->if_serializer);
1289         bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1290         lwkt_serialize_enter(bifp->if_serializer);
1291
1292         if (bifp->if_flags & IFF_RUNNING)
1293                 bstp_initialization(sc);
1294
1295         return (0);
1296 }
1297
1298 static int
1299 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1300 {
1301         struct ifbrparam *param = arg;
1302         struct ifnet *ifp = sc->sc_ifp;
1303
1304         sc->sc_brtmax = param->ifbrp_csize;
1305
1306         lwkt_serialize_exit(ifp->if_serializer);
1307         bridge_rttrim(sc);
1308         lwkt_serialize_enter(ifp->if_serializer);
1309
1310         return (0);
1311 }
1312
1313 static int
1314 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1315 {
1316         struct ifbrparam *param = arg;
1317
1318         param->ifbrp_csize = sc->sc_brtmax;
1319
1320         return (0);
1321 }
1322
1323 static int
1324 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1325 {
1326         struct bridge_control_arg *bc_arg = arg;
1327         struct ifbifconf *bifc = arg;
1328         struct bridge_iflist *bif;
1329         struct ifbreq *breq;
1330         int count, len;
1331
1332         count = 0;
1333         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1334                 count++;
1335         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1336                 count++;
1337
1338         if (bifc->ifbic_len == 0) {
1339                 bifc->ifbic_len = sizeof(*breq) * count;
1340                 return 0;
1341         } else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1342                 bifc->ifbic_len = 0;
1343                 return 0;
1344         }
1345
1346         len = min(bifc->ifbic_len, sizeof(*breq) * count);
1347         KKASSERT(len >= sizeof(*breq));
1348
1349         breq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO);
1350         if (breq == NULL) {
1351                 bifc->ifbic_len = 0;
1352                 return ENOMEM;
1353         }
1354         bc_arg->bca_kptr = breq;
1355
1356         count = 0;
1357         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1358                 if (len < sizeof(*breq))
1359                         break;
1360
1361                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1362                         sizeof(breq->ifbr_ifsname));
1363                 breq->ifbr_ifsflags = bif->bif_flags;
1364                 breq->ifbr_state = bif->bif_state;
1365                 breq->ifbr_priority = bif->bif_priority;
1366                 breq->ifbr_path_cost = bif->bif_path_cost;
1367                 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1368                 breq++;
1369                 count++;
1370                 len -= sizeof(*breq);
1371         }
1372         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1373                 if (len < sizeof(*breq))
1374                         break;
1375
1376                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1377                         sizeof(breq->ifbr_ifsname));
1378                 breq->ifbr_ifsflags = bif->bif_flags;
1379                 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1380                 breq++;
1381                 count++;
1382                 len -= sizeof(*breq);
1383         }
1384
1385         bifc->ifbic_len = sizeof(*breq) * count;
1386         KKASSERT(bifc->ifbic_len > 0);
1387
1388         bc_arg->bca_len = bifc->ifbic_len;
1389         bc_arg->bca_uptr = bifc->ifbic_req;
1390         return 0;
1391 }
1392
1393 static int
1394 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1395 {
1396         struct bridge_control_arg *bc_arg = arg;
1397         struct ifbaconf *bac = arg;
1398         struct bridge_rtnode *brt;
1399         struct ifbareq *bareq;
1400         int count, len;
1401
1402         count = 0;
1403         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1404                 count++;
1405
1406         if (bac->ifbac_len == 0) {
1407                 bac->ifbac_len = sizeof(*bareq) * count;
1408                 return 0;
1409         } else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1410                 bac->ifbac_len = 0;
1411                 return 0;
1412         }
1413
1414         len = min(bac->ifbac_len, sizeof(*bareq) * count);
1415         KKASSERT(len >= sizeof(*bareq));
1416
1417         bareq = kmalloc(len, M_TEMP, M_INTWAIT | M_NULLOK | M_ZERO);
1418         if (bareq == NULL) {
1419                 bac->ifbac_len = 0;
1420                 return ENOMEM;
1421         }
1422         bc_arg->bca_kptr = bareq;
1423
1424         count = 0;
1425         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1426                 struct bridge_rtinfo *bri = brt->brt_info;
1427                 unsigned long expire;
1428
1429                 if (len < sizeof(*bareq))
1430                         break;
1431
1432                 strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1433                         sizeof(bareq->ifba_ifsname));
1434                 memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1435                 expire = bri->bri_expire;
1436                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1437                     time_second < expire)
1438                         bareq->ifba_expire = expire - time_second;
1439                 else
1440                         bareq->ifba_expire = 0;
1441                 bareq->ifba_flags = bri->bri_flags;
1442                 bareq++;
1443                 count++;
1444                 len -= sizeof(*bareq);
1445         }
1446
1447         bac->ifbac_len = sizeof(*bareq) * count;
1448         KKASSERT(bac->ifbac_len > 0);
1449
1450         bc_arg->bca_len = bac->ifbac_len;
1451         bc_arg->bca_uptr = bac->ifbac_req;
1452         return 0;
1453 }
1454
1455 static int
1456 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1457 {
1458         struct ifbareq *req = arg;
1459         struct bridge_iflist *bif;
1460         struct ifnet *ifp = sc->sc_ifp;
1461         int error;
1462
1463         ASSERT_SERIALIZED(ifp->if_serializer);
1464
1465         bif = bridge_lookup_member(sc, req->ifba_ifsname);
1466         if (bif == NULL)
1467                 return (ENOENT);
1468
1469         lwkt_serialize_exit(ifp->if_serializer);
1470         error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1471                                req->ifba_flags);
1472         lwkt_serialize_enter(ifp->if_serializer);
1473         return (error);
1474 }
1475
1476 static int
1477 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1478 {
1479         struct ifbrparam *param = arg;
1480
1481         sc->sc_brttimeout = param->ifbrp_ctime;
1482
1483         return (0);
1484 }
1485
1486 static int
1487 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1488 {
1489         struct ifbrparam *param = arg;
1490
1491         param->ifbrp_ctime = sc->sc_brttimeout;
1492
1493         return (0);
1494 }
1495
1496 static int
1497 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1498 {
1499         struct ifbareq *req = arg;
1500         struct ifnet *ifp = sc->sc_ifp;
1501         int error;
1502
1503         lwkt_serialize_exit(ifp->if_serializer);
1504         error = bridge_rtdaddr(sc, req->ifba_dst);
1505         lwkt_serialize_enter(ifp->if_serializer);
1506         return error;
1507 }
1508
1509 static int
1510 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1511 {
1512         struct ifbreq *req = arg;
1513         struct ifnet *ifp = sc->sc_ifp;
1514
1515         lwkt_serialize_exit(ifp->if_serializer);
1516         bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1517         lwkt_serialize_enter(ifp->if_serializer);
1518
1519         return (0);
1520 }
1521
1522 static int
1523 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1524 {
1525         struct ifbrparam *param = arg;
1526
1527         param->ifbrp_prio = sc->sc_bridge_priority;
1528
1529         return (0);
1530 }
1531
1532 static int
1533 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1534 {
1535         struct ifbrparam *param = arg;
1536
1537         sc->sc_bridge_priority = param->ifbrp_prio;
1538
1539         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1540                 bstp_initialization(sc);
1541
1542         return (0);
1543 }
1544
1545 static int
1546 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1547 {
1548         struct ifbrparam *param = arg;
1549
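        /* STP timer values are kept in 8.8 fixed point; report whole seconds. */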
1550         param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1551
1552         return (0);
1553 }
1554
1555 static int
1556 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1557 {
1558         struct ifbrparam *param = arg;
1559
1560         if (param->ifbrp_hellotime == 0)
1561                 return (EINVAL);
1562         sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1563
1564         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1565                 bstp_initialization(sc);
1566
1567         return (0);
1568 }
1569
1570 static int
1571 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1572 {
1573         struct ifbrparam *param = arg;
1574
1575         param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1576
1577         return (0);
1578 }
1579
1580 static int
1581 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1582 {
1583         struct ifbrparam *param = arg;
1584
1585         if (param->ifbrp_fwddelay == 0)
1586                 return (EINVAL);
1587         sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1588
1589         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1590                 bstp_initialization(sc);
1591
1592         return (0);
1593 }
1594
1595 static int
1596 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1597 {
1598         struct ifbrparam *param = arg;
1599
1600         param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1601
1602         return (0);
1603 }
1604
1605 static int
1606 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1607 {
1608         struct ifbrparam *param = arg;
1609
1610         if (param->ifbrp_maxage == 0)
1611                 return (EINVAL);
1612         sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1613
1614         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1615                 bstp_initialization(sc);
1616
1617         return (0);
1618 }
1619
1620 static int
1621 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1622 {
1623         struct ifbreq *req = arg;
1624         struct bridge_iflist *bif;
1625
1626         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1627         if (bif == NULL)
1628                 return (ENOENT);
1629
1630         bif->bif_priority = req->ifbr_priority;
1631
1632         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1633                 bstp_initialization(sc);
1634
1635         return (0);
1636 }
1637
1638 static int
1639 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1640 {
1641         struct ifbreq *req = arg;
1642         struct bridge_iflist *bif;
1643
1644         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1645         if (bif == NULL)
1646                 return (ENOENT);
1647
1648         bif->bif_path_cost = req->ifbr_path_cost;
1649
1650         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1651                 bstp_initialization(sc);
1652
1653         return (0);
1654 }
1655
1656 static int
1657 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1658 {
1659         struct ifbreq *req = arg;
1660         struct bridge_iflist *bif;
1661         struct ifnet *ifs;
1662
1663         ifs = ifunit(req->ifbr_ifsname);
1664         if (ifs == NULL)
1665                 return (ENOENT);
1666
1667         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1668                 if (ifs == bif->bif_ifp)
1669                         return (EBUSY);
1670
1671         if (ifs->if_bridge != NULL)
1672                 return (EBUSY);
1673
1674         switch (ifs->if_type) {
1675         case IFT_ETHER:
1676         case IFT_GIF:
1677         case IFT_L2VLAN:
1678                 break;
1679
1680         default:
1681                 return (EINVAL);
1682         }
1683
1684         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1685         bif->bif_ifp = ifs;
1686         bif->bif_flags = IFBIF_SPAN;
1687         /* NOTE: span bif does not need bridge_ifinfo */
1688
1689         LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1690
1691         sc->sc_span = 1;
1692
1693         return (0);
1694 }
1695
1696 static int
1697 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1698 {
1699         struct ifbreq *req = arg;
1700         struct bridge_iflist *bif;
1701         struct ifnet *ifs;
1702
1703         ifs = ifunit(req->ifbr_ifsname);
1704         if (ifs == NULL)
1705                 return (ENOENT);
1706
1707         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1708                 if (ifs == bif->bif_ifp)
1709                         break;
1710
1711         if (bif == NULL)
1712                 return (ENOENT);
1713
1714         bridge_delete_span(sc, bif);
1715
1716         if (LIST_EMPTY(&sc->sc_spanlist))
1717                 sc->sc_span = 0;
1718
1719         return (0);
1720 }
1721
1722 static void
1723 bridge_ifdetach_dispatch(struct netmsg *nmsg)
1724 {
1725         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1726         struct ifnet *ifp, *bifp;
1727         struct bridge_softc *sc;
1728         struct bridge_iflist *bif;
1729
1730         ifp = lmsg->u.ms_resultp;
1731         sc = ifp->if_bridge;
1732
1733         /* Check if the interface is a bridge member */
1734         if (sc != NULL) {
1735                 bifp = sc->sc_ifp;
1736
1737                 lwkt_serialize_enter(bifp->if_serializer);
1738
1739                 bif = bridge_lookup_member_if(sc, ifp);
1740                 if (bif != NULL) {
1741                         bridge_delete_member(sc, bif, 1);
1742                 } else {
1743                         /* XXX Why would bif be NULL? */
1744                 }
1745
1746                 lwkt_serialize_exit(bifp->if_serializer);
1747                 goto reply;
1748         }
1749
1750         crit_enter();   /* XXX MP */
1751
1752         /* Check if the interface is a span port */
1753         LIST_FOREACH(sc, &bridge_list, sc_list) {
1754                 bifp = sc->sc_ifp;
1755
1756                 lwkt_serialize_enter(bifp->if_serializer);
1757
1758                 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1759                         if (ifp == bif->bif_ifp) {
1760                                 bridge_delete_span(sc, bif);
1761                                 break;
1762                         }
1763
1764                 lwkt_serialize_exit(bifp->if_serializer);
1765         }
1766
1767         crit_exit();
1768
1769 reply:
1770         lwkt_replymsg(lmsg, 0);
1771 }
1772
1773 /*
1774  * bridge_ifdetach:
1775  *
1776  *      Detach an interface from a bridge.  Called when a member
1777  *      interface is detaching.
1778  */
1779 static void
1780 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1781 {
1782         struct lwkt_msg *lmsg;
1783         struct netmsg nmsg;
1784
1785         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_ifdetach_dispatch);
1786         lmsg = &nmsg.nm_lmsg;
1787         lmsg->u.ms_resultp = ifp;
1788
1789         lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1790 }
1791
1792 /*
1793  * bridge_init:
1794  *
1795  *      Initialize a bridge interface.
1796  */
1797 static void
1798 bridge_init(void *xsc)
1799 {
1800         bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1801 }
1802
1803 /*
1804  * bridge_stop:
1805  *
1806  *      Stop the bridge interface.
1807  */
1808 static void
1809 bridge_stop(struct ifnet *ifp)
1810 {
1811         bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1812 }
1813
1814 /*
1815  * bridge_enqueue:
1816  *
1817  *      Enqueue a packet on a bridge member interface.
1818  *
1819  */
1820 void
1821 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1822 {
1823         struct netmsg_packet *nmp;
1824         lwkt_port_t port;
1825
1826         nmp = &m->m_hdr.mh_netmsg;
1827         netmsg_init(&nmp->nm_netmsg, &netisr_apanic_rport, 0,
1828                     bridge_enqueue_handler);
1829         nmp->nm_packet = m;
1830         nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1831
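        /*
         * Dispatch on the current thread's message port if we are
         * already running in a network thread; otherwise hand the
         * packet to this cpu's network protocol thread.
         */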
1832         if (curthread->td_flags & TDF_NETWORK)
1833                 port = &curthread->td_msgport;
1834         else
1835                 port = cpu_portfn(mycpuid);
1836         lwkt_sendmsg(port, &nmp->nm_netmsg.nm_lmsg);
1837 }
1838
1839 /*
1840  * bridge_output:
1841  *
1842  *      Send output from a bridge member interface.  This
1843  *      performs the bridging function for locally originated
1844  *      packets.
1845  *
1846  *      The mbuf has the Ethernet header already attached.  We must
1847  *      enqueue or free the mbuf before returning.
1848  */
1849 static int
1850 bridge_output(struct ifnet *ifp, struct mbuf *m)
1851 {
1852         struct bridge_softc *sc = ifp->if_bridge;
1853         struct ether_header *eh;
1854         struct ifnet *dst_if, *bifp;
1855
1856         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
1857
1858         /*
1859          * Make sure that we are still a member of a bridge interface.
1860          */
1861         if (sc == NULL) {
1862                 m_freem(m);
1863                 return (0);
1864         }
1865         bifp = sc->sc_ifp;
1866
1867         if (m->m_len < ETHER_HDR_LEN) {
1868                 m = m_pullup(m, ETHER_HDR_LEN);
1869                 if (m == NULL)
1870                         return (0);
1871         }
1872         eh = mtod(m, struct ether_header *);
1873
1874         /*
1875          * If bridge is down, but the original output interface is up,
1876          * go ahead and send out that interface.  Otherwise, the packet
1877          * is dropped below.
1878          */
1879         if ((bifp->if_flags & IFF_RUNNING) == 0) {
1880                 dst_if = ifp;
1881                 goto sendunicast;
1882         }
1883
1884         /*
1885          * If the packet is a multicast, or we don't know a better way to
1886          * get there, send to all interfaces.
1887          */
1888         if (ETHER_IS_MULTICAST(eh->ether_dhost))
1889                 dst_if = NULL;
1890         else
1891                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1892         if (dst_if == NULL) {
1893                 struct bridge_iflist *bif, *nbif;
1894                 struct mbuf *mc;
1895                 int used = 0;
1896
1897                 if (sc->sc_span)
1898                         bridge_span(sc, m);
1899
1900                 LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1901                                      bif_next, nbif) {
1902                         dst_if = bif->bif_ifp;
1903                         if ((dst_if->if_flags & IFF_RUNNING) == 0)
1904                                 continue;
1905
1906                         /*
1907                          * If this is not the original output interface,
1908                          * and the interface is participating in spanning
1909                          * tree, make sure the port is in a state that
1910                          * allows forwarding.
1911                          */
1912                         if (dst_if != ifp &&
1913                             (bif->bif_flags & IFBIF_STP) != 0) {
1914                                 switch (bif->bif_state) {
1915                                 case BSTP_IFSTATE_BLOCKING:
1916                                 case BSTP_IFSTATE_LISTENING:
1917                                 case BSTP_IFSTATE_DISABLED:
1918                                         continue;
1919                                 }
1920                         }
1921
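                        /*
                         * The last member gets the original mbuf;
                         * every other member gets a copy.
                         */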
1922                         if (LIST_NEXT(bif, bif_next) == NULL) {
1923                                 used = 1;
1924                                 mc = m;
1925                         } else {
1926                                 mc = m_copypacket(m, MB_DONTWAIT);
1927                                 if (mc == NULL) {
1928                                         bifp->if_oerrors++;
1929                                         continue;
1930                                 }
1931                         }
1932                         bridge_handoff(dst_if, mc);
1933
1934                         if (nbif != NULL && !nbif->bif_onlist) {
1935                                 KKASSERT(bif->bif_onlist);
1936                                 nbif = LIST_NEXT(bif, bif_next);
1937                         }
1938                 }
1939                 if (used == 0)
1940                         m_freem(m);
1941                 return (0);
1942         }
1943
1944 sendunicast:
1945         /*
1946          * XXX Spanning tree consideration here?
1947          */
1948         if (sc->sc_span)
1949                 bridge_span(sc, m);
1950         if ((dst_if->if_flags & IFF_RUNNING) == 0)
1951                 m_freem(m);
1952         else
1953                 bridge_handoff(dst_if, m);
1954         return (0);
1955 }
1956
1957 /*
1958  * bridge_start:
1959  *
1960  *      Start output on a bridge.
1961  *
1962  */
1963 static void
1964 bridge_start(struct ifnet *ifp)
1965 {
1966         struct bridge_softc *sc = ifp->if_softc;
1967
1968         ASSERT_SERIALIZED(ifp->if_serializer);
1969
1970         ifp->if_flags |= IFF_OACTIVE;
1971         for (;;) {
1972                 struct ifnet *dst_if = NULL;
1973                 struct ether_header *eh;
1974                 struct mbuf *m;
1975
1976                 m = ifq_dequeue(&ifp->if_snd, NULL);
1977                 if (m == NULL)
1978                         break;
1979
1980                 if (m->m_len < sizeof(*eh)) {
1981                         m = m_pullup(m, sizeof(*eh));
1982                         if (m == NULL) {
1983                                 ifp->if_oerrors++;
1984                                 continue;
1985                         }
1986                 }
1987                 eh = mtod(m, struct ether_header *);
1988
1989                 BPF_MTAP(ifp, m);
1990                 ifp->if_opackets++;
1991
1992                 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1993                         dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1994
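                /*
                 * Unknown or multicast/broadcast destination: flood to
                 * all members.  Otherwise enqueue directly on the
                 * destination interface.
                 */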
1995                 if (dst_if == NULL)
1996                         bridge_start_bcast(sc, m);
1997                 else
1998                         bridge_enqueue(dst_if, m);
1999         }
2000         ifp->if_flags &= ~IFF_OACTIVE;
2001 }
2002
2003 /*
2004  * bridge_forward:
2005  *
2006  *      The forwarding function of the bridge.
2007  */
2008 static void
2009 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2010 {
2011         struct bridge_iflist *bif;
2012         struct ifnet *src_if, *dst_if, *ifp;
2013         struct ether_header *eh;
2014
2015         src_if = m->m_pkthdr.rcvif;
2016         ifp = sc->sc_ifp;
2017
2018         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
2019
2020         ifp->if_ipackets++;
2021         ifp->if_ibytes += m->m_pkthdr.len;
2022
2023         /*
2024          * Look up the bridge_iflist.
2025          */
2026         bif = bridge_lookup_member_if(sc, src_if);
2027         if (bif == NULL) {
2028                 /* Interface is not a bridge member (anymore?) */
2029                 m_freem(m);
2030                 return;
2031         }
2032
2033         if (bif->bif_flags & IFBIF_STP) {
2034                 switch (bif->bif_state) {
2035                 case BSTP_IFSTATE_BLOCKING:
2036                 case BSTP_IFSTATE_LISTENING:
2037                 case BSTP_IFSTATE_DISABLED:
2038                         m_freem(m);
2039                         return;
2040                 }
2041         }
2042
2043         eh = mtod(m, struct ether_header *);
2044
2045         /*
2046          * If the interface is learning, and the source
2047          * address is valid (i.e. not multicast and not
2048          * all zeroes), record the address.
2049          */
2050         if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2051             ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2052             (eh->ether_shost[0] == 0 &&
2053              eh->ether_shost[1] == 0 &&
2054              eh->ether_shost[2] == 0 &&
2055              eh->ether_shost[3] == 0 &&
2056              eh->ether_shost[4] == 0 &&
2057              eh->ether_shost[5] == 0) == 0)
2058                 bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2059
2060         if ((bif->bif_flags & IFBIF_STP) != 0 &&
2061             bif->bif_state == BSTP_IFSTATE_LEARNING) {
2062                 m_freem(m);
2063                 return;
2064         }
2065
2066         /*
2067          * At this point, the port either doesn't participate
2068          * in spanning tree or it is in the forwarding state.
2069          */
2070
2071         /*
2072          * If the packet is unicast, destined for someone on
2073          * "this" side of the bridge, drop it.
2074          */
2075         if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2076                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2077                 if (src_if == dst_if) {
2078                         m_freem(m);
2079                         return;
2080                 }
2081         } else {
2082                 /* ...forward it to all interfaces. */
2083                 ifp->if_imcasts++;
2084                 dst_if = NULL;
2085         }
2086
2087         if (dst_if == NULL) {
2088                 bridge_broadcast(sc, src_if, m);
2089                 return;
2090         }
2091
2092         /*
2093          * At this point, we're dealing with a unicast frame
2094          * going to a different interface.
2095          */
2096         if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2097                 m_freem(m);
2098                 return;
2099         }
2100         bif = bridge_lookup_member_if(sc, dst_if);
2101         if (bif == NULL) {
2102                 /* Not a member of the bridge (anymore?) */
2103                 m_freem(m);
2104                 return;
2105         }
2106
2107         if (bif->bif_flags & IFBIF_STP) {
2108                 switch (bif->bif_state) {
2109                 case BSTP_IFSTATE_DISABLED:
2110                 case BSTP_IFSTATE_BLOCKING:
2111                         m_freem(m);
2112                         return;
2113                 }
2114         }
2115
2116         if (inet_pfil_hook.ph_hashooks > 0
2117 #ifdef INET6
2118             || inet6_pfil_hook.ph_hashooks > 0
2119 #endif
2120             ) {
2121                 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2122                         return;
2123                 if (m == NULL)
2124                         return;
2125
2126                 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2127                         return;
2128                 if (m == NULL)
2129                         return;
2130         }
2131         bridge_handoff(dst_if, m);
2132 }
2133
2134 /*
2135  * bridge_input:
2136  *
2137  *      Receive input from a member interface.  Queue the packet for
2138  *      bridging if it is not for us.
2139  */
2140 static struct mbuf *
2141 bridge_input(struct ifnet *ifp, struct mbuf *m)
2142 {
2143         struct bridge_softc *sc = ifp->if_bridge;
2144         struct bridge_iflist *bif;
2145         struct ifnet *bifp, *new_ifp;
2146         struct ether_header *eh;
2147         struct mbuf *mc, *mc2;
2148
2149         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
2150
2151         /*
2152          * Make sure that we are still a member of a bridge interface.
2153          */
2154         if (sc == NULL)
2155                 return m;
2156
2157         new_ifp = NULL;
2158         bifp = sc->sc_ifp;
2159
2160         if ((bifp->if_flags & IFF_RUNNING) == 0)
2161                 goto out;
2162
2163         /*
2164          * Implement support for bridge monitoring.  If this flag has been
2165          * set on this interface, discard the packet once we push it through
2166          * the bpf(4) machinery, but before we do, increment various counters
2167          * associated with this bridge.
2168          */
2169         if (bifp->if_flags & IFF_MONITOR) {
2170                 /* Change input interface to this bridge */
2171                 m->m_pkthdr.rcvif = bifp;
2172
2173                 BPF_MTAP(bifp, m);
2174
2175                 /* Update bridge's ifnet statistics */
2176                 bifp->if_ipackets++;
2177                 bifp->if_ibytes += m->m_pkthdr.len;
2178                 if (m->m_flags & (M_MCAST | M_BCAST))
2179                         bifp->if_imcasts++;
2180
2181                 m_freem(m);
2182                 m = NULL;
2183                 goto out;
2184         }
2185
2186         eh = mtod(m, struct ether_header *);
2187
2188         m->m_flags &= ~M_PROTO1; /* XXX Hack - loop prevention */
2189
2190         if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2191                 /*
2192                  * If the packet is for us, set the packets source as the
2193                  * bridge, and return the packet back to ifnet.if_input for
2194                  * local processing.
2195                  */
2196                 KASSERT(bifp->if_bridge == NULL,
2197                         ("loop created in bridge_input"));
2198                 new_ifp = bifp;
2199                 goto out;
2200         }
2201
2202         /*
2203          * Tap all packets arriving on the bridge, no matter if
2204          * they are local destinations or not.  In is in.
2205          */
2206         BPF_MTAP(bifp, m);
2207
2208         bif = bridge_lookup_member_if(sc, ifp);
2209         if (bif == NULL)
2210                 goto out;
2211
2212         if (sc->sc_span)
2213                 bridge_span(sc, m);
2214
2215         if (m->m_flags & (M_BCAST | M_MCAST)) {
2216                 /* Tap off 802.1D packets; they do not get forwarded. */
2217                 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2218                     ETHER_ADDR_LEN) == 0) {
2219                         lwkt_serialize_enter(bifp->if_serializer);
2220                         bstp_input(sc, bif, m);
2221                         lwkt_serialize_exit(bifp->if_serializer);
2222
2223                         /* m is freed by bstp_input */
2224                         m = NULL;
2225                         goto out;
2226                 }
2227
2228                 if (bif->bif_flags & IFBIF_STP) {
2229                         switch (bif->bif_state) {
2230                         case BSTP_IFSTATE_BLOCKING:
2231                         case BSTP_IFSTATE_LISTENING:
2232                         case BSTP_IFSTATE_DISABLED:
2233                                 goto out;
2234                         }
2235                 }
2236
2237                 /*
2238                  * Make a deep copy of the packet and enqueue the copy
2239                  * for bridge processing; return the original packet for
2240                  * local processing.
2241                  */
2242                 mc = m_dup(m, MB_DONTWAIT);
2243                 if (mc == NULL)
2244                         goto out;
2245
2246                 bridge_forward(sc, mc);
2247
2248                 /*
2249                  * Reinject the mbuf as arriving on the bridge so we have a
2250                  * chance at claiming multicast packets.  We cannot loop back
2251                  * here from ether_input as a bridge is never a member of a
2252                  * bridge.
2253                  */
2254                 KASSERT(bifp->if_bridge == NULL,
2255                         ("loop created in bridge_input"));
2256                 mc2 = m_dup(m, MB_DONTWAIT);
2257 #ifdef notyet
2258                 if (mc2 != NULL) {
2259                         /* Keep the layer3 header aligned */
2260                         int i = min(mc2->m_pkthdr.len, max_protohdr);
2261                         mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2262                 }
2263 #endif
2264                 if (mc2 != NULL) {
2265                         /*
2266                          * Don't tap to bpf(4) again; we have
2267                          * already done the tapping.
2268                          */
2269                         ether_reinput_oncpu(bifp, mc2, 0);
2270                 }
2271
2272                 /* Return the original packet for local processing. */
2273                 goto out;
2274         }
2275
2276         if (bif->bif_flags & IFBIF_STP) {
2277                 switch (bif->bif_state) {
2278                 case BSTP_IFSTATE_BLOCKING:
2279                 case BSTP_IFSTATE_LISTENING:
2280                 case BSTP_IFSTATE_DISABLED:
2281                         goto out;
2282                 }
2283         }
2284
2285         /*
2286          * Unicast.  Make sure it's not for us.
2287          *
2288          * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2289          * is followed by breaking out of the loop.
2290          */
2291         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2292                 if (bif->bif_ifp->if_type != IFT_ETHER)
2293                         continue;
2294
2295                 /* It is destined for us. */
2296                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2297                     ETHER_ADDR_LEN) == 0) {
2298                         if (bif->bif_ifp != ifp) {
2299                                 /* XXX loop prevention */
2300                                 m->m_flags |= M_PROTO1;
2301                                 new_ifp = bif->bif_ifp;
2302                         }
2303                         if (bif->bif_flags & IFBIF_LEARNING) {
2304                                 bridge_rtupdate(sc, eh->ether_shost,
2305                                                 ifp, IFBAF_DYNAMIC);
2306                         }
2307                         goto out;
2308                 }
2309
2310                 /* We just received a packet that we sent out. */
2311                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2312                     ETHER_ADDR_LEN) == 0) {
2313                         m_freem(m);
2314                         m = NULL;
2315                         goto out;
2316                 }
2317         }
2318
2319         /* Perform the bridge forwarding function. */
2320         bridge_forward(sc, m);
2321         m = NULL;
2322 out:
2323         if (new_ifp != NULL) {
2324                 ether_reinput_oncpu(new_ifp, m, 1);
2325                 m = NULL;
2326         }
2327         return (m);
2328 }
2329
2330 /*
2331  * bridge_start_bcast:
2332  *
2333  *      Broadcast a packet sent from the bridge to all member
2334  *      interfaces.  This is a simplified version of
2335  *      bridge_broadcast(); however, it expects the caller
2336  *      to hold the bridge's serializer.
2337  */
2338 static void
2339 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2340 {
2341         struct bridge_iflist *bif;
2342         struct mbuf *mc;
2343         struct ifnet *dst_if, *bifp;
2344         int used = 0;
2345
2346         bifp = sc->sc_ifp;
2347         ASSERT_SERIALIZED(bifp->if_serializer);
2348
2349         /*
2350          * The following loop is MPSAFE; nothing blocks in the
2351          * loop body.
2352          */
2353         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2354                 dst_if = bif->bif_ifp;
2355
2356                 if (bif->bif_flags & IFBIF_STP) {
2357                         switch (bif->bif_state) {
2358                         case BSTP_IFSTATE_BLOCKING:
2359                         case BSTP_IFSTATE_DISABLED:
2360                                 continue;
2361                         }
2362                 }
2363
2364                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2365                     (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2366                         continue;
2367
2368                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2369                         continue;
2370
2371                 if (LIST_NEXT(bif, bif_next) == NULL) {
2372                         mc = m;
2373                         used = 1;
2374                 } else {
2375                         mc = m_copypacket(m, MB_DONTWAIT);
2376                         if (mc == NULL) {
2377                                 bifp->if_oerrors++;
2378                                 continue;
2379                         }
2380                 }
2381                 bridge_enqueue(dst_if, mc);
2382         }
2383         if (used == 0)
2384                 m_freem(m);
2385 }
2386
2387 /*
2388  * bridge_broadcast:
2389  *
2390  *      Send a frame to all interfaces that are members of
2391  *      the bridge, except for the one on which the packet
2392  *      arrived.
2393  */
2394 static void
2395 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2396     struct mbuf *m)
2397 {
2398         struct bridge_iflist *bif, *nbif;
2399         struct mbuf *mc;
2400         struct ifnet *dst_if, *bifp;
2401         int used = 0;
2402
2403         bifp = sc->sc_ifp;
2404         ASSERT_NOT_SERIALIZED(bifp->if_serializer);
2405
2406         if (inet_pfil_hook.ph_hashooks > 0
2407 #ifdef INET6
2408             || inet6_pfil_hook.ph_hashooks > 0
2409 #endif
2410             ) {
2411                 if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2412                         return;
2413                 if (m == NULL)
2414                         return;
2415
2416                 /* Filter on the bridge interface before broadcasting */
2417                 if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2418                         return;
2419                 if (m == NULL)
2420                         return;
2421         }
2422
2423         LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2424                 dst_if = bif->bif_ifp;
2425                 if (dst_if == src_if)
2426                         continue;
2427
2428                 if (bif->bif_flags & IFBIF_STP) {
2429                         switch (bif->bif_state) {
2430                         case BSTP_IFSTATE_BLOCKING:
2431                         case BSTP_IFSTATE_DISABLED:
2432                                 continue;
2433                         }
2434                 }
2435
2436                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2437                     (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2438                         continue;
2439
2440                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2441                         continue;
2442
2443                 if (LIST_NEXT(bif, bif_next) == NULL) {
2444                         mc = m;
2445                         used = 1;
2446                 } else {
2447                         mc = m_copypacket(m, MB_DONTWAIT);
2448                         if (mc == NULL) {
2449                                 sc->sc_ifp->if_oerrors++;
2450                                 continue;
2451                         }
2452                 }
2453
2454                 /*
2455                  * Filter on the output interface.  Pass a NULL bridge
2456                  * interface pointer so we do not redundantly filter on
2457                  * the bridge for each interface we broadcast on.
2458                  */
2459                 if (inet_pfil_hook.ph_hashooks > 0
2460 #ifdef INET6
2461                     || inet6_pfil_hook.ph_hashooks > 0
2462 #endif
2463                     ) {
2464                         if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2465                                 continue;
2466                         if (mc == NULL)
2467                                 continue;
2468                 }
2469                 bridge_handoff(dst_if, mc);
2470
2471                 if (nbif != NULL && !nbif->bif_onlist) {
2472                         KKASSERT(bif->bif_onlist);
2473                         nbif = LIST_NEXT(bif, bif_next);
2474                 }
2475         }
2476         if (used == 0)
2477                 m_freem(m);
2478 }
2479
2480 /*
2481  * bridge_span:
2482  *
2483  *      Duplicate a packet out one or more interfaces that are in
2484  *      span mode; the original mbuf is left unmodified.
2485  */
2486 static void
2487 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2488 {
2489         struct bridge_iflist *bif;
2490         struct ifnet *dst_if, *bifp;
2491         struct mbuf *mc;
2492
2493         bifp = sc->sc_ifp;
2494         lwkt_serialize_enter(bifp->if_serializer);
2495
2496         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2497                 dst_if = bif->bif_ifp;
2498
2499                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2500                         continue;
2501
2502                 mc = m_copypacket(m, MB_DONTWAIT);
2503                 if (mc == NULL) {
2504                         sc->sc_ifp->if_oerrors++;
2505                         continue;
2506                 }
2507                 bridge_enqueue(dst_if, mc);
2508         }
2509
2510         lwkt_serialize_exit(bifp->if_serializer);
2511 }
2512
2513 static void
2514 bridge_rtmsg_sync_handler(struct netmsg *nmsg)
2515 {
2516         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2517 }
2518
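/*
 * bridge_rtmsg_sync:
 *
 *      Send a no-op message around the per-cpu ifnet message ports and
 *      wait for it, ensuring that all previously posted route table
 *      messages have been processed on every cpu.
 */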
2519 static void
2520 bridge_rtmsg_sync(struct bridge_softc *sc)
2521 {
2522         struct netmsg nmsg;
2523
2524         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2525
2526         netmsg_init(&nmsg, &curthread->td_msgport, 0,
2527                     bridge_rtmsg_sync_handler);
2528         ifnet_domsg(&nmsg.nm_lmsg, 0);
2529 }
2530
2531 static __inline void
2532 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2533                      int setflags, uint8_t flags, uint32_t timeo)
2534 {
2535         if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2536             bri->bri_ifp != dst_if)
2537                 bri->bri_ifp = dst_if;
2538         if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2539             bri->bri_expire != time_second + timeo)
2540                 bri->bri_expire = time_second + timeo;
2541         if (setflags)
2542                 bri->bri_flags = flags;
2543 }
2544
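/*
 * bridge_rtinstall_oncpu:
 *
 *      Install a per-cpu rtnode for 'dst'.  The shared rtinfo is
 *      allocated on cpu0 and reused by the remaining cpus, which only
 *      add their own rtnode referencing it.
 */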
2545 static int
2546 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2547                        struct ifnet *dst_if, int setflags, uint8_t flags,
2548                        struct bridge_rtinfo **bri0)
2549 {
2550         struct bridge_rtnode *brt;
2551         struct bridge_rtinfo *bri;
2552
2553         if (mycpuid == 0) {
2554                 brt = bridge_rtnode_lookup(sc, dst);
2555                 if (brt != NULL) {
2556                         /*
2557                          * rtnode for 'dst' already exists.  We inform the
2558                          * caller about this by leaving bri0 as NULL.  The
2559                          * caller will terminate the installation upon getting
2560                          * NULL bri0.  However, we still need to update the
2561                          * rtinfo.
2562                          */
2563                         KKASSERT(*bri0 == NULL);
2564
2565                         /* Update rtinfo */
2566                         bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2567                                              flags, sc->sc_brttimeout);
2568                         return 0;
2569                 }
2570
2571                 /*
2572                  * We only need to check brtcnt on CPU0; if the limit
2573                  * would be exceeded, ENOSPC is returned and the caller
2574                  * will terminate the installation.
2575                  */
2576                 if (sc->sc_brtcnt >= sc->sc_brtmax)
2577                         return ENOSPC;
2578
2579                 KKASSERT(*bri0 == NULL);
2580                 bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2581                                   M_WAITOK | M_ZERO);
2582                 *bri0 = bri;
2583
2584                 /* Setup rtinfo */
2585                 bri->bri_flags = IFBAF_DYNAMIC;
2586                 bridge_rtinfo_update(bri, dst_if, setflags, flags,
2587                                      sc->sc_brttimeout);
2588         } else {
2589                 bri = *bri0;
2590                 KKASSERT(bri != NULL);
2591         }
2592
2593         brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2594                       M_WAITOK | M_ZERO);
2595         memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2596         brt->brt_info = bri;
2597
2598         bridge_rtnode_insert(sc, brt);
2599         return 0;
2600 }
2601
2602 static void
2603 bridge_rtinstall_handler(struct netmsg *nmsg)
2604 {
2605         struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
2606         int error;
2607
2608         error = bridge_rtinstall_oncpu(brmsg->br_softc,
2609                                        brmsg->br_dst, brmsg->br_dst_if,
2610                                        brmsg->br_setflags, brmsg->br_flags,
2611                                        &brmsg->br_rtinfo);
2612         if (error) {
2613                 KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2614                 lwkt_replymsg(&nmsg->nm_lmsg, error);
2615                 return;
2616         } else if (brmsg->br_rtinfo == NULL) {
2617                 /* rtnode already exists for 'dst' */
2618                 KKASSERT(mycpuid == 0);
2619                 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2620                 return;
2621         }
2622         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2623 }
2624
2625 /*
2626  * bridge_rtupdate:
2627  *
2628  *      Add/Update a bridge routing entry.
2629  */
2630 static int
2631 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2632                 struct ifnet *dst_if, uint8_t flags)
2633 {
2634         struct bridge_rtnode *brt;
2635
2636         /*
2637          * A route for this destination might already exist.  If so,
2638          * update it; otherwise create a new one.
2639          */
2640         if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2641                 struct netmsg_brsaddr *brmsg;
2642
2643                 if (sc->sc_brtcnt >= sc->sc_brtmax)
2644                         return ENOSPC;
2645
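                /*
                 * No rtnode for 'dst' yet; asynchronously install one
                 * on every cpu via the ifnet message ports.
                 */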
2646                 brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
2647                 if (brmsg == NULL)
2648                         return ENOMEM;
2649
2650                 netmsg_init(&brmsg->br_nmsg, &netisr_afree_rport, 0,
2651                             bridge_rtinstall_handler);
2652                 memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2653                 brmsg->br_dst_if = dst_if;
2654                 brmsg->br_flags = flags;
2655                 brmsg->br_setflags = 0;
2656                 brmsg->br_softc = sc;
2657                 brmsg->br_rtinfo = NULL;
2658
2659                 ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
2660                 return 0;
2661         }
2662         bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
2663                              sc->sc_brttimeout);
2664         return 0;
2665 }
2666
2667 static int
2668 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2669                struct ifnet *dst_if, uint8_t flags)
2670 {
2671         struct netmsg_brsaddr brmsg;
2672
2673         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2674
2675         netmsg_init(&brmsg.br_nmsg, &curthread->td_msgport, 0,
2676                     bridge_rtinstall_handler);
2677         memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2678         brmsg.br_dst_if = dst_if;
2679         brmsg.br_flags = flags;
2680         brmsg.br_setflags = 1;
2681         brmsg.br_softc = sc;
2682         brmsg.br_rtinfo = NULL;
2683
2684         return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
2685 }
2686
2687 /*
2688  * bridge_rtlookup:
2689  *
2690  *      Lookup the destination interface for an address.
2691  */
2692 static struct ifnet *
2693 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2694 {
2695         struct bridge_rtnode *brt;
2696
2697         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2698                 return NULL;
2699         return brt->brt_info->bri_ifp;
2700 }
2701
2702 static void
2703 bridge_rtreap_handler(struct netmsg *nmsg)
2704 {
2705         struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2706         struct bridge_rtnode *brt, *nbrt;
2707
2708         LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2709                 if (brt->brt_info->bri_dead)
2710                         bridge_rtnode_destroy(sc, brt);
2711         }
2712         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2713 }
2714
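/*
 * bridge_rtreap:
 *
 *      Synchronously destroy, on every cpu, all rtnodes whose rtinfo
 *      has been marked dead.
 */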
2715 static void
2716 bridge_rtreap(struct bridge_softc *sc)
2717 {
2718         struct netmsg nmsg;
2719
2720         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2721
2722         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_rtreap_handler);
2723         nmsg.nm_lmsg.u.ms_resultp = sc;
2724
2725         ifnet_domsg(&nmsg.nm_lmsg, 0);
2726 }
2727
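/*
 * bridge_rtreap_async:
 *
 *      Asynchronous version of bridge_rtreap(); the netmsg is
 *      auto-freed once it has visited every cpu.
 */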
2728 static void
2729 bridge_rtreap_async(struct bridge_softc *sc)
2730 {
2731         struct netmsg *nmsg;
2732
2733         nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
2734
2735         netmsg_init(nmsg, &netisr_afree_rport, 0, bridge_rtreap_handler);
2736         nmsg->nm_lmsg.u.ms_resultp = sc;
2737
2738         ifnet_sendmsg(&nmsg->nm_lmsg, 0);
2739 }
2740
2741 /*
2742  * bridge_rttrim:
2743  *
2744  *      Trim the routing table so that the number of
2745  *      routing entries stays less than or equal to the
2746  *      configured maximum.
2747  */
2748 static void
2749 bridge_rttrim(struct bridge_softc *sc)
2750 {
2751         struct bridge_rtnode *brt;
2752         int dead;
2753
2754         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2755
2756         /* Make sure we actually need to do this. */
2757         if (sc->sc_brtcnt <= sc->sc_brtmax)
2758                 return;
2759
2760         /*
2761          * Find out how many rtnodes are dead
2762          */
2763         dead = bridge_rtage_finddead(sc);
2764         KKASSERT(dead <= sc->sc_brtcnt);
2765
2766         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2767                 /* Enough dead rtnodes are found */
2768                 bridge_rtreap(sc);
2769                 return;
2770         }
2771
2772         /*
2773          * Kill some dynamic rtnodes to meet the brtmax
2774          */
2775         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2776                 struct bridge_rtinfo *bri = brt->brt_info;
2777
2778                 if (bri->bri_dead) {
2779                         /*
2780                          * We have counted this rtnode in
2781                          * bridge_rtage_finddead()
2782                          */
2783                         continue;
2784                 }
2785
2786                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2787                         bri->bri_dead = 1;
2788                         ++dead;
2789                         KKASSERT(dead <= sc->sc_brtcnt);
2790
2791                         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2792                                 /* Enough rtnodes are collected */
2793                                 break;
2794                         }
2795                 }
2796         }
2797         if (dead)
2798                 bridge_rtreap(sc);
2799 }
2800
2801 /*
2802  * bridge_timer:
2803  *
2804  *      Aging timer for the bridge.
2805  */
2806 static void
2807 bridge_timer(void *arg)
2808 {
2809         struct bridge_softc *sc = arg;
2810         struct lwkt_msg *lmsg;
2811
2812         KKASSERT(mycpuid == BRIDGE_CFGCPU);
2813
2814         crit_enter();
2815
2816         if (callout_pending(&sc->sc_brcallout) ||
2817             !callout_active(&sc->sc_brcallout)) {
2818                 crit_exit();
2819                 return;
2820         }
2821         callout_deactivate(&sc->sc_brcallout);
2822
2823         lmsg = &sc->sc_brtimemsg.nm_lmsg;
2824         KKASSERT(lmsg->ms_flags & MSGF_DONE);
2825         lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);
2826
2827         crit_exit();
2828 }
2829
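/*
 * bridge_timer_handler:
 *
 *      Process the aging timer message on the bridge configuration
 *      port: reply immediately, run an aging cycle, and rearm the
 *      callout if the bridge is still running.
 */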
2830 static void
2831 bridge_timer_handler(struct netmsg *nmsg)
2832 {
2833         struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2834
2835         KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
2836
2837         crit_enter();
2838         /* Reply ASAP */
2839         lwkt_replymsg(&nmsg->nm_lmsg, 0);
2840         crit_exit();
2841
2842         bridge_rtage(sc);
2843         if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2844                 callout_reset(&sc->sc_brcallout,
2845                     bridge_rtable_prune_period * hz, bridge_timer, sc);
2846         }
2847 }
2848
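/*
 * bridge_rtage_finddead:
 *
 *      Mark expired dynamic rtnodes dead and return the number of
 *      rtnodes so marked.
 */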
2849 static int
2850 bridge_rtage_finddead(struct bridge_softc *sc)
2851 {
2852         struct bridge_rtnode *brt;
2853         int dead = 0;
2854
2855         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2856                 struct bridge_rtinfo *bri = brt->brt_info;
2857
2858                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2859                     time_second >= bri->bri_expire) {
2860                         bri->bri_dead = 1;
2861                         ++dead;
2862                         KKASSERT(dead <= sc->sc_brtcnt);
2863                 }
2864         }
2865         return dead;
2866 }
2867
2868 /*
2869  * bridge_rtage:
2870  *
2871  *      Perform an aging cycle.
2872  */
2873 static void
2874 bridge_rtage(struct bridge_softc *sc)
2875 {
2876         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2877
2878         if (bridge_rtage_finddead(sc))
2879                 bridge_rtreap(sc);
2880 }
2881
2882 /*
2883  * bridge_rtflush:
2884  *
2885  *      Remove all dynamic addresses from the bridge.
2886  */
2887 static void
2888 bridge_rtflush(struct bridge_softc *sc, int bf)
2889 {
2890         struct bridge_rtnode *brt;
2891         int reap;
2892
2893         reap = 0;
2894         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2895                 struct bridge_rtinfo *bri = brt->brt_info;
2896
2897                 if ((bf & IFBF_FLUSHALL) ||
2898                     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2899                         bri->bri_dead = 1;
2900                         reap = 1;
2901                 }
2902         }
2903         if (reap) {
2904                 if (bf & IFBF_FLUSHSYNC)
2905                         bridge_rtreap(sc);
2906                 else
2907                         bridge_rtreap_async(sc);
2908         }
2909 }
2910
2911 /*
2912  * bridge_rtdaddr:
2913  *
2914  *      Remove an address from the table.
2915  */
2916 static int
2917 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2918 {
2919         struct bridge_rtnode *brt;
2920
2921         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2922
2923         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2924                 return (ENOENT);
2925
2926         /* TODO: add a cheaper delete operation */
2927         brt->brt_info->bri_dead = 1;
2928         bridge_rtreap(sc);
2929         return (0);
2930 }
2931
2932 /*
2933  * bridge_rtdelete:
2934  *
2935  *      Delete routes to a specific member interface.
2936  */
2937 void
2938 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
2939 {
2940         struct bridge_rtnode *brt;
2941         int reap;
2942
2943         reap = 0;
2944         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2945                 struct bridge_rtinfo *bri = brt->brt_info;
2946
2947                 if (bri->bri_ifp == ifp &&
2948                     ((bf & IFBF_FLUSHALL) ||
2949                      (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2950                         bri->bri_dead = 1;
2951                         reap = 1;
2952                 }
2953         }
2954         if (reap) {
2955                 if (bf & IFBF_FLUSHSYNC)
2956                         bridge_rtreap(sc);
2957                 else
2958                         bridge_rtreap_async(sc);
2959         }
2960 }
2961
2962 /*
2963  * bridge_rtable_init:
2964  *
2965  *      Initialize the route table for this bridge.
2966  */
2967 static void
2968 bridge_rtable_init(struct bridge_softc *sc)
2969 {
2970         int cpu;
2971
2972         /*
2973          * Initialize per-cpu hash tables
2974          */
2975         sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2976                                  M_DEVBUF, M_WAITOK);
2977         for (cpu = 0; cpu < ncpus; ++cpu) {
2978                 int i;
2979
2980                 sc->sc_rthashs[cpu] =
2981                 kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2982                         M_DEVBUF, M_WAITOK);
2983
2984                 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2985                         LIST_INIT(&sc->sc_rthashs[cpu][i]);
2986         }
2987         sc->sc_rthash_key = karc4random();
2988
2989         /*
2990          * Initialize per-cpu lists
2991          */
2992         sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2993                                  M_DEVBUF, M_WAITOK);
2994         for (cpu = 0; cpu < ncpus; ++cpu)
2995                 LIST_INIT(&sc->sc_rtlists[cpu]);
2996 }
2997
2998 /*
2999  * bridge_rtable_fini:
3000  *
3001  *      Deconstruct the route table for this bridge.
3002  */
3003 static void
3004 bridge_rtable_fini(struct bridge_softc *sc)
3005 {
3006         int cpu;
3007
3008         /*
3009          * Free per-cpu hash tables
3010          */
3011         for (cpu = 0; cpu < ncpus; ++cpu)
3012                 kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3013         kfree(sc->sc_rthashs, M_DEVBUF);
3014
3015         /*
3016          * Free per-cpu lists
3017          */
3018         kfree(sc->sc_rtlists, M_DEVBUF);
3019 }
3020
3021 /*
3022  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3023  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3024  */
3025 #define mix(a, b, c)                                                    \
3026 do {                                                                    \
3027         a -= b; a -= c; a ^= (c >> 13);                                 \
3028         b -= c; b -= a; b ^= (a << 8);                                  \
3029         c -= a; c -= b; c ^= (b >> 13);                                 \
3030         a -= b; a -= c; a ^= (c >> 12);                                 \
3031         b -= c; b -= a; b ^= (a << 16);                                 \
3032         c -= a; c -= b; c ^= (b >> 5);                                  \
3033         a -= b; a -= c; a ^= (c >> 3);                                  \
3034         b -= c; b -= a; b ^= (a << 10);                                 \
3035         c -= a; c -= b; c ^= (b >> 15);                                 \
3036 } while (/*CONSTCOND*/0)
3037
3038 static __inline uint32_t
3039 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3040 {
3041         uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3042
3043         b += addr[5] << 8;
3044         b += addr[4];
3045         a += addr[3] << 24;
3046         a += addr[2] << 16;
3047         a += addr[1] << 8;
3048         a += addr[0];
3049
3050         mix(a, b, c);
3051
3052         return (c & BRIDGE_RTHASH_MASK);
3053 }
3054
3055 #undef mix
3056
3057 static int
3058 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3059 {
3060         int i, d;
3061
3062         for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3063                 d = ((int)a[i]) - ((int)b[i]);
3064         }
3065
3066         return (d);
3067 }
3068
3069 /*
3070  * bridge_rtnode_lookup:
3071  *
3072  *      Look up a bridge route node for the specified destination.
3073  */
3074 static struct bridge_rtnode *
3075 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3076 {
3077         struct bridge_rtnode *brt;
3078         uint32_t hash;
3079         int dir;
3080
3081         hash = bridge_rthash(sc, addr);
3082         LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3083                 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3084                 if (dir == 0)
3085                         return (brt);
3086                 if (dir > 0)
3087                         return (NULL);
3088         }
3089
3090         return (NULL);
3091 }
3092
3093 /*
3094  * bridge_rtnode_insert:
3095  *
3096  *      Insert the specified bridge node into the route table.
3097  *      The caller must make sure that the rtnode does not already exist.
3098  */
3099 static void
3100 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3101 {
3102         struct bridge_rtnode *lbrt;
3103         uint32_t hash;
3104         int dir;
3105
3106         hash = bridge_rthash(sc, brt->brt_addr);
3107
3108         lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3109         if (lbrt == NULL) {
3110                 LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3111                 goto out;
3112         }
3113
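        /*
         * Keep the hash chain sorted (larger addresses first) so that
         * bridge_rtnode_lookup() can stop at the first entry that
         * compares smaller than the search key.
         */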
3114         do {
3115                 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3116                 KASSERT(dir != 0, ("rtnode already exists\n"));
3117
3118                 if (dir > 0) {
3119                         LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3120                         goto out;
3121                 }
3122                 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3123                         LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3124                         goto out;
3125                 }
3126                 lbrt = LIST_NEXT(lbrt, brt_hash);
3127         } while (lbrt != NULL);
3128
3129         panic("no suitable position found for rtnode\n");
3130 out:
3131         LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3132         if (mycpuid == 0) {
3133                 /*
3134                  * Update the brtcnt.
3135                  * We only need to do it once and we do it on CPU0.
3136                  */
3137                 sc->sc_brtcnt++;
3138         }
3139 }
3140
3141 /*
3142  * bridge_rtnode_destroy:
3143  *
3144  *      Destroy a bridge rtnode.
3145  */
3146 static void
3147 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3148 {
3149         LIST_REMOVE(brt, brt_hash);
3150         LIST_REMOVE(brt, brt_list);
3151
3152         if (mycpuid + 1 == ncpus) {
3153                 /* Free rtinfo associated with rtnode on the last cpu */
3154                 kfree(brt->brt_info, M_DEVBUF);
3155         }
3156         kfree(brt, M_DEVBUF);
3157
3158         if (mycpuid == 0) {
3159                 /* Update brtcnt only on CPU0 */
3160                 sc->sc_brtcnt--;
3161         }
3162 }
3163
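/*
 * bridge_post_pfil:
 *
 *      Reject packets that came back from pfil(9) with IP forwarding
 *      or dummynet state attached, which the bridge cannot handle yet.
 */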
3164 static __inline int
3165 bridge_post_pfil(struct mbuf *m)
3166 {
3167         if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3168                 return EOPNOTSUPP;
3169
3170         /* Not yet */
3171         if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3172                 return EOPNOTSUPP;
3173
3174         return 0;
3175 }
3176
3177 /*
3178  * Send bridge packets through pfil if they are one of the types pfil can deal
3179  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3180  * question.)  If bifp or ifp is NULL, packet filtering is skipped for
3181  * that interface.
3182  */
3183 static int
3184 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3185 {
3186         int snap, error, i, hlen;
3187         struct ether_header *eh1, eh2;
3188         struct ip *ip;
3189         struct llc llc1;
3190         u_int16_t ether_type;
3191
3192         snap = 0;
3193         error = -1;     /* Default to an error unless cleared below */
3194
3195         if (pfil_bridge == 0 && pfil_member == 0)
3196                 return (0); /* filtering is disabled */
3197
3198         i = min((*mp)->m_pkthdr.len, max_protohdr);
3199         if ((*mp)->m_len < i) {
3200                 *mp = m_pullup(*mp, i);
3201                 if (*mp == NULL) {
3202                         kprintf("%s: m_pullup failed\n", __func__);
3203                         return (-1);
3204                 }
3205         }
3206
3207         eh1 = mtod(*mp, struct ether_header *);
3208         ether_type = ntohs(eh1->ether_type);
3209
3210         /*
3211          * Check for SNAP/LLC.
3212          */
3213         if (ether_type < ETHERMTU) {
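                /*
                 * A type field below ETHERMTU is an 802.3 length rather
                 * than an Ethernet type, so an LLC header follows; if it
                 * is a SNAP header, pick up the encapsulated type from it.
                 */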
3214                 struct llc *llc2 = (struct llc *)(eh1 + 1);
3215
3216                 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3217                     llc2->llc_dsap == LLC_SNAP_LSAP &&
3218                     llc2->llc_ssap == LLC_SNAP_LSAP &&
3219                     llc2->llc_control == LLC_UI) {
3220                         ether_type = htons(llc2->llc_un.type_snap.ether_type);
3221                         snap = 1;
3222                 }
3223         }
3224
3225         /*
3226          * If we're trying to filter bridge traffic, don't look at anything
3227          * other than IP and ARP traffic.  If the filter doesn't understand
3228          * IPv6, don't allow IPv6 through the bridge either.  This is lame
3229          * since if we really wanted, say, an AppleTalk filter, we are hosed,
3230          * but of course we don't have an AppleTalk filter to begin with.
3231          * (Note that since pfil doesn't understand ARP it will pass *ALL*
3232          * ARP traffic.)
3233          */
3234         switch (ether_type) {
3235         case ETHERTYPE_ARP:
3236         case ETHERTYPE_REVARP:
3237                 return (0); /* Automatically pass */
3238
3239         case ETHERTYPE_IP:
3240 #ifdef INET6
3241         case ETHERTYPE_IPV6:
3242 #endif /* INET6 */
3243                 break;
3244
3245         default:
3246                 /*
3247                  * Check whether the user wants to pass non-IP packets;
3248                  * pfil(9) will not check them, so they would be passed
3249                  * unconditionally.  The default is therefore to drop.
3250                  */
3251                 if (pfil_onlyip)
3252                         goto bad;
3253         }
3254
3255         /* Strip off the Ethernet header and keep a copy. */
3256         m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3257         m_adj(*mp, ETHER_HDR_LEN);
3258
3259         /* Strip off snap header, if present */
3260         if (snap) {
3261                 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3262                 m_adj(*mp, sizeof(struct llc));
3263         }
3264
3265         /*
3266          * Check the IP header for alignment and errors
3267          */
3268         if (dir == PFIL_IN) {
3269                 switch (ether_type) {
3270                 case ETHERTYPE_IP:
3271                         error = bridge_ip_checkbasic(mp);
3272                         break;
3273 #ifdef INET6
3274                 case ETHERTYPE_IPV6:
3275                         error = bridge_ip6_checkbasic(mp);
3276                         break;
3277 #endif /* INET6 */
3278                 default:
3279                         error = 0;
3280                 }
3281                 if (error)
3282                         goto bad;
3283         }
3284
3285         error = 0;
3286
3287         /*
3288          * Run the packet through pfil
3289          */
3290         switch (ether_type) {
3291         case ETHERTYPE_IP:
3292                 /*
3293                  * Before calling the firewall, swap fields the same way
3294                  * the IP input path does; assume the header is contiguous.
3295                  */
3296                 ip = mtod(*mp, struct ip *);
3297
3298                 ip->ip_len = ntohs(ip->ip_len);
3299                 ip->ip_off = ntohs(ip->ip_off);
3300
3301                 /*
3302                  * Run pfil on the member interface and the bridge, both can
3303                  * be skipped by clearing pfil_member or pfil_bridge.
3304                  *
3305                  * Keep the order:
3306                  *   in_if -> bridge_if -> out_if
3307                  */
3308                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3309                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3310                         if (*mp == NULL || error != 0) /* filter may consume */
3311                                 break;
3312                         error = bridge_post_pfil(*mp);
3313                         if (error)
3314                                 break;
3315                 }
3316
3317                 if (pfil_member && ifp != NULL) {
3318                         error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3319                         if (*mp == NULL || error != 0) /* filter may consume */
3320                                 break;
3321                         error = bridge_post_pfil(*mp);
3322                         if (error)
3323                                 break;
3324                 }
3325
3326                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3327                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3328                         if (*mp == NULL || error != 0) /* filter may consume */
3329                                 break;
3330                         error = bridge_post_pfil(*mp);
3331                         if (error)
3332                                 break;
3333                 }
3334
3335                 /* check if we need to fragment the packet */
3336                 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3337                         i = (*mp)->m_pkthdr.len;
3338                         if (i > ifp->if_mtu) {
3339                                 error = bridge_fragment(ifp, *mp, &eh2, snap,
3340                                             &llc1);
3341                                 return (error);
3342                         }
3343                 }
3344
3345                 /* Recalculate the ip checksum and restore byte ordering */
3346                 ip = mtod(*mp, struct ip *);
3347                 hlen = ip->ip_hl << 2;
3348                 if (hlen < sizeof(struct ip))
3349                         goto bad;
3350                 if (hlen > (*mp)->m_len) {
3351                         if ((*mp = m_pullup(*mp, hlen)) == NULL)
3352                                 goto bad;
3353                         ip = mtod(*mp, struct ip *);
3354                         if (ip == NULL)
3355                                 goto bad;
3356                 }
3357                 ip->ip_len = htons(ip->ip_len);
3358                 ip->ip_off = htons(ip->ip_off);
3359                 ip->ip_sum = 0;
3360                 if (hlen == sizeof(struct ip))
3361                         ip->ip_sum = in_cksum_hdr(ip);
3362                 else
3363                         ip->ip_sum = in_cksum(*mp, hlen);
3364
3365                 break;
3366 #ifdef INET6
3367         case ETHERTYPE_IPV6:
3368                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3369                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3370                                         dir);
3371
3372                 if (*mp == NULL || error != 0) /* filter may consume */
3373                         break;
3374
3375                 if (pfil_member && ifp != NULL)
3376                         error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3377                                         dir);
3378
3379                 if (*mp == NULL || error != 0) /* filter may consume */
3380                         break;
3381
3382                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3383                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3384                                         dir);
3385                 break;
3386 #endif
3387         default:
3388                 error = 0;
3389                 break;
3390         }
3391
3392         if (*mp == NULL)
3393                 return (error);
3394         if (error != 0)
3395                 goto bad;
3396
3397         error = -1;
3398
3399         /*
3400          * Finally, put everything back the way it was and return
3401          */
3402         if (snap) {
3403                 M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3404                 if (*mp == NULL)
3405                         return (error);
3406                 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3407         }
3408
3409         M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3410         if (*mp == NULL)
3411                 return (error);
3412         bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3413
3414         return (0);
3415
3416 bad:
3417         m_freem(*mp);
3418         *mp = NULL;
3419         return (error);
3420 }
3421
3422 /*
3423  * Perform basic sanity checks on the header, since
3424  * pfil assumes ip_input has already processed it.
3425  * Cut-and-pasted from ip_input.c.
3426  * Given how simple the IPv6 version is,
3427  * does the IPv4 version really need to be
3428  * this complicated?
3429  *
3430  * XXX Should we update ipstat here, or not?
3431  * XXX Right now we update ipstat but not
3432  * XXX csum_counter.
3433  */
3434 static int
3435 bridge_ip_checkbasic(struct mbuf **mp)
3436 {
3437         struct mbuf *m = *mp;
3438         struct ip *ip;
3439         int len, hlen;
3440         u_short sum;
3441
3442         if (*mp == NULL)
3443                 return (-1);
3444 #if notyet
3445         if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3446                 if ((m = m_copyup(m, sizeof(struct ip),
3447                         (max_linkhdr + 3) & ~3)) == NULL) {
3448                         /* XXXJRT new stat, please */
3449                         ipstat.ips_toosmall++;
3450                         goto bad;
3451                 }
3452         } else
3453 #endif
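/* Fall back to a no-op if the compiler's branch-prediction hint is absent. */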
3454 #ifndef __predict_false
3455 #define __predict_false(x) x
3456 #endif
3457          if (__predict_false(m->m_len < sizeof (struct ip))) {
3458                 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3459                         ipstat.ips_toosmall++;
3460                         goto bad;
3461                 }
3462         }
3463         ip = mtod(m, struct ip *);
3464         if (ip == NULL) goto bad;
3465
3466         if (ip->ip_v != IPVERSION) {
3467                 ipstat.ips_badvers++;
3468                 goto bad;
3469         }
3470         hlen = ip->ip_hl << 2;
3471         if (hlen < sizeof(struct ip)) { /* minimum header length */
3472                 ipstat.ips_badhlen++;
3473                 goto bad;
3474         }
3475         if (hlen > m->m_len) {
3476                 if ((m = m_pullup(m, hlen)) == NULL) {
3477                         ipstat.ips_badhlen++;
3478                         goto bad;
3479                 }
3480                 ip = mtod(m, struct ip *);
3481                 if (ip == NULL) goto bad;
3482         }
3483
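        /*
         * Use the checksum status recorded by the driver when available;
         * otherwise verify the IP header checksum in software.
         */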
3484         if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3485                 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3486         } else {
3487                 if (hlen == sizeof(struct ip)) {
3488                         sum = in_cksum_hdr(ip);
3489                 } else {
3490                         sum = in_cksum(m, hlen);
3491                 }
3492         }
3493         if (sum) {
3494                 ipstat.ips_badsum++;
3495                 goto bad;
3496         }
3497
3498         /* Retrieve the packet length. */
3499         len = ntohs(ip->ip_len);
3500
3501         /*
3502          * Check for additional length bogosity
3503          */
3504         if (len < hlen) {
3505                 ipstat.ips_badlen++;
3506                 goto bad;
3507         }
3508
3509         /*
3510          * Check that the amount of data in the buffers
3511          * is at least as much as the IP header would have us expect.
3512          * Drop packet if shorter than we expect.
3513          */
3514         if (m->m_pkthdr.len < len) {
3515                 ipstat.ips_tooshort++;
3516                 goto bad;
3517         }
3518
3519         /* Checks out, proceed */
3520         *mp = m;
3521         return (0);
3522
3523 bad:
3524         *mp = m;
3525         return (-1);
3526 }
3527
3528 #ifdef INET6
3529 /*
3530  * Same as above, but for IPv6.
3531  * Cut-and-pasted from ip6_input.c.
3532  * XXX Should we update ip6stat, or not?
3533  */
3534 static int
3535 bridge_ip6_checkbasic(struct mbuf **mp)
3536 {
3537         struct mbuf *m = *mp;
3538         struct ip6_hdr *ip6;
3539
3540         /*
3541          * If the IPv6 header is not aligned, slurp it up into a new
3542          * mbuf with space for link headers, in the event we forward
3543          * it.  Otherwise, if it is aligned, make sure the entire base
3544          * IPv6 header is in the first mbuf of the chain.
3545          */
3546 #if notyet
3547         if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3548                 struct ifnet *inifp = m->m_pkthdr.rcvif;
3549                 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3550                             (max_linkhdr + 3) & ~3)) == NULL) {
3551                         /* XXXJRT new stat, please */
3552                         ip6stat.ip6s_toosmall++;
3553                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3554                         goto bad;
3555                 }
3556         } else
3557 #endif
3558         if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3559                 struct ifnet *inifp = m->m_pkthdr.rcvif;
3560                 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3561                         ip6stat.ip6s_toosmall++;
3562                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3563                         goto bad;
3564                 }
3565         }
3566
3567         ip6 = mtod(m, struct ip6_hdr *);
3568
3569         if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3570                 ip6stat.ip6s_badvers++;
3571                 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3572                 goto bad;
3573         }
3574
3575         /* Checks out, proceed */
3576         *mp = m;
3577         return (0);
3578
3579 bad:
3580         *mp = m;
3581         return (-1);
3582 }
3583 #endif /* INET6 */
3584
3585 /*
3586  * bridge_fragment:
3587  *
3588  *      Fragment the packet into an m_nextpkt-linked mbuf chain.
3589  */
3590 static int
3591 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3592     int snap, struct llc *llc)
3593 {
3594         struct mbuf *m0;
3595         struct ip *ip;
3596         int error = -1;
3597
3598         if (m->m_len < sizeof(struct ip) &&
3599             (m = m_pullup(m, sizeof(struct ip))) == NULL)
3600                 goto out;
3601         ip = mtod(m, struct ip *);
3602
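        /*
         * ip_fragment() splits the packet into a chain of fragments
         * linked through m_nextpkt, each sized to fit the interface MTU.
         */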
3603         error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3604                     CSUM_DELAY_IP);
3605         if (error)
3606                 goto out;
3607
3608         /* walk the chain and re-add the Ethernet header */
3609         for (m0 = m; m0; m0 = m0->m_nextpkt) {
3610                 if (error == 0) {
3611                         if (snap) {
3612                                 M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3613                                 if (m0 == NULL) {
3614                                         error = ENOBUFS;
3615                                         continue;
3616                                 }
3617                                 bcopy(llc, mtod(m0, caddr_t),
3618                                     sizeof(struct llc));
3619                         }
3620                         M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3621                         if (m0 == NULL) {
3622                                 error = ENOBUFS;
3623                                 continue;
3624                         }
3625                         bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3626                 } else
3627                         m_freem(m0);
3628         }
3629
3630         if (error == 0)
3631                 ipstat.ips_fragmented++;
3632
3633         return (error);
3634
3635 out:
3636         if (m != NULL)
3637                 m_freem(m);
3638         return (error);
3639 }
3640
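/*
 * bridge_enqueue_handler:
 *
 *      Netmsg handler: extract the packet and destination interface from
 *      the message and hand the packet off to that interface.
 */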
3641 static void
3642 bridge_enqueue_handler(struct netmsg *nmsg)
3643 {
3644         struct netmsg_packet *nmp;
3645         struct ifnet *dst_ifp;
3646         struct mbuf *m;
3647
3648         nmp = (struct netmsg_packet *)nmsg;
3649         m = nmp->nm_packet;
3650         dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3651
3652         bridge_handoff(dst_ifp, m);
3653 }
3654
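/*
 * bridge_handoff:
 *
 *      Dispatch a packet, or an m_nextpkt-linked chain of fragments, onto
 *      the destination interface's send queue, classifying it for ALTQ
 *      when the queue has ALTQ enabled.
 */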
3655 static void
3656 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3657 {
3658         struct mbuf *m0;
3659
3660         /* We may be sending a fragment so traverse the mbuf */
3661         for (; m; m = m0) {
3662                 struct altq_pktattr pktattr;
3663
3664                 m0 = m->m_nextpkt;
3665                 m->m_nextpkt = NULL;
3666
3667                 if (ifq_is_enabled(&dst_ifp->if_snd))
3668                         altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3669
3670                 ifq_dispatch(dst_ifp, m, &pktattr);
3671         }
3672 }
3673
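/*
 * bridge_control_dispatch:
 *
 *      Run the requested bridge control function while holding the bridge
 *      interface's serializer and reply with its error code.
 */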
3674 static void
3675 bridge_control_dispatch(struct netmsg *nmsg)
3676 {
3677         struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3678         struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3679         int error;
3680
3681         lwkt_serialize_enter(bifp->if_serializer);
3682         error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3683         lwkt_serialize_exit(bifp->if_serializer);
3684
3685         lwkt_replymsg(&nmsg->nm_lmsg, error);
3686 }
3687
3688 static int
3689 bridge_control(struct bridge_softc *sc, u_long cmd,
3690                bridge_ctl_t bc_func, void *bc_arg)
3691 {
3692         struct ifnet *bifp = sc->sc_ifp;
3693         struct netmsg_brctl bc_msg;
3694         struct netmsg *nmsg;
3695         int error;
3696
3697         ASSERT_SERIALIZED(bifp->if_serializer);
3698
3699         bzero(&bc_msg, sizeof(bc_msg));
3700         nmsg = &bc_msg.bc_nmsg;
3701
3702         netmsg_init(nmsg, &curthread->td_msgport, 0, bridge_control_dispatch);
3703         bc_msg.bc_func = bc_func;
3704         bc_msg.bc_sc = sc;
3705         bc_msg.bc_arg = bc_arg;
3706
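        /*
         * Drop the bridge serializer while the request is dispatched to
         * BRIDGE_CFGPORT and re-acquire it before returning to the caller.
         */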
3707         lwkt_serialize_exit(bifp->if_serializer);
3708         error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
3709         lwkt_serialize_enter(bifp->if_serializer);
3710         return error;
3711 }
3712
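/*
 * bridge_add_bif_handler:
 *
 *      Per-CPU handler: allocate a bridge_iflist entry for the new member,
 *      link it onto this CPU's interface list, then forward the message to
 *      the next CPU.
 */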
3713 static void
3714 bridge_add_bif_handler(struct netmsg *nmsg)
3715 {
3716         struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3717         struct bridge_softc *sc;
3718         struct bridge_iflist *bif;
3719
3720         sc = amsg->br_softc;
3721
3722         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3723         bif->bif_ifp = amsg->br_bif_ifp;
3724         bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3725         bif->bif_onlist = 1;
3726         bif->bif_info = amsg->br_bif_info;
3727
3728         LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3729
3730         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3731 }
3732
3733 static void
3734 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3735                struct ifnet *ifp)
3736 {
3737         struct netmsg_braddbif amsg;
3738
3739         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3740
3741         netmsg_init(&amsg.br_nmsg, &curthread->td_msgport, 0,
3742                     bridge_add_bif_handler);
3743         amsg.br_softc = sc;
3744         amsg.br_bif_info = bif_info;
3745         amsg.br_bif_ifp = ifp;
3746
3747         ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
3748 }
3749
3750 static void
3751 bridge_del_bif_handler(struct netmsg *nmsg)
3752 {
3753         struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3754         struct bridge_softc *sc;
3755         struct bridge_iflist *bif;
3756
3757         sc = dmsg->br_softc;
3758
3759         /*
3760          * Locate the bif associated with the br_bif_info
3761          * on the current CPU
3762          */
3763         bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3764         KKASSERT(bif != NULL && bif->bif_onlist);
3765
3766         /* Remove the bif from the current CPU's iflist */
3767         bif->bif_onlist = 0;
3768         LIST_REMOVE(bif, bif_next);
3769
3770         /* Save the removed bif for later freeing */
3771         LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3772
3773         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3774 }
3775
3776 static void
3777 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3778                struct bridge_iflist_head *saved_bifs)
3779 {
3780         struct netmsg_brdelbif dmsg;
3781
3782         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3783
3784         netmsg_init(&dmsg.br_nmsg, &curthread->td_msgport, 0,
3785                     bridge_del_bif_handler);
3786         dmsg.br_softc = sc;
3787         dmsg.br_bif_info = bif_info;
3788         dmsg.br_bif_list = saved_bifs;
3789
3790         ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
3791 }
3792
3793 static void
3794 bridge_set_bifflags_handler(struct netmsg *nmsg)
3795 {
3796         struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3797         struct bridge_softc *sc;
3798         struct bridge_iflist *bif;
3799
3800         sc = smsg->br_softc;
3801
3802         /*
3803          * Locate the bif associated with the br_bif_info
3804          * on the current CPU
3805          */
3806         bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3807         KKASSERT(bif != NULL && bif->bif_onlist);
3808
3809         bif->bif_flags = smsg->br_bif_flags;
3810
3811         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3812 }
3813
3814 static void
3815 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3816                     uint32_t bif_flags)
3817 {
3818         struct netmsg_brsflags smsg;
3819
3820         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3821
3822         netmsg_init(&smsg.br_nmsg, &curthread->td_msgport, 0,
3823                     bridge_set_bifflags_handler);
3824         smsg.br_softc = sc;
3825         smsg.br_bif_info = bif_info;
3826         smsg.br_bif_flags = bif_flags;
3827
3828         ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);
3829 }