1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed for the NetBSD Project by
18  *      Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
70  */
71
72 /*
73  * Network interface bridge support.
74  *
75  * TODO:
76  *
77  *      - Currently only supports Ethernet-like interfaces (Ethernet,
78  *        802.11, VLANs on Ethernet, etc.)  Figure out a nice way
79  *        to bridge other types of interfaces (FDDI-FDDI, and maybe
80  *        consider heterogeneous bridges).
81  *
82  *
83  * The bridge's route information is duplicated on each CPU:
84  *
85  *      CPU0          CPU1          CPU2          CPU3
86  * +-----------+ +-----------+ +-----------+ +-----------+
87  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
88  * |           | |           | |           | |           |
89  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90  * +-----------+ +-----------+ +-----------+ +-----------+
91  *       |         |                     |         |
92  *       |         |                     |         |
93  *       |         |     +----------+    |         |
94  *       |         |     |  rtinfo  |    |         |
95  *       |         +---->|          |<---+         |
96  *       |               |  flags   |              |
97  *       +-------------->|  timeout |<-------------+
98  *                       |  dst_ifp |
99  *                       +----------+
100  *
101  * We choose to put timeout and dst_ifp into the shared part, so updating
102  * them is cheaper than using message forwarding.  Also there is no need
103  * to use a spinlock to protect the updates: timeout and dst_ifp are
104  * unrelated, and the order in which a specific field is updated does not
105  * matter.  The cache pollution caused by the shared part should not be
106  * heavy: in a stable setup, dst_ifp will probably not change during the
107  * rtnode's lifetime, while timeout is refreshed once per second; most of
108  * the time, timeout and dst_ifp are only read.
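 *
 * A simplified sketch of the split described above (field names follow
 * their usage later in this file; the authoritative definitions live in
 * the bridge headers, not here):
 *
 *     struct bridge_rtinfo {                    (shared by all CPUs)
 *         struct ifnet    *bri_ifp;             (destination interface)
 *         unsigned long   bri_expire;           (expiration time)
 *         uint8_t         bri_flags;            (IFBAF_* flags)
 *     };
 *
 *     struct bridge_rtnode {                    (one instance per CPU)
 *         uint8_t         brt_addr[ETHER_ADDR_LEN];   (dst eaddr)
 *         struct bridge_rtinfo *brt_info;       (points to the shared rtinfo)
 *     };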
109  *
110  *
111  * Bridge route information installation on bridge_input path:
112  *
113  *      CPU0           CPU1         CPU2          CPU3
114  *
115  *                               tcp_thread2
116  *                                    |
117  *                                alloc nmsg
118  *                    snd nmsg        |
119  *                    w/o rtinfo      |
120  *      ifnet0<-----------------------+
121  *        |                           :
122  *    lookup dst                      :
123  *   rtnode exists?(Y)free nmsg       :
124  *        |(N)                        :
125  *        |
126  *  alloc rtinfo
127  *  alloc rtnode
128  * install rtnode
129  *        |
130  *        +---------->ifnet1
131  *        : fwd nmsg    |
132  *        : w/ rtinfo   |
133  *        :             |
134  *        :             |
135  *                 alloc rtnode
136  *               (w/ nmsg's rtinfo)
137  *                install rtnode
138  *                      |
139  *                      +---------->ifnet2
140  *                      : fwd nmsg    |
141  *                      : w/ rtinfo   |
142  *                      :             |
143  *                      :         same as ifnet1
144  *                                    |
145  *                                    +---------->ifnet3
146  *                                    : fwd nmsg    |
147  *                                    : w/ rtinfo   |
148  *                                    :             |
149  *                                    :         same as ifnet1
150  *                                               free nmsg
151  *                                                  :
152  *                                                  :
153  *
154  * The netmsgs forwarded between protocol threads and ifnet threads are
155  * allocated with (M_WAITOK|M_NULLOK), so the allocation will not fail in
156  * most cases (route information is too precious not to be installed :).
157  * Since multiple threads may try to install route information for the
158  * same dst eaddr, we look the route information up first.  However, this
159  * lookup only needs to be performed on ifnet0, which is the starting
160  * point of the route information installation process.
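 *
 * In rough pseudo-code (an illustrative sketch of the flow above, not the
 * actual handlers), the per-ifnet installation step is:
 *
 *     install_handler(nmsg):
 *         if (nmsg carries no rtinfo) {             (only happens on ifnet0)
 *             if (an rtnode for the dst eaddr already exists) {
 *                 free nmsg and return;
 *             }
 *             allocate the shared rtinfo;
 *         }
 *         allocate an rtnode pointing at the rtinfo and insert it into
 *         this CPU's route table;
 *         if (this is the last CPU)
 *             free nmsg;
 *         else
 *             forward nmsg (now carrying the rtinfo) to the next CPU;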
161  *
162  *
163  * Bridge route information deleting/flushing:
164  *
165  *  CPU0            CPU1             CPU2             CPU3
166  *
167  * netisr0
168  *   |
169  * find suitable rtnodes,
170  * mark their rtinfo dead
171  *   |
172  *   | domsg <------------------------------------------+
173  *   |                                                  | replymsg
174  *   |                                                  |
175  *   V     fwdmsg           fwdmsg           fwdmsg     |
176  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
178  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
179  *                                                    free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
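 *
 * In rough pseudo-code (again only an illustrative sketch of the flow
 * above, not the actual handlers):
 *
 *     on netisr0:
 *         mark the matching rtinfos dead;
 *         send a delete message to ifnet0 and wait for the reply;
 *     on each ifnetX (message forwarded ifnet0 -> ifnet1 -> ...):
 *         delete this CPU's rtnodes whose rtinfo is marked dead;
 *         if (this is the last CPU) {
 *             free the dead rtinfos;
 *             reply to netisr0;
 *         } else {
 *             forward the message to the next CPU;
 *         }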
183  *
184  *
185  * Bridge route information adding/deleting/flushing:
186  * Since all operations are serialized by the fixed message flow between
187  * ifnet threads, it is not possible to create corrupted per-cpu route
188  * information.
189  *
190  *
191  *
192  * Percpu member interface list iteration with blocking operation:
193  * Since a bridge can only delete one member interface at a time and the
194  * deleted member interface is not freed until after netmsg_service_sync(),
195  * the following idiom is used to make sure that the iteration can keep
196  * going even if a member interface is ripped from the percpu list during
197  * a blocking operation:
198  *
199  * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200  *     blocking operation;
201  *     blocking operation;
202  *     ...
203  *     ...
204  *     if (nbif != NULL && !nbif->bif_onlist) {
205  *         KKASSERT(bif->bif_onlist);
206  *         nbif = LIST_NEXT(bif, bif_next);
207  *     }
208  * }
209  *
210  * As mentioned above, only one member interface can be unlinked from the
211  * percpu member interface list at a time, so either bif or nbif may be off
212  * the list, but _not_ both.  To keep the iteration going we only care about
213  * nbif, not bif.  Since a removed member interface is only freed after we
214  * finish our work, it is safe to access any field in an unlinked bif (here
215  * bif_onlist).  If nbif is no longer on the list, then bif must still be on
216  * the list, so we set nbif to the next element of bif and keep going.
217  */
218
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
221
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h>  /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/priv.h>
237 #include <sys/lock.h>
238 #include <sys/thread.h>
239 #include <sys/thread2.h>
240 #include <sys/mpipe.h>
241
242 #include <net/bpf.h>
243 #include <net/if.h>
244 #include <net/if_dl.h>
245 #include <net/if_types.h>
246 #include <net/if_var.h>
247 #include <net/pfil.h>
248 #include <net/ifq_var.h>
249 #include <net/if_clone.h>
250
251 #include <netinet/in.h> /* for struct arpcom */
252 #include <netinet/in_systm.h>
253 #include <netinet/in_var.h>
254 #include <netinet/ip.h>
255 #include <netinet/ip_var.h>
256 #ifdef INET6
257 #include <netinet/ip6.h>
258 #include <netinet6/ip6_var.h>
259 #endif
260 #include <netinet/if_ether.h> /* for struct arpcom */
261 #include <net/bridge/if_bridgevar.h>
262 #include <net/if_llc.h>
263 #include <net/netmsg2.h>
264
265 #include <net/route.h>
266 #include <sys/in_cksum.h>
267
268 /*
269  * Size of the route hash table.  Must be a power of two.
270  */
271 #ifndef BRIDGE_RTHASH_SIZE
272 #define BRIDGE_RTHASH_SIZE              1024
273 #endif
274
275 #define BRIDGE_RTHASH_MASK              (BRIDGE_RTHASH_SIZE - 1)
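
/*
 * Illustrative note: because the table size is a power of two, a hash
 * value can be reduced to a bucket index with a simple mask instead of a
 * modulo operation, e.g.
 *
 *     bucket = hash & BRIDGE_RTHASH_MASK;
 *
 * where "hash" stands for whatever hash is computed over the destination
 * ethernet address (the hash function itself is defined elsewhere).
 */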
276
277 /*
278  * Maximum number of addresses to cache.
279  */
280 #ifndef BRIDGE_RTABLE_MAX
281 #define BRIDGE_RTABLE_MAX               100
282 #endif
283
284 /*
285  * Spanning tree defaults.
286  */
287 #define BSTP_DEFAULT_MAX_AGE            (20 * 256)
288 #define BSTP_DEFAULT_HELLO_TIME         (2 * 256)
289 #define BSTP_DEFAULT_FORWARD_DELAY      (15 * 256)
290 #define BSTP_DEFAULT_HOLD_TIME          (1 * 256)
291 #define BSTP_DEFAULT_BRIDGE_PRIORITY    0x8000
292 #define BSTP_DEFAULT_PORT_PRIORITY      0x80
293 #define BSTP_DEFAULT_PATH_COST          55
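
/*
 * Note: the spanning tree times above are kept in units of 1/256 of a
 * second, which is why the ioctl handlers later in this file convert the
 * user-supplied second-granularity values with a shift, e.g.
 *
 *     sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
 *     param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
 */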
294
295 /*
296  * Timeout (in seconds) for entries learned dynamically.
297  */
298 #ifndef BRIDGE_RTABLE_TIMEOUT
299 #define BRIDGE_RTABLE_TIMEOUT           (20 * 60)       /* same as ARP */
300 #endif
301
302 /*
303  * Number of seconds between walks of the route list.
304  */
305 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
306 #define BRIDGE_RTABLE_PRUNE_PERIOD      (5 * 60)
307 #endif
308
309 /*
310  * List of capabilities to mask on the member interface.
311  */
312 #define BRIDGE_IFCAPS_MASK              IFCAP_TXCSUM
313
314 typedef int     (*bridge_ctl_t)(struct bridge_softc *, void *);
315
316 struct netmsg_brctl {
317         struct netmsg           bc_nmsg;
318         bridge_ctl_t            bc_func;
319         struct bridge_softc     *bc_sc;
320         void                    *bc_arg;
321 };
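
/*
 * Sketch of the intended usage (an assumption; bridge_control() itself is
 * defined later and may differ in detail): a configuration request is
 * wrapped in a netmsg_brctl and executed in the context of BRIDGE_CFGPORT,
 * roughly:
 *
 *     struct netmsg_brctl bc_msg;
 *
 *     netmsg_init(&bc_msg.bc_nmsg, &curthread->td_msgport, 0,
 *                 bridge_control_dispatch);    (hypothetical handler name)
 *     bc_msg.bc_func = bc_func;
 *     bc_msg.bc_sc = sc;
 *     bc_msg.bc_arg = bc_arg;
 *     error = lwkt_domsg(BRIDGE_CFGPORT, &bc_msg.bc_nmsg.nm_lmsg, 0);
 *
 * where the dispatch handler calls bc_func(bc_sc, bc_arg) and replies with
 * its error code, mirroring how bridge_clone_destroy() below sends
 * bridge_delete_dispatch to the same port.
 */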
322
323 struct netmsg_brsaddr {
324         struct netmsg           br_nmsg;
325         struct bridge_softc     *br_softc;
326         struct ifnet            *br_dst_if;
327         struct bridge_rtinfo    *br_rtinfo;
328         int                     br_setflags;
329         uint8_t                 br_dst[ETHER_ADDR_LEN];
330         uint8_t                 br_flags;
331 };
332
333 struct netmsg_braddbif {
334         struct netmsg           br_nmsg;
335         struct bridge_softc     *br_softc;
336         struct bridge_ifinfo    *br_bif_info;
337         struct ifnet            *br_bif_ifp;
338 };
339
340 struct netmsg_brdelbif {
341         struct netmsg           br_nmsg;
342         struct bridge_softc     *br_softc;
343         struct bridge_ifinfo    *br_bif_info;
344         struct bridge_iflist_head *br_bif_list;
345 };
346
347 struct netmsg_brsflags {
348         struct netmsg           br_nmsg;
349         struct bridge_softc     *br_softc;
350         struct bridge_ifinfo    *br_bif_info;
351         uint32_t                br_bif_flags;
352 };
353
354 eventhandler_tag        bridge_detach_cookie = NULL;
355
356 extern  struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
357 extern  int (*bridge_output_p)(struct ifnet *, struct mbuf *);
358 extern  void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
359
360 static int      bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
361
362 static int      bridge_clone_create(struct if_clone *, int);
363 static void     bridge_clone_destroy(struct ifnet *);
364
365 static int      bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void     bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
367 static void     bridge_ifdetach(void *, struct ifnet *);
368 static void     bridge_init(void *);
369 static void     bridge_stop(struct ifnet *);
370 static void     bridge_start(struct ifnet *);
371 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
372 static int      bridge_output(struct ifnet *, struct mbuf *);
373
374 static void     bridge_forward(struct bridge_softc *, struct mbuf *m);
375
376 static void     bridge_timer_handler(struct netmsg *);
377 static void     bridge_timer(void *);
378
379 static void     bridge_start_bcast(struct bridge_softc *, struct mbuf *);
380 static void     bridge_broadcast(struct bridge_softc *, struct ifnet *,
381                     struct mbuf *);
382 static void     bridge_span(struct bridge_softc *, struct mbuf *);
383
384 static int      bridge_rtupdate(struct bridge_softc *, const uint8_t *,
385                     struct ifnet *, uint8_t);
386 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
387 static void     bridge_rtreap(struct bridge_softc *);
388 static void     bridge_rtreap_async(struct bridge_softc *);
389 static void     bridge_rttrim(struct bridge_softc *);
390 static int      bridge_rtage_finddead(struct bridge_softc *);
391 static void     bridge_rtage(struct bridge_softc *);
392 static void     bridge_rtflush(struct bridge_softc *, int);
393 static int      bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
394 static int      bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
395                     struct ifnet *, uint8_t);
396 static void     bridge_rtmsg_sync(struct bridge_softc *sc);
397 static void     bridge_rtreap_handler(struct netmsg *);
398 static void     bridge_rtinstall_handler(struct netmsg *);
399 static int      bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
400                     struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
401
402 static void     bridge_rtable_init(struct bridge_softc *);
403 static void     bridge_rtable_fini(struct bridge_softc *);
404
405 static int      bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
406 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
407                     const uint8_t *);
408 static void     bridge_rtnode_insert(struct bridge_softc *,
409                     struct bridge_rtnode *);
410 static void     bridge_rtnode_destroy(struct bridge_softc *,
411                     struct bridge_rtnode *);
412
413 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
414                     const char *name);
415 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
416                     struct ifnet *ifp);
417 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
418                     struct bridge_ifinfo *);
419 static void     bridge_delete_member(struct bridge_softc *,
420                     struct bridge_iflist *, int);
421 static void     bridge_delete_span(struct bridge_softc *,
422                     struct bridge_iflist *);
423
424 static int      bridge_control(struct bridge_softc *, u_long,
425                                bridge_ctl_t, void *);
426 static int      bridge_ioctl_init(struct bridge_softc *, void *);
427 static int      bridge_ioctl_stop(struct bridge_softc *, void *);
428 static int      bridge_ioctl_add(struct bridge_softc *, void *);
429 static int      bridge_ioctl_del(struct bridge_softc *, void *);
430 static int      bridge_ioctl_gifflags(struct bridge_softc *, void *);
431 static int      bridge_ioctl_sifflags(struct bridge_softc *, void *);
432 static int      bridge_ioctl_scache(struct bridge_softc *, void *);
433 static int      bridge_ioctl_gcache(struct bridge_softc *, void *);
434 static int      bridge_ioctl_gifs(struct bridge_softc *, void *);
435 static int      bridge_ioctl_rts(struct bridge_softc *, void *);
436 static int      bridge_ioctl_saddr(struct bridge_softc *, void *);
437 static int      bridge_ioctl_sto(struct bridge_softc *, void *);
438 static int      bridge_ioctl_gto(struct bridge_softc *, void *);
439 static int      bridge_ioctl_daddr(struct bridge_softc *, void *);
440 static int      bridge_ioctl_flush(struct bridge_softc *, void *);
441 static int      bridge_ioctl_gpri(struct bridge_softc *, void *);
442 static int      bridge_ioctl_spri(struct bridge_softc *, void *);
443 static int      bridge_ioctl_ght(struct bridge_softc *, void *);
444 static int      bridge_ioctl_sht(struct bridge_softc *, void *);
445 static int      bridge_ioctl_gfd(struct bridge_softc *, void *);
446 static int      bridge_ioctl_sfd(struct bridge_softc *, void *);
447 static int      bridge_ioctl_gma(struct bridge_softc *, void *);
448 static int      bridge_ioctl_sma(struct bridge_softc *, void *);
449 static int      bridge_ioctl_sifprio(struct bridge_softc *, void *);
450 static int      bridge_ioctl_sifcost(struct bridge_softc *, void *);
451 static int      bridge_ioctl_addspan(struct bridge_softc *, void *);
452 static int      bridge_ioctl_delspan(struct bridge_softc *, void *);
453 static int      bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
454                     int);
455 static int      bridge_ip_checkbasic(struct mbuf **mp);
456 #ifdef INET6
457 static int      bridge_ip6_checkbasic(struct mbuf **mp);
458 #endif /* INET6 */
459 static int      bridge_fragment(struct ifnet *, struct mbuf *,
460                     struct ether_header *, int, struct llc *);
461 static void     bridge_enqueue_handler(struct netmsg *);
462 static void     bridge_handoff(struct ifnet *, struct mbuf *);
463
464 static void     bridge_del_bif_handler(struct netmsg *);
465 static void     bridge_add_bif_handler(struct netmsg *);
466 static void     bridge_set_bifflags_handler(struct netmsg *);
467 static void     bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
468                     struct bridge_iflist_head *);
469 static void     bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
470                     struct ifnet *);
471 static void     bridge_set_bifflags(struct bridge_softc *,
472                     struct bridge_ifinfo *, uint32_t);
473
474 SYSCTL_DECL(_net_link);
475 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
476
477 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
478 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
479 static int pfil_member = 1; /* run pfil hooks on the member interface */
480 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
481     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
482 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
483     &pfil_bridge, 0, "Packet filter on the bridge interface");
484 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
485     &pfil_member, 0, "Packet filter on the member interface");
486
487 struct bridge_control_arg {
488         union {
489                 struct ifbreq ifbreq;
490                 struct ifbifconf ifbifconf;
491                 struct ifbareq ifbareq;
492                 struct ifbaconf ifbaconf;
493                 struct ifbrparam ifbrparam;
494         } bca_u;
495         int     bca_len;
496         void    *bca_uptr;
497         void    *bca_kptr;
498 };
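
/*
 * The bca_* fields implement a two-stage copyout: a handler that has to
 * return a variable-sized result (e.g. bridge_ioctl_gifs() below) fills in
 * bca_kptr with a kernel buffer it allocated, plus the matching user
 * pointer and length:
 *
 *     bc_arg->bca_kptr = breq;
 *     bc_arg->bca_len  = bifc->ifbic_len;
 *     bc_arg->bca_uptr = bifc->ifbic_req;
 *
 * bridge_ioctl() then copies bca_len bytes from bca_kptr out to bca_uptr
 * and frees the kernel buffer.
 */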
499
500 struct bridge_control {
501         bridge_ctl_t    bc_func;
502         int             bc_argsize;
503         int             bc_flags;
504 };
505
506 #define BC_F_COPYIN             0x01    /* copy arguments in */
507 #define BC_F_COPYOUT            0x02    /* copy arguments out */
508 #define BC_F_SUSER              0x04    /* do super-user check */
509
510 const struct bridge_control bridge_control_table[] = {
511         { bridge_ioctl_add,             sizeof(struct ifbreq),
512           BC_F_COPYIN|BC_F_SUSER },
513         { bridge_ioctl_del,             sizeof(struct ifbreq),
514           BC_F_COPYIN|BC_F_SUSER },
515
516         { bridge_ioctl_gifflags,        sizeof(struct ifbreq),
517           BC_F_COPYIN|BC_F_COPYOUT },
518         { bridge_ioctl_sifflags,        sizeof(struct ifbreq),
519           BC_F_COPYIN|BC_F_SUSER },
520
521         { bridge_ioctl_scache,          sizeof(struct ifbrparam),
522           BC_F_COPYIN|BC_F_SUSER },
523         { bridge_ioctl_gcache,          sizeof(struct ifbrparam),
524           BC_F_COPYOUT },
525
526         { bridge_ioctl_gifs,            sizeof(struct ifbifconf),
527           BC_F_COPYIN|BC_F_COPYOUT },
528         { bridge_ioctl_rts,             sizeof(struct ifbaconf),
529           BC_F_COPYIN|BC_F_COPYOUT },
530
531         { bridge_ioctl_saddr,           sizeof(struct ifbareq),
532           BC_F_COPYIN|BC_F_SUSER },
533
534         { bridge_ioctl_sto,             sizeof(struct ifbrparam),
535           BC_F_COPYIN|BC_F_SUSER },
536         { bridge_ioctl_gto,             sizeof(struct ifbrparam),
537           BC_F_COPYOUT },
538
539         { bridge_ioctl_daddr,           sizeof(struct ifbareq),
540           BC_F_COPYIN|BC_F_SUSER },
541
542         { bridge_ioctl_flush,           sizeof(struct ifbreq),
543           BC_F_COPYIN|BC_F_SUSER },
544
545         { bridge_ioctl_gpri,            sizeof(struct ifbrparam),
546           BC_F_COPYOUT },
547         { bridge_ioctl_spri,            sizeof(struct ifbrparam),
548           BC_F_COPYIN|BC_F_SUSER },
549
550         { bridge_ioctl_ght,             sizeof(struct ifbrparam),
551           BC_F_COPYOUT },
552         { bridge_ioctl_sht,             sizeof(struct ifbrparam),
553           BC_F_COPYIN|BC_F_SUSER },
554
555         { bridge_ioctl_gfd,             sizeof(struct ifbrparam),
556           BC_F_COPYOUT },
557         { bridge_ioctl_sfd,             sizeof(struct ifbrparam),
558           BC_F_COPYIN|BC_F_SUSER },
559
560         { bridge_ioctl_gma,             sizeof(struct ifbrparam),
561           BC_F_COPYOUT },
562         { bridge_ioctl_sma,             sizeof(struct ifbrparam),
563           BC_F_COPYIN|BC_F_SUSER },
564
565         { bridge_ioctl_sifprio,         sizeof(struct ifbreq),
566           BC_F_COPYIN|BC_F_SUSER },
567
568         { bridge_ioctl_sifcost,         sizeof(struct ifbreq),
569           BC_F_COPYIN|BC_F_SUSER },
570
571         { bridge_ioctl_addspan,         sizeof(struct ifbreq),
572           BC_F_COPYIN|BC_F_SUSER },
573         { bridge_ioctl_delspan,         sizeof(struct ifbreq),
574           BC_F_COPYIN|BC_F_SUSER },
575 };
576 static const int bridge_control_table_size =
577     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
578
579 LIST_HEAD(, bridge_softc) bridge_list;
580
581 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
582                                 bridge_clone_create,
583                                 bridge_clone_destroy, 0, IF_MAXUNIT);
584
585 static int
586 bridge_modevent(module_t mod, int type, void *data)
587 {
588         switch (type) {
589         case MOD_LOAD:
590                 LIST_INIT(&bridge_list);
591                 if_clone_attach(&bridge_cloner);
592                 bridge_input_p = bridge_input;
593                 bridge_output_p = bridge_output;
594                 bridge_detach_cookie = EVENTHANDLER_REGISTER(
595                     ifnet_detach_event, bridge_ifdetach, NULL,
596                     EVENTHANDLER_PRI_ANY);
597 #if notyet
598                 bstp_linkstate_p = bstp_linkstate;
599 #endif
600                 break;
601         case MOD_UNLOAD:
602                 if (!LIST_EMPTY(&bridge_list))
603                         return (EBUSY);
604                 EVENTHANDLER_DEREGISTER(ifnet_detach_event,
605                     bridge_detach_cookie);
606                 if_clone_detach(&bridge_cloner);
607                 bridge_input_p = NULL;
608                 bridge_output_p = NULL;
609 #if notyet
610                 bstp_linkstate_p = NULL;
611 #endif
612                 break;
613         default:
614                 return (EOPNOTSUPP);
615         }
616         return (0);
617 }
618
619 static moduledata_t bridge_mod = {
620         "if_bridge",
621         bridge_modevent,
622         0
623 };
624
625 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
626
627
628 /*
629  * bridge_clone_create:
630  *
631  *      Create a new bridge instance.
632  */
633 static int
634 bridge_clone_create(struct if_clone *ifc, int unit)
635 {
636         struct bridge_softc *sc;
637         struct ifnet *ifp;
638         u_char eaddr[6];
639         int cpu, rnd;
640
641         sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
642         ifp = sc->sc_ifp = &sc->sc_if;
643
644         sc->sc_brtmax = BRIDGE_RTABLE_MAX;
645         sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
646         sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
647         sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
648         sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
649         sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
650         sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
651
652         /* Initialize our routing table. */
653         bridge_rtable_init(sc);
654
655         callout_init(&sc->sc_brcallout);
656         netmsg_init(&sc->sc_brtimemsg, &netisr_adone_rport,
657                     MSGF_DROPABLE, bridge_timer_handler);
658         sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;
659
660         callout_init(&sc->sc_bstpcallout);
661         netmsg_init(&sc->sc_bstptimemsg, &netisr_adone_rport,
662                     MSGF_DROPABLE, bstp_tick_handler);
663         sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;
664
665         /* Initialize per-cpu member iface lists */
666         sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
667                                  M_DEVBUF, M_WAITOK);
668         for (cpu = 0; cpu < ncpus; ++cpu)
669                 LIST_INIT(&sc->sc_iflists[cpu]);
670
671         LIST_INIT(&sc->sc_spanlist);
672
673         ifp->if_softc = sc;
674         if_initname(ifp, ifc->ifc_name, unit);
675         ifp->if_mtu = ETHERMTU;
676         ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
677         ifp->if_ioctl = bridge_ioctl;
678         ifp->if_start = bridge_start;
679         ifp->if_init = bridge_init;
680         ifp->if_type = IFT_BRIDGE;
681         ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
682         ifp->if_snd.ifq_maxlen = ifqmaxlen;
683         ifq_set_ready(&ifp->if_snd);
684         ifp->if_hdrlen = ETHER_HDR_LEN;
685
686         /*
687          * Generate a random Ethernet address and mark it as a locally
688          * administered, unicast address.
689          */
690         rnd = karc4random();
691         bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
692         rnd = karc4random();
693         bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
694
695         eaddr[0] &= ~1; /* clear multicast bit */
696         eaddr[0] |= 2;  /* set the LAA bit */
697
698         ether_ifattach(ifp, eaddr, NULL);
699         /* Now undo some of the damage... */
700         ifp->if_baudrate = 0;
701         ifp->if_type = IFT_BRIDGE;
702
703         crit_enter();   /* XXX MP */
704         LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
705         crit_exit();
706
707         return (0);
708 }
709
710 static void
711 bridge_delete_dispatch(struct netmsg *nmsg)
712 {
713         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
714         struct bridge_softc *sc = lmsg->u.ms_resultp;
715         struct ifnet *bifp = sc->sc_ifp;
716         struct bridge_iflist *bif;
717
718         lwkt_serialize_enter(bifp->if_serializer);
719
720         while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
721                 bridge_delete_member(sc, bif, 0);
722
723         while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
724                 bridge_delete_span(sc, bif);
725
726         lwkt_serialize_exit(bifp->if_serializer);
727
728         lwkt_replymsg(lmsg, 0);
729 }
730
731 /*
732  * bridge_clone_destroy:
733  *
734  *      Destroy a bridge instance.
735  */
736 static void
737 bridge_clone_destroy(struct ifnet *ifp)
738 {
739         struct bridge_softc *sc = ifp->if_softc;
740         struct lwkt_msg *lmsg;
741         struct netmsg nmsg;
742
743         lwkt_serialize_enter(ifp->if_serializer);
744
745         bridge_stop(ifp);
746         ifp->if_flags &= ~IFF_UP;
747
748         lwkt_serialize_exit(ifp->if_serializer);
749
750         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_delete_dispatch);
751         lmsg = &nmsg.nm_lmsg;
752         lmsg->u.ms_resultp = sc;
753         lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
754
755         crit_enter();   /* XXX MP */
756         LIST_REMOVE(sc, sc_list);
757         crit_exit();
758
759         ether_ifdetach(ifp);
760
761         /* Tear down the routing table. */
762         bridge_rtable_fini(sc);
763
764         /* Free per-cpu member iface lists */
765         kfree(sc->sc_iflists, M_DEVBUF);
766
767         kfree(sc, M_DEVBUF);
768 }
769
770 /*
771  * bridge_ioctl:
772  *
773  *      Handle a control request from the operator.
774  */
775 static int
776 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
777 {
778         struct bridge_softc *sc = ifp->if_softc;
779         struct bridge_control_arg args;
780         struct ifdrv *ifd = (struct ifdrv *) data;
781         const struct bridge_control *bc;
782         int error = 0;
783
784         ASSERT_SERIALIZED(ifp->if_serializer);
785
786         switch (cmd) {
787         case SIOCADDMULTI:
788         case SIOCDELMULTI:
789                 break;
790
791         case SIOCGDRVSPEC:
792         case SIOCSDRVSPEC:
793                 if (ifd->ifd_cmd >= bridge_control_table_size) {
794                         error = EINVAL;
795                         break;
796                 }
797                 bc = &bridge_control_table[ifd->ifd_cmd];
798
799                 if (cmd == SIOCGDRVSPEC &&
800                     (bc->bc_flags & BC_F_COPYOUT) == 0) {
801                         error = EINVAL;
802                         break;
803                 } else if (cmd == SIOCSDRVSPEC &&
804                            (bc->bc_flags & BC_F_COPYOUT)) {
805                         error = EINVAL;
806                         break;
807                 }
808
809                 if (bc->bc_flags & BC_F_SUSER) {
810                         error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
811                         if (error)
812                                 break;
813                 }
814
815                 if (ifd->ifd_len != bc->bc_argsize ||
816                     ifd->ifd_len > sizeof(args.bca_u)) {
817                         error = EINVAL;
818                         break;
819                 }
820
821                 memset(&args, 0, sizeof(args));
822                 if (bc->bc_flags & BC_F_COPYIN) {
823                         error = copyin(ifd->ifd_data, &args.bca_u,
824                                        ifd->ifd_len);
825                         if (error)
826                                 break;
827                 }
828
829                 error = bridge_control(sc, cmd, bc->bc_func, &args);
830                 if (error) {
831                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
832                         break;
833                 }
834
835                 if (bc->bc_flags & BC_F_COPYOUT) {
836                         error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
837                         if (args.bca_len != 0) {
838                                 KKASSERT(args.bca_kptr != NULL);
839                                 if (!error) {
840                                         error = copyout(args.bca_kptr,
841                                                 args.bca_uptr, args.bca_len);
842                                 }
843                                 kfree(args.bca_kptr, M_TEMP);
844                         } else {
845                                 KKASSERT(args.bca_kptr == NULL);
846                         }
847                 } else {
848                         KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
849                 }
850                 break;
851
852         case SIOCSIFFLAGS:
853                 if (!(ifp->if_flags & IFF_UP) &&
854                     (ifp->if_flags & IFF_RUNNING)) {
855                         /*
856                          * If interface is marked down and it is running,
857                          * then stop it.
858                          */
859                         bridge_stop(ifp);
860                 } else if ((ifp->if_flags & IFF_UP) &&
861                     !(ifp->if_flags & IFF_RUNNING)) {
862                         /*
863                          * If interface is marked up and it is stopped, then
864                          * start it.
865                          */
866                         ifp->if_init(sc);
867                 }
868                 break;
869
870         case SIOCSIFMTU:
871                 /* Do not allow the MTU to be changed on the bridge */
872                 error = EINVAL;
873                 break;
874
875         default:
876                 error = ether_ioctl(ifp, cmd, data);
877                 break;
878         }
879         return (error);
880 }
881
882 /*
883  * bridge_mutecaps:
884  *
885  *      Clear or restore unwanted capabilities on the member interface
886  */
887 static void
888 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
889 {
890         struct ifreq ifr;
891         int error;
892
893         if (ifp->if_ioctl == NULL)
894                 return;
895
896         bzero(&ifr, sizeof(ifr));
897         ifr.ifr_reqcap = ifp->if_capenable;
898
899         if (mute) {
900                 /* mask off and save capabilities */
901                 bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
902                 if (bif_info->bifi_mutecap != 0)
903                         ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
904         } else {
905                 /* restore muted capabilities */
906                 ifr.ifr_reqcap |= bif_info->bifi_mutecap;
907         }
908
909         if (bif_info->bifi_mutecap != 0) {
910                 lwkt_serialize_enter(ifp->if_serializer);
911                 error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
912                 lwkt_serialize_exit(ifp->if_serializer);
913         }
914 }
915
916 /*
917  * bridge_lookup_member:
918  *
919  *      Lookup a bridge member interface.
920  */
921 static struct bridge_iflist *
922 bridge_lookup_member(struct bridge_softc *sc, const char *name)
923 {
924         struct bridge_iflist *bif;
925
926         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
927                 if (strcmp(bif->bif_ifp->if_xname, name) == 0)
928                         return (bif);
929         }
930         return (NULL);
931 }
932
933 /*
934  * bridge_lookup_member_if:
935  *
936  *      Lookup a bridge member interface by ifnet*.
937  */
938 static struct bridge_iflist *
939 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
940 {
941         struct bridge_iflist *bif;
942
943         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
944                 if (bif->bif_ifp == member_ifp)
945                         return (bif);
946         }
947         return (NULL);
948 }
949
950 /*
951  * bridge_lookup_member_ifinfo:
952  *
953  *      Lookup a bridge member interface by bridge_ifinfo.
954  */
955 static struct bridge_iflist *
956 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
957                             struct bridge_ifinfo *bif_info)
958 {
959         struct bridge_iflist *bif;
960
961         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
962                 if (bif->bif_info == bif_info)
963                         return (bif);
964         }
965         return (NULL);
966 }
967
968 /*
969  * bridge_delete_member:
970  *
971  *      Delete the specified member interface.
972  */
973 static void
974 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
975     int gone)
976 {
977         struct ifnet *ifs = bif->bif_ifp;
978         struct ifnet *bifp = sc->sc_ifp;
979         struct bridge_ifinfo *bif_info = bif->bif_info;
980         struct bridge_iflist_head saved_bifs;
981
982         ASSERT_SERIALIZED(bifp->if_serializer);
983         KKASSERT(bif_info != NULL);
984
985         ifs->if_bridge = NULL;
986
987         /*
988          * Release the bridge interface's serializer:
989          * - To avoid a possible deadlock.
990          * - Various sync operations will block the current thread.
991          */
992         lwkt_serialize_exit(bifp->if_serializer);
993
994         if (!gone) {
995                 switch (ifs->if_type) {
996                 case IFT_ETHER:
997                 case IFT_L2VLAN:
998                         /*
999                          * Take the interface out of promiscuous mode.
1000                          */
1001                         ifpromisc(ifs, 0);
1002                         bridge_mutecaps(bif_info, ifs, 0);
1003                         break;
1004
1005                 case IFT_GIF:
1006                         break;
1007
1008                 default:
1009                         panic("bridge_delete_member: impossible");
1010                         break;
1011                 }
1012         }
1013
1014         /*
1015          * Remove bifs from the percpu linked lists.
1016          *
1017          * Removed bifs are not freed immediately; instead,
1018          * they are saved in saved_bifs.  They will be freed
1019          * after we make sure that no one is accessing them,
1020          * i.e. after the following netmsg_service_sync().
1021          */
1022         LIST_INIT(&saved_bifs);
1023         bridge_del_bif(sc, bif_info, &saved_bifs);
1024
1025         /*
1026          * Make sure that all protocol threads:
1027          * o  see that 'ifs' if_bridge has been changed
1028          * o  know that bif has been removed from the percpu linked lists
1029          */
1030         netmsg_service_sync();
1031
1032         /*
1033          * Free the removed bifs
1034          */
1035         KKASSERT(!LIST_EMPTY(&saved_bifs));
1036         while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1037                 LIST_REMOVE(bif, bif_next);
1038                 kfree(bif, M_DEVBUF);
1039         }
1040
1041         /* See the comment in bridge_ioctl_stop() */
1042         bridge_rtmsg_sync(sc);
1043         bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1044
1045         lwkt_serialize_enter(bifp->if_serializer);
1046
1047         if (bifp->if_flags & IFF_RUNNING)
1048                 bstp_initialization(sc);
1049
1050         /*
1051          * Free the bif_info after bstp_initialization(), so that
1052          * bridge_softc.sc_root_port will not reference a dangling
1053          * pointer.
1054          */
1055         kfree(bif_info, M_DEVBUF);
1056 }
1057
1058 /*
1059  * bridge_delete_span:
1060  *
1061  *      Delete the specified span interface.
1062  */
1063 static void
1064 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1065 {
1066         KASSERT(bif->bif_ifp->if_bridge == NULL,
1067             ("%s: not a span interface", __func__));
1068
1069         LIST_REMOVE(bif, bif_next);
1070         kfree(bif, M_DEVBUF);
1071 }
1072
1073 static int
1074 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1075 {
1076         struct ifnet *ifp = sc->sc_ifp;
1077
1078         if (ifp->if_flags & IFF_RUNNING)
1079                 return 0;
1080
1081         callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1082             bridge_timer, sc);
1083
1084         ifp->if_flags |= IFF_RUNNING;
1085         bstp_initialization(sc);
1086         return 0;
1087 }
1088
1089 static int
1090 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1091 {
1092         struct ifnet *ifp = sc->sc_ifp;
1093         struct lwkt_msg *lmsg;
1094
1095         if ((ifp->if_flags & IFF_RUNNING) == 0)
1096                 return 0;
1097
1098         callout_stop(&sc->sc_brcallout);
1099
1100         crit_enter();
1101         lmsg = &sc->sc_brtimemsg.nm_lmsg;
1102         if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1103                 /* Pending to be processed; drop it */
1104                 lwkt_dropmsg(lmsg);
1105         }
1106         crit_exit();
1107
1108         bstp_stop(sc);
1109
1110         ifp->if_flags &= ~IFF_RUNNING;
1111
1112         lwkt_serialize_exit(ifp->if_serializer);
1113
1114         /* Let everyone know that we are stopped */
1115         netmsg_service_sync();
1116
1117         /*
1118          * Sync ifnetX msgports in the order we forward rtnode
1119          * installation messages.  This is used to make sure that
1120          * all rtnode installation messages sent by bridge_rtupdate()
1121          * during the above netmsg_service_sync() are flushed.
1122          */
1123         bridge_rtmsg_sync(sc);
1124         bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1125
1126         lwkt_serialize_enter(ifp->if_serializer);
1127         return 0;
1128 }
1129
1130 static int
1131 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1132 {
1133         struct ifbreq *req = arg;
1134         struct bridge_iflist *bif;
1135         struct bridge_ifinfo *bif_info;
1136         struct ifnet *ifs, *bifp;
1137         int error = 0;
1138
1139         bifp = sc->sc_ifp;
1140         ASSERT_SERIALIZED(bifp->if_serializer);
1141
1142         ifs = ifunit(req->ifbr_ifsname);
1143         if (ifs == NULL)
1144                 return (ENOENT);
1145
1146         /* If it's in the span list, it can't be a member. */
1147         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1148                 if (ifs == bif->bif_ifp)
1149                         return (EBUSY);
1150
1151         /* Allow the first Ethernet member to define the MTU */
1152         if (ifs->if_type != IFT_GIF) {
1153                 if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1154                         bifp->if_mtu = ifs->if_mtu;
1155                 } else if (bifp->if_mtu != ifs->if_mtu) {
1156                         if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1157                         return (EINVAL);
1158                 }
1159         }
1160
1161         if (ifs->if_bridge == sc)
1162                 return (EEXIST);
1163
1164         if (ifs->if_bridge != NULL)
1165                 return (EBUSY);
1166
1167         bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1168         bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1169         bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1170         bif_info->bifi_ifp = ifs;
1171
1172         /*
1173          * Release the bridge interface's serializer:
1174          * - To avoid a possible deadlock.
1175          * - Various sync operations will block the current thread.
1176          */
1177         lwkt_serialize_exit(bifp->if_serializer);
1178
1179         switch (ifs->if_type) {
1180         case IFT_ETHER:
1181         case IFT_L2VLAN:
1182                 /*
1183                  * Place the interface into promiscuous mode.
1184                  */
1185                 error = ifpromisc(ifs, 1);
1186                 if (error) {
1187                         lwkt_serialize_enter(bifp->if_serializer);
1188                         goto out;
1189                 }
1190                 bridge_mutecaps(bif_info, ifs, 1);
1191                 break;
1192
1193         case IFT_GIF: /* :^) */
1194                 break;
1195
1196         default:
1197                 error = EINVAL;
1198                 lwkt_serialize_enter(bifp->if_serializer);
1199                 goto out;
1200         }
1201
1202         /*
1203          * Add bifs to percpu linked lists
1204          */
1205         bridge_add_bif(sc, bif_info, ifs);
1206
1207         lwkt_serialize_enter(bifp->if_serializer);
1208
1209         if (bifp->if_flags & IFF_RUNNING)
1210                 bstp_initialization(sc);
1211         else
1212                 bstp_stop(sc);
1213
1214         /*
1215          * Everything has been setup, so let the member interface
1216          * Everything has been set up, so let the member interface
1217          */
1218         ifs->if_bridge = sc;
1219 out:
1220         if (error) {
1221                 if (bif_info != NULL)
1222                         kfree(bif_info, M_DEVBUF);
1223         }
1224         return (error);
1225 }
1226
1227 static int
1228 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1229 {
1230         struct ifbreq *req = arg;
1231         struct bridge_iflist *bif;
1232
1233         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1234         if (bif == NULL)
1235                 return (ENOENT);
1236
1237         bridge_delete_member(sc, bif, 0);
1238
1239         return (0);
1240 }
1241
1242 static int
1243 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1244 {
1245         struct ifbreq *req = arg;
1246         struct bridge_iflist *bif;
1247
1248         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1249         if (bif == NULL)
1250                 return (ENOENT);
1251
1252         req->ifbr_ifsflags = bif->bif_flags;
1253         req->ifbr_state = bif->bif_state;
1254         req->ifbr_priority = bif->bif_priority;
1255         req->ifbr_path_cost = bif->bif_path_cost;
1256         req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1257
1258         return (0);
1259 }
1260
1261 static int
1262 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1263 {
1264         struct ifbreq *req = arg;
1265         struct bridge_iflist *bif;
1266         struct ifnet *bifp = sc->sc_ifp;
1267
1268         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1269         if (bif == NULL)
1270                 return (ENOENT);
1271
1272         if (req->ifbr_ifsflags & IFBIF_SPAN) {
1273                 /* SPAN is readonly */
1274                 return (EINVAL);
1275         }
1276
1277         if (req->ifbr_ifsflags & IFBIF_STP) {
1278                 switch (bif->bif_ifp->if_type) {
1279                 case IFT_ETHER:
1280                         /* These can do spanning tree. */
1281                         break;
1282
1283                 default:
1284                         /* Nothing else can. */
1285                         return (EINVAL);
1286                 }
1287         }
1288
1289         lwkt_serialize_exit(bifp->if_serializer);
1290         bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1291         lwkt_serialize_enter(bifp->if_serializer);
1292
1293         if (bifp->if_flags & IFF_RUNNING)
1294                 bstp_initialization(sc);
1295
1296         return (0);
1297 }
1298
1299 static int
1300 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1301 {
1302         struct ifbrparam *param = arg;
1303         struct ifnet *ifp = sc->sc_ifp;
1304
1305         sc->sc_brtmax = param->ifbrp_csize;
1306
1307         lwkt_serialize_exit(ifp->if_serializer);
1308         bridge_rttrim(sc);
1309         lwkt_serialize_enter(ifp->if_serializer);
1310
1311         return (0);
1312 }
1313
1314 static int
1315 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1316 {
1317         struct ifbrparam *param = arg;
1318
1319         param->ifbrp_csize = sc->sc_brtmax;
1320
1321         return (0);
1322 }
1323
1324 static int
1325 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1326 {
1327         struct bridge_control_arg *bc_arg = arg;
1328         struct ifbifconf *bifc = arg;
1329         struct bridge_iflist *bif;
1330         struct ifbreq *breq;
1331         int count, len;
1332
1333         count = 0;
1334         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1335                 count++;
1336         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1337                 count++;
1338
1339         if (bifc->ifbic_len == 0) {
1340                 bifc->ifbic_len = sizeof(*breq) * count;
1341                 return 0;
1342         } else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1343                 bifc->ifbic_len = 0;
1344                 return 0;
1345         }
1346
1347         len = min(bifc->ifbic_len, sizeof(*breq) * count);
1348         KKASSERT(len >= sizeof(*breq));
1349
1350         breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1351         if (breq == NULL) {
1352                 bifc->ifbic_len = 0;
1353                 return ENOMEM;
1354         }
1355         bc_arg->bca_kptr = breq;
1356
1357         count = 0;
1358         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1359                 if (len < sizeof(*breq))
1360                         break;
1361
1362                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1363                         sizeof(breq->ifbr_ifsname));
1364                 breq->ifbr_ifsflags = bif->bif_flags;
1365                 breq->ifbr_state = bif->bif_state;
1366                 breq->ifbr_priority = bif->bif_priority;
1367                 breq->ifbr_path_cost = bif->bif_path_cost;
1368                 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1369                 breq++;
1370                 count++;
1371                 len -= sizeof(*breq);
1372         }
1373         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1374                 if (len < sizeof(*breq))
1375                         break;
1376
1377                 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1378                         sizeof(breq->ifbr_ifsname));
1379                 breq->ifbr_ifsflags = bif->bif_flags;
1380                 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1381                 breq++;
1382                 count++;
1383                 len -= sizeof(*breq);
1384         }
1385
1386         bifc->ifbic_len = sizeof(*breq) * count;
1387         KKASSERT(bifc->ifbic_len > 0);
1388
1389         bc_arg->bca_len = bifc->ifbic_len;
1390         bc_arg->bca_uptr = bifc->ifbic_req;
1391         return 0;
1392 }
1393
1394 static int
1395 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1396 {
1397         struct bridge_control_arg *bc_arg = arg;
1398         struct ifbaconf *bac = arg;
1399         struct bridge_rtnode *brt;
1400         struct ifbareq *bareq;
1401         int count, len;
1402
1403         count = 0;
1404         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1405                 count++;
1406
1407         if (bac->ifbac_len == 0) {
1408                 bac->ifbac_len = sizeof(*bareq) * count;
1409                 return 0;
1410         } else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1411                 bac->ifbac_len = 0;
1412                 return 0;
1413         }
1414
1415         len = min(bac->ifbac_len, sizeof(*bareq) * count);
1416         KKASSERT(len >= sizeof(*bareq));
1417
1418         bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1419         if (bareq == NULL) {
1420                 bac->ifbac_len = 0;
1421                 return ENOMEM;
1422         }
1423         bc_arg->bca_kptr = bareq;
1424
1425         count = 0;
1426         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1427                 struct bridge_rtinfo *bri = brt->brt_info;
1428                 unsigned long expire;
1429
1430                 if (len < sizeof(*bareq))
1431                         break;
1432
1433                 strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1434                         sizeof(bareq->ifba_ifsname));
1435                 memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1436                 expire = bri->bri_expire;
1437                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1438                     time_second < expire)
1439                         bareq->ifba_expire = expire - time_second;
1440                 else
1441                         bareq->ifba_expire = 0;
1442                 bareq->ifba_flags = bri->bri_flags;
1443                 bareq++;
1444                 count++;
1445                 len -= sizeof(*bareq);
1446         }
1447
1448         bac->ifbac_len = sizeof(*bareq) * count;
1449         KKASSERT(bac->ifbac_len > 0);
1450
1451         bc_arg->bca_len = bac->ifbac_len;
1452         bc_arg->bca_uptr = bac->ifbac_req;
1453         return 0;
1454 }
1455
1456 static int
1457 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1458 {
1459         struct ifbareq *req = arg;
1460         struct bridge_iflist *bif;
1461         struct ifnet *ifp = sc->sc_ifp;
1462         int error;
1463
1464         ASSERT_SERIALIZED(ifp->if_serializer);
1465
1466         bif = bridge_lookup_member(sc, req->ifba_ifsname);
1467         if (bif == NULL)
1468                 return (ENOENT);
1469
1470         lwkt_serialize_exit(ifp->if_serializer);
1471         error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1472                                req->ifba_flags);
1473         lwkt_serialize_enter(ifp->if_serializer);
1474         return (error);
1475 }
1476
1477 static int
1478 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1479 {
1480         struct ifbrparam *param = arg;
1481
1482         sc->sc_brttimeout = param->ifbrp_ctime;
1483
1484         return (0);
1485 }
1486
1487 static int
1488 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1489 {
1490         struct ifbrparam *param = arg;
1491
1492         param->ifbrp_ctime = sc->sc_brttimeout;
1493
1494         return (0);
1495 }
1496
1497 static int
1498 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1499 {
1500         struct ifbareq *req = arg;
1501         struct ifnet *ifp = sc->sc_ifp;
1502         int error;
1503
1504         lwkt_serialize_exit(ifp->if_serializer);
1505         error = bridge_rtdaddr(sc, req->ifba_dst);
1506         lwkt_serialize_enter(ifp->if_serializer);
1507         return error;
1508 }
1509
1510 static int
1511 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1512 {
1513         struct ifbreq *req = arg;
1514         struct ifnet *ifp = sc->sc_ifp;
1515
1516         lwkt_serialize_exit(ifp->if_serializer);
1517         bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1518         lwkt_serialize_enter(ifp->if_serializer);
1519
1520         return (0);
1521 }
1522
1523 static int
1524 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1525 {
1526         struct ifbrparam *param = arg;
1527
1528         param->ifbrp_prio = sc->sc_bridge_priority;
1529
1530         return (0);
1531 }
1532
1533 static int
1534 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1535 {
1536         struct ifbrparam *param = arg;
1537
1538         sc->sc_bridge_priority = param->ifbrp_prio;
1539
1540         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1541                 bstp_initialization(sc);
1542
1543         return (0);
1544 }
1545
1546 static int
1547 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1548 {
1549         struct ifbrparam *param = arg;
1550
1551         param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1552
1553         return (0);
1554 }
1555
1556 static int
1557 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1558 {
1559         struct ifbrparam *param = arg;
1560
1561         if (param->ifbrp_hellotime == 0)
1562                 return (EINVAL);
1563         sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1564
1565         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1566                 bstp_initialization(sc);
1567
1568         return (0);
1569 }
1570
1571 static int
1572 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1573 {
1574         struct ifbrparam *param = arg;
1575
1576         param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1577
1578         return (0);
1579 }
1580
1581 static int
1582 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1583 {
1584         struct ifbrparam *param = arg;
1585
1586         if (param->ifbrp_fwddelay == 0)
1587                 return (EINVAL);
1588         sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1589
1590         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1591                 bstp_initialization(sc);
1592
1593         return (0);
1594 }
1595
1596 static int
1597 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1598 {
1599         struct ifbrparam *param = arg;
1600
1601         param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1602
1603         return (0);
1604 }
1605
1606 static int
1607 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1608 {
1609         struct ifbrparam *param = arg;
1610
1611         if (param->ifbrp_maxage == 0)
1612                 return (EINVAL);
1613         sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1614
1615         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1616                 bstp_initialization(sc);
1617
1618         return (0);
1619 }
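
/*
 * Note on the shifts in the timer get/set handlers above (added for
 * clarity): the STP timing parameters are kept internally in units of
 * 1/256 of a second, the granularity 802.1D uses when encoding times in
 * configuration BPDUs.  The set handlers therefore scale whole seconds up
 * with "<< 8" and the get handlers scale back down with ">> 8"; e.g.
 * setting ifbrp_hellotime = 2 stores 2 << 8 = 512 internally, and reading
 * it back yields 512 >> 8 = 2 seconds.
 */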
1620
1621 static int
1622 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1623 {
1624         struct ifbreq *req = arg;
1625         struct bridge_iflist *bif;
1626
1627         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1628         if (bif == NULL)
1629                 return (ENOENT);
1630
1631         bif->bif_priority = req->ifbr_priority;
1632
1633         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1634                 bstp_initialization(sc);
1635
1636         return (0);
1637 }
1638
1639 static int
1640 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1641 {
1642         struct ifbreq *req = arg;
1643         struct bridge_iflist *bif;
1644
1645         bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1646         if (bif == NULL)
1647                 return (ENOENT);
1648
1649         bif->bif_path_cost = req->ifbr_path_cost;
1650
1651         if (sc->sc_ifp->if_flags & IFF_RUNNING)
1652                 bstp_initialization(sc);
1653
1654         return (0);
1655 }
1656
1657 static int
1658 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1659 {
1660         struct ifbreq *req = arg;
1661         struct bridge_iflist *bif;
1662         struct ifnet *ifs;
1663
1664         ifs = ifunit(req->ifbr_ifsname);
1665         if (ifs == NULL)
1666                 return (ENOENT);
1667
1668         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1669                 if (ifs == bif->bif_ifp)
1670                         return (EBUSY);
1671
1672         if (ifs->if_bridge != NULL)
1673                 return (EBUSY);
1674
1675         switch (ifs->if_type) {
1676         case IFT_ETHER:
1677         case IFT_GIF:
1678         case IFT_L2VLAN:
1679                 break;
1680
1681         default:
1682                 return (EINVAL);
1683         }
1684
1685         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1686         bif->bif_ifp = ifs;
1687         bif->bif_flags = IFBIF_SPAN;
1688         /* NOTE: span bif does not need bridge_ifinfo */
1689
1690         LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1691
1692         sc->sc_span = 1;
1693
1694         return (0);
1695 }
1696
1697 static int
1698 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1699 {
1700         struct ifbreq *req = arg;
1701         struct bridge_iflist *bif;
1702         struct ifnet *ifs;
1703
1704         ifs = ifunit(req->ifbr_ifsname);
1705         if (ifs == NULL)
1706                 return (ENOENT);
1707
1708         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1709                 if (ifs == bif->bif_ifp)
1710                         break;
1711
1712         if (bif == NULL)
1713                 return (ENOENT);
1714
1715         bridge_delete_span(sc, bif);
1716
1717         if (LIST_EMPTY(&sc->sc_spanlist))
1718                 sc->sc_span = 0;
1719
1720         return (0);
1721 }
1722
1723 static void
1724 bridge_ifdetach_dispatch(struct netmsg *nmsg)
1725 {
1726         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1727         struct ifnet *ifp, *bifp;
1728         struct bridge_softc *sc;
1729         struct bridge_iflist *bif;
1730
1731         ifp = lmsg->u.ms_resultp;
1732         sc = ifp->if_bridge;
1733
1734         /* Check if the interface is a bridge member */
1735         if (sc != NULL) {
1736                 bifp = sc->sc_ifp;
1737
1738                 lwkt_serialize_enter(bifp->if_serializer);
1739
1740                 bif = bridge_lookup_member_if(sc, ifp);
1741                 if (bif != NULL) {
1742                         bridge_delete_member(sc, bif, 1);
1743                 } else {
1744                         /* XXX Why would bif be NULL here? */
1745                 }
1746
1747                 lwkt_serialize_exit(bifp->if_serializer);
1748                 goto reply;
1749         }
1750
1751         crit_enter();   /* XXX MP */
1752
1753         /* Check if the interface is a span port */
1754         LIST_FOREACH(sc, &bridge_list, sc_list) {
1755                 bifp = sc->sc_ifp;
1756
1757                 lwkt_serialize_enter(bifp->if_serializer);
1758
1759                 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1760                         if (ifp == bif->bif_ifp) {
1761                                 bridge_delete_span(sc, bif);
1762                                 break;
1763                         }
1764
1765                 lwkt_serialize_exit(bifp->if_serializer);
1766         }
1767
1768         crit_exit();
1769
1770 reply:
1771         lwkt_replymsg(lmsg, 0);
1772 }
1773
1774 /*
1775  * bridge_ifdetach:
1776  *
1777  *      Detach an interface from a bridge.  Called when a member
1778  *      interface is detaching.
1779  */
1780 static void
1781 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1782 {
1783         struct lwkt_msg *lmsg;
1784         struct netmsg nmsg;
1785
1786         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_ifdetach_dispatch);
1787         lmsg = &nmsg.nm_lmsg;
1788         lmsg->u.ms_resultp = ifp;
1789
1790         lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1791 }
1792
1793 /*
1794  * bridge_init:
1795  *
1796  *      Initialize a bridge interface.
1797  */
1798 static void
1799 bridge_init(void *xsc)
1800 {
1801         bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1802 }
1803
1804 /*
1805  * bridge_stop:
1806  *
1807  *      Stop the bridge interface.
1808  */
1809 static void
1810 bridge_stop(struct ifnet *ifp)
1811 {
1812         bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1813 }
1814
1815 /*
1816  * bridge_enqueue:
1817  *
1818  *      Enqueue a packet on a bridge member interface.
1819  *
1820  */
1821 void
1822 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1823 {
1824         struct netmsg_packet *nmp;
1825
1826         nmp = &m->m_hdr.mh_netmsg;
1827         netmsg_init(&nmp->nm_netmsg, &netisr_apanic_rport, 0,
1828                     bridge_enqueue_handler);
1829         nmp->nm_packet = m;
1830         nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1831
1832         lwkt_sendmsg(curnetport, &nmp->nm_netmsg.nm_lmsg);
1833 }
1834
1835 /*
1836  * bridge_output:
1837  *
1838  *      Send output from a bridge member interface.  This
1839  *      performs the bridging function for locally originated
1840  *      packets.
1841  *
1842  *      The mbuf has the Ethernet header already attached.  We must
1843  *      enqueue or free the mbuf before returning.
1844  */
1845 static int
1846 bridge_output(struct ifnet *ifp, struct mbuf *m)
1847 {
1848         struct bridge_softc *sc = ifp->if_bridge;
1849         struct ether_header *eh;
1850         struct ifnet *dst_if, *bifp;
1851
1852         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
1853
1854         /*
1855          * Make sure that we are still a member of a bridge interface.
1856          */
1857         if (sc == NULL) {
1858                 m_freem(m);
1859                 return (0);
1860         }
1861         bifp = sc->sc_ifp;
1862
1863         if (m->m_len < ETHER_HDR_LEN) {
1864                 m = m_pullup(m, ETHER_HDR_LEN);
1865                 if (m == NULL)
1866                         return (0);
1867         }
1868         eh = mtod(m, struct ether_header *);
1869
1870         /*
1871          * If bridge is down, but the original output interface is up,
1872          * go ahead and send out that interface.  Otherwise, the packet
1873          * is dropped below.
1874          */
1875         if ((bifp->if_flags & IFF_RUNNING) == 0) {
1876                 dst_if = ifp;
1877                 goto sendunicast;
1878         }
1879
1880         /*
1881          * If the packet is a multicast, or we don't know a better way to
1882          * get there, send to all interfaces.
1883          */
1884         if (ETHER_IS_MULTICAST(eh->ether_dhost))
1885                 dst_if = NULL;
1886         else
1887                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1888         if (dst_if == NULL) {
1889                 struct bridge_iflist *bif, *nbif;
1890                 struct mbuf *mc;
1891                 int used = 0;
1892
1893                 if (sc->sc_span)
1894                         bridge_span(sc, m);
1895
1896                 LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1897                                      bif_next, nbif) {
1898                         dst_if = bif->bif_ifp;
1899                         if ((dst_if->if_flags & IFF_RUNNING) == 0)
1900                                 continue;
1901
1902                         /*
1903                          * If this is not the original output interface,
1904                          * and the interface is participating in spanning
1905                          * tree, make sure the port is in a state that
1906                          * allows forwarding.
1907                          */
1908                         if (dst_if != ifp &&
1909                             (bif->bif_flags & IFBIF_STP) != 0) {
1910                                 switch (bif->bif_state) {
1911                                 case BSTP_IFSTATE_BLOCKING:
1912                                 case BSTP_IFSTATE_LISTENING:
1913                                 case BSTP_IFSTATE_DISABLED:
1914                                         continue;
1915                                 }
1916                         }
1917
1918                         if (LIST_NEXT(bif, bif_next) == NULL) {
1919                                 used = 1;
1920                                 mc = m;
1921                         } else {
1922                                 mc = m_copypacket(m, MB_DONTWAIT);
1923                                 if (mc == NULL) {
1924                                         bifp->if_oerrors++;
1925                                         continue;
1926                                 }
1927                         }
1928                         bridge_handoff(dst_if, mc);
1929
1930                         if (nbif != NULL && !nbif->bif_onlist) {
1931                                 KKASSERT(bif->bif_onlist);
1932                                 nbif = LIST_NEXT(bif, bif_next);
1933                         }
1934                 }
1935                 if (used == 0)
1936                         m_freem(m);
1937                 return (0);
1938         }
1939
1940 sendunicast:
1941         /*
1942          * XXX Spanning tree consideration here?
1943          */
1944         if (sc->sc_span)
1945                 bridge_span(sc, m);
1946         if ((dst_if->if_flags & IFF_RUNNING) == 0)
1947                 m_freem(m);
1948         else
1949                 bridge_handoff(dst_if, m);
1950         return (0);
1951 }
1952
1953 /*
1954  * bridge_start:
1955  *
1956  *      Start output on a bridge.
1957  *
1958  */
1959 static void
1960 bridge_start(struct ifnet *ifp)
1961 {
1962         struct bridge_softc *sc = ifp->if_softc;
1963
1964         ASSERT_SERIALIZED(ifp->if_serializer);
1965
1966         ifp->if_flags |= IFF_OACTIVE;
1967         for (;;) {
1968                 struct ifnet *dst_if = NULL;
1969                 struct ether_header *eh;
1970                 struct mbuf *m;
1971
1972                 m = ifq_dequeue(&ifp->if_snd, NULL);
1973                 if (m == NULL)
1974                         break;
1975
1976                 if (m->m_len < sizeof(*eh)) {
1977                         m = m_pullup(m, sizeof(*eh));
1978                         if (m == NULL) {
1979                                 ifp->if_oerrors++;
1980                                 continue;
1981                         }
1982                 }
1983                 eh = mtod(m, struct ether_header *);
1984
1985                 BPF_MTAP(ifp, m);
1986                 ifp->if_opackets++;
1987
1988                 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1989                         dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1990
1991                 if (dst_if == NULL)
1992                         bridge_start_bcast(sc, m);
1993                 else
1994                         bridge_enqueue(dst_if, m);
1995         }
1996         ifp->if_flags &= ~IFF_OACTIVE;
1997 }
1998
1999 /*
2000  * bridge_forward:
2001  *
2002  *      The forwarding function of the bridge.
2003  */
2004 static void
2005 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2006 {
2007         struct bridge_iflist *bif;
2008         struct ifnet *src_if, *dst_if, *ifp;
2009         struct ether_header *eh;
2010
2011         src_if = m->m_pkthdr.rcvif;
2012         ifp = sc->sc_ifp;
2013
2014         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
2015
2016         ifp->if_ipackets++;
2017         ifp->if_ibytes += m->m_pkthdr.len;
2018
2019         /*
2020          * Look up the bridge_iflist.
2021          */
2022         bif = bridge_lookup_member_if(sc, src_if);
2023         if (bif == NULL) {
2024                 /* Interface is not a bridge member (anymore?) */
2025                 m_freem(m);
2026                 return;
2027         }
2028
2029         if (bif->bif_flags & IFBIF_STP) {
2030                 switch (bif->bif_state) {
2031                 case BSTP_IFSTATE_BLOCKING:
2032                 case BSTP_IFSTATE_LISTENING:
2033                 case BSTP_IFSTATE_DISABLED:
2034                         m_freem(m);
2035                         return;
2036                 }
2037         }
2038
2039         eh = mtod(m, struct ether_header *);
2040
2041         /*
2042          * If the interface is learning, and the source
2043          * address is valid and not multicast, record
2044          * the address.
2045          */
2046         if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2047             ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2048             (eh->ether_shost[0] == 0 &&
2049              eh->ether_shost[1] == 0 &&
2050              eh->ether_shost[2] == 0 &&
2051              eh->ether_shost[3] == 0 &&
2052              eh->ether_shost[4] == 0 &&
2053              eh->ether_shost[5] == 0) == 0)
2054                 bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2055
2056         if ((bif->bif_flags & IFBIF_STP) != 0 &&
2057             bif->bif_state == BSTP_IFSTATE_LEARNING) {
2058                 m_freem(m);
2059                 return;
2060         }
2061
2062         /*
2063          * At this point, the port either doesn't participate
2064          * in spanning tree or it is in the forwarding state.
2065          */
2066
2067         /*
2068          * If the packet is unicast, destined for someone on
2069          * "this" side of the bridge, drop it.
2070          */
2071         if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2072                 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2073                 if (src_if == dst_if) {
2074                         m_freem(m);
2075                         return;
2076                 }
2077         } else {
2078                 /* ...forward it to all interfaces. */
2079                 ifp->if_imcasts++;
2080                 dst_if = NULL;
2081         }
2082
2083         if (dst_if == NULL) {
2084                 bridge_broadcast(sc, src_if, m);
2085                 return;
2086         }
2087
2088         /*
2089          * At this point, we're dealing with a unicast frame
2090          * going to a different interface.
2091          */
2092         if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2093                 m_freem(m);
2094                 return;
2095         }
2096         bif = bridge_lookup_member_if(sc, dst_if);
2097         if (bif == NULL) {
2098                 /* Not a member of the bridge (anymore?) */
2099                 m_freem(m);
2100                 return;
2101         }
2102
2103         if (bif->bif_flags & IFBIF_STP) {
2104                 switch (bif->bif_state) {
2105                 case BSTP_IFSTATE_DISABLED:
2106                 case BSTP_IFSTATE_BLOCKING:
2107                         m_freem(m);
2108                         return;
2109                 }
2110         }
2111
2112         if (inet_pfil_hook.ph_hashooks > 0
2113 #ifdef INET6
2114             || inet6_pfil_hook.ph_hashooks > 0
2115 #endif
2116             ) {
2117                 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2118                         return;
2119                 if (m == NULL)
2120                         return;
2121
2122                 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2123                         return;
2124                 if (m == NULL)
2125                         return;
2126         }
2127         bridge_handoff(dst_if, m);
2128 }
2129
2130 /*
2131  * bridge_input:
2132  *
2133  *      Receive input from a member interface.  Queue the packet for
2134  *      bridging if it is not for us.
2135  */
2136 static struct mbuf *
2137 bridge_input(struct ifnet *ifp, struct mbuf *m)
2138 {
2139         struct bridge_softc *sc = ifp->if_bridge;
2140         struct bridge_iflist *bif;
2141         struct ifnet *bifp, *new_ifp;
2142         struct ether_header *eh;
2143         struct mbuf *mc, *mc2;
2144
2145         ASSERT_NOT_SERIALIZED(ifp->if_serializer);
2146
2147         /*
2148          * Make sure that we are still a member of a bridge interface.
2149          */
2150         if (sc == NULL)
2151                 return m;
2152
2153         new_ifp = NULL;
2154         bifp = sc->sc_ifp;
2155
2156         if ((bifp->if_flags & IFF_RUNNING) == 0)
2157                 goto out;
2158
2159         /*
2160          * Implement support for bridge monitoring.  If this flag has been
2161          * set on this interface, discard the packet once we push it through
2162          * the bpf(4) machinery, but before we do, increment various counters
2163          * associated with this bridge.
2164          */
2165         if (bifp->if_flags & IFF_MONITOR) {
2166                 /* Change input interface to this bridge */
2167                 m->m_pkthdr.rcvif = bifp;
2168
2169                 BPF_MTAP(bifp, m);
2170
2171                 /* Update bridge's ifnet statistics */
2172                 bifp->if_ipackets++;
2173                 bifp->if_ibytes += m->m_pkthdr.len;
2174                 if (m->m_flags & (M_MCAST | M_BCAST))
2175                         bifp->if_imcasts++;
2176
2177                 m_freem(m);
2178                 m = NULL;
2179                 goto out;
2180         }
2181
2182         eh = mtod(m, struct ether_header *);
2183
2184         m->m_flags &= ~M_PROTO1; /* XXX Hack - loop prevention */
2185
2186         if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2187                 /*
2188                  * If the packet is for us, set the packets source as the
2189                  * bridge, and return the packet back to ifnet.if_input for
2190                  * local processing.
2191                  */
2192                 KASSERT(bifp->if_bridge == NULL,
2193                         ("loop created in bridge_input"));
2194                 new_ifp = bifp;
2195                 goto out;
2196         }
2197
2198         /*
2199          * Tap all packets arriving on the bridge, whether
2200          * they are local destinations or not.  In is in.
2201          */
2202         BPF_MTAP(bifp, m);
2203
2204         bif = bridge_lookup_member_if(sc, ifp);
2205         if (bif == NULL)
2206                 goto out;
2207
2208         if (sc->sc_span)
2209                 bridge_span(sc, m);
2210
2211         if (m->m_flags & (M_BCAST | M_MCAST)) {
2212                 /* Tap off 802.1D packets; they do not get forwarded. */
2213                 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2214                     ETHER_ADDR_LEN) == 0) {
2215                         lwkt_serialize_enter(bifp->if_serializer);
2216                         bstp_input(sc, bif, m);
2217                         lwkt_serialize_exit(bifp->if_serializer);
2218
2219                         /* m is freed by bstp_input */
2220                         m = NULL;
2221                         goto out;
2222                 }
2223
2224                 if (bif->bif_flags & IFBIF_STP) {
2225                         switch (bif->bif_state) {
2226                         case BSTP_IFSTATE_BLOCKING:
2227                         case BSTP_IFSTATE_LISTENING:
2228                         case BSTP_IFSTATE_DISABLED:
2229                                 goto out;
2230                         }
2231                 }
2232
2233                 /*
2234                  * Make a deep copy of the packet and enqueue the copy
2235                  * for bridge processing; return the original packet for
2236                  * local processing.
2237                  */
2238                 mc = m_dup(m, MB_DONTWAIT);
2239                 if (mc == NULL)
2240                         goto out;
2241
2242                 bridge_forward(sc, mc);
2243
2244                 /*
2245                  * Reinject the mbuf as arriving on the bridge so we have a
2246                  * chance at claiming multicast packets.  We cannot loop back
2247                  * here from ether_input, as a bridge is never a member of a
2248                  * bridge.
2249                  */
2250                 KASSERT(bifp->if_bridge == NULL,
2251                         ("loop created in bridge_input"));
2252                 mc2 = m_dup(m, MB_DONTWAIT);
2253 #ifdef notyet
2254                 if (mc2 != NULL) {
2255                         /* Keep the layer3 header aligned */
2256                         int i = min(mc2->m_pkthdr.len, max_protohdr);
2257                         mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2258                 }
2259 #endif
2260                 if (mc2 != NULL) {
2261                         /*
2262                          * Don't tap to bpf(4) again; we have
2263                          * already done the tapping.
2264                          */
2265                         ether_reinput_oncpu(bifp, mc2, 0);
2266                 }
2267
2268                 /* Return the original packet for local processing. */
2269                 goto out;
2270         }
2271
2272         if (bif->bif_flags & IFBIF_STP) {
2273                 switch (bif->bif_state) {
2274                 case BSTP_IFSTATE_BLOCKING:
2275                 case BSTP_IFSTATE_LISTENING:
2276                 case BSTP_IFSTATE_DISABLED:
2277                         goto out;
2278                 }
2279         }
2280
2281         /*
2282          * Unicast.  Make sure it's not for us.
2283          *
2284          * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2285          * is followed by breaking out of the loop.
2286          */
2287         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2288                 if (bif->bif_ifp->if_type != IFT_ETHER)
2289                         continue;
2290
2291                 /* It is destined for us. */
2292                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2293                     ETHER_ADDR_LEN) == 0) {
2294                         if (bif->bif_ifp != ifp) {
2295                                 /* XXX loop prevention */
2296                                 m->m_flags |= M_PROTO1;
2297                                 new_ifp = bif->bif_ifp;
2298                         }
2299                         if (bif->bif_flags & IFBIF_LEARNING) {
2300                                 bridge_rtupdate(sc, eh->ether_shost,
2301                                                 ifp, IFBAF_DYNAMIC);
2302                         }
2303                         goto out;
2304                 }
2305
2306                 /* We just received a packet that we sent out. */
2307                 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2308                     ETHER_ADDR_LEN) == 0) {
2309                         m_freem(m);
2310                         m = NULL;
2311                         goto out;
2312                 }
2313         }
2314
2315         /* Perform the bridge forwarding function. */
2316         bridge_forward(sc, m);
2317         m = NULL;
2318 out:
2319         if (new_ifp != NULL) {
2320                 ether_reinput_oncpu(new_ifp, m, 1);
2321                 m = NULL;
2322         }
2323         return (m);
2324 }
2325
2326 /*
2327  * bridge_start_bcast:
2328  *
2329  *      Broadcast the packet sent from bridge to all member
2330  *      interfaces.
2331  *      This is a simplified version of bridge_broadcast(); unlike that
2332  *      function, it expects the caller to hold the bridge's serializer.
2333  */
2334 static void
2335 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2336 {
2337         struct bridge_iflist *bif;
2338         struct mbuf *mc;
2339         struct ifnet *dst_if, *bifp;
2340         int used = 0;
2341
2342         bifp = sc->sc_ifp;
2343         ASSERT_SERIALIZED(bifp->if_serializer);
2344
2345         /*
2346          * Following loop is MPSAFE; nothing is blocking
2347          * in the loop body.
2348          */
2349         LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2350                 dst_if = bif->bif_ifp;
2351
2352                 if (bif->bif_flags & IFBIF_STP) {
2353                         switch (bif->bif_state) {
2354                         case BSTP_IFSTATE_BLOCKING:
2355                         case BSTP_IFSTATE_DISABLED:
2356                                 continue;
2357                         }
2358                 }
2359
2360                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2361                     (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2362                         continue;
2363
2364                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2365                         continue;
2366
2367                 if (LIST_NEXT(bif, bif_next) == NULL) {
2368                         mc = m;
2369                         used = 1;
2370                 } else {
2371                         mc = m_copypacket(m, MB_DONTWAIT);
2372                         if (mc == NULL) {
2373                                 bifp->if_oerrors++;
2374                                 continue;
2375                         }
2376                 }
2377                 bridge_enqueue(dst_if, mc);
2378         }
2379         if (used == 0)
2380                 m_freem(m);
2381 }
2382
2383 /*
2384  * bridge_broadcast:
2385  *
2386  *      Send a frame to all interfaces that are members of
2387  *      the bridge, except for the one on which the packet
2388  *      arrived.
2389  */
2390 static void
2391 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2392     struct mbuf *m)
2393 {
2394         struct bridge_iflist *bif, *nbif;
2395         struct mbuf *mc;
2396         struct ifnet *dst_if, *bifp;
2397         int used = 0;
2398
2399         bifp = sc->sc_ifp;
2400         ASSERT_NOT_SERIALIZED(bifp->if_serializer);
2401
2402         if (inet_pfil_hook.ph_hashooks > 0
2403 #ifdef INET6
2404             || inet6_pfil_hook.ph_hashooks > 0
2405 #endif
2406             ) {
2407                 if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2408                         return;
2409                 if (m == NULL)
2410                         return;
2411
2412                 /* Filter on the bridge interface before broadcasting */
2413                 if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2414                         return;
2415                 if (m == NULL)
2416                         return;
2417         }
2418
2419         LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2420                 dst_if = bif->bif_ifp;
2421                 if (dst_if == src_if)
2422                         continue;
2423
2424                 if (bif->bif_flags & IFBIF_STP) {
2425                         switch (bif->bif_state) {
2426                         case BSTP_IFSTATE_BLOCKING:
2427                         case BSTP_IFSTATE_DISABLED:
2428                                 continue;
2429                         }
2430                 }
2431
2432                 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2433                     (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2434                         continue;
2435
2436                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2437                         continue;
2438
2439                 if (LIST_NEXT(bif, bif_next) == NULL) {
2440                         mc = m;
2441                         used = 1;
2442                 } else {
2443                         mc = m_copypacket(m, MB_DONTWAIT);
2444                         if (mc == NULL) {
2445                                 sc->sc_ifp->if_oerrors++;
2446                                 continue;
2447                         }
2448                 }
2449
2450                 /*
2451                  * Filter on the output interface.  Pass a NULL bridge
2452                  * interface pointer so we do not redundantly filter on
2453                  * the bridge for each interface we broadcast on.
2454                  */
2455                 if (inet_pfil_hook.ph_hashooks > 0
2456 #ifdef INET6
2457                     || inet6_pfil_hook.ph_hashooks > 0
2458 #endif
2459                     ) {
2460                         if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2461                                 continue;
2462                         if (mc == NULL)
2463                                 continue;
2464                 }
2465                 bridge_handoff(dst_if, mc);
2466
2467                 if (nbif != NULL && !nbif->bif_onlist) {
2468                         KKASSERT(bif->bif_onlist);
2469                         nbif = LIST_NEXT(bif, bif_next);
2470                 }
2471         }
2472         if (used == 0)
2473                 m_freem(m);
2474 }
2475
2476 /*
2477  * bridge_span:
2478  *
2479  *      Duplicate a packet out one or more interfaces that are in span mode;
2480  *      the original mbuf is left unmodified.
2481  */
2482 static void
2483 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2484 {
2485         struct bridge_iflist *bif;
2486         struct ifnet *dst_if, *bifp;
2487         struct mbuf *mc;
2488
2489         bifp = sc->sc_ifp;
2490         lwkt_serialize_enter(bifp->if_serializer);
2491
2492         LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2493                 dst_if = bif->bif_ifp;
2494
2495                 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2496                         continue;
2497
2498                 mc = m_copypacket(m, MB_DONTWAIT);
2499                 if (mc == NULL) {
2500                         sc->sc_ifp->if_oerrors++;
2501                         continue;
2502                 }
2503                 bridge_enqueue(dst_if, mc);
2504         }
2505
2506         lwkt_serialize_exit(bifp->if_serializer);
2507 }
2508
2509 static void
2510 bridge_rtmsg_sync_handler(struct netmsg *nmsg)
2511 {
2512         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2513 }
2514
2515 static void
2516 bridge_rtmsg_sync(struct bridge_softc *sc)
2517 {
2518         struct netmsg nmsg;
2519
2520         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2521
2522         netmsg_init(&nmsg, &curthread->td_msgport, 0,
2523                     bridge_rtmsg_sync_handler);
2524         ifnet_domsg(&nmsg.nm_lmsg, 0);
2525 }
2526
2527 static __inline void
2528 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2529                      int setflags, uint8_t flags, uint32_t timeo)
2530 {
2531         if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2532             bri->bri_ifp != dst_if)
2533                 bri->bri_ifp = dst_if;
2534         if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2535             bri->bri_expire != time_second + timeo)
2536                 bri->bri_expire = time_second + timeo;
2537         if (setflags)
2538                 bri->bri_flags = flags;
2539 }
2540
2541 static int
2542 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2543                        struct ifnet *dst_if, int setflags, uint8_t flags,
2544                        struct bridge_rtinfo **bri0)
2545 {
2546         struct bridge_rtnode *brt;
2547         struct bridge_rtinfo *bri;
2548
2549         if (mycpuid == 0) {
2550                 brt = bridge_rtnode_lookup(sc, dst);
2551                 if (brt != NULL) {
2552                         /*
2553                          * rtnode for 'dst' already exists.  We inform the
2554                          * caller about this by leaving bri0 as NULL.  The
2555                          * caller will terminate the installation upon getting
2556                          * NULL bri0.  However, we still need to update the
2557                          * rtinfo.
2558                          */
2559                         KKASSERT(*bri0 == NULL);
2560
2561                         /* Update rtinfo */
2562                         bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2563                                              flags, sc->sc_brttimeout);
2564                         return 0;
2565                 }
2566
2567                 /*
2568                  * We only need to check brtcnt on CPU0, since if the limit
2569                  * would be exceeded, ENOSPC is returned.  The caller knows
2570                  * this and will terminate the installation.
2571                  */
2572                 if (sc->sc_brtcnt >= sc->sc_brtmax)
2573                         return ENOSPC;
2574
2575                 KKASSERT(*bri0 == NULL);
2576                 bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2577                                   M_WAITOK | M_ZERO);
2578                 *bri0 = bri;
2579
2580                 /* Setup rtinfo */
2581                 bri->bri_flags = IFBAF_DYNAMIC;
2582                 bridge_rtinfo_update(bri, dst_if, setflags, flags,
2583                                      sc->sc_brttimeout);
2584         } else {
2585                 bri = *bri0;
2586                 KKASSERT(bri != NULL);
2587         }
2588
2589         brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2590                       M_WAITOK | M_ZERO);
2591         memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2592         brt->brt_info = bri;
2593
2594         bridge_rtnode_insert(sc, brt);
2595         return 0;
2596 }
2597
2598 static void
2599 bridge_rtinstall_handler(struct netmsg *nmsg)
2600 {
2601         struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
2602         int error;
2603
2604         error = bridge_rtinstall_oncpu(brmsg->br_softc,
2605                                        brmsg->br_dst, brmsg->br_dst_if,
2606                                        brmsg->br_setflags, brmsg->br_flags,
2607                                        &brmsg->br_rtinfo);
2608         if (error) {
2609                 KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2610                 lwkt_replymsg(&nmsg->nm_lmsg, error);
2611                 return;
2612         } else if (brmsg->br_rtinfo == NULL) {
2613                 /* rtnode already exists for 'dst' */
2614                 KKASSERT(mycpuid == 0);
2615                 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2616                 return;
2617         }
2618         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2619 }
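
/*
 * Flow sketch (descriptive note): an asynchronous install from
 * bridge_rtupdate() is sent to CPU0 with ifnet_sendmsg().  CPU0 allocates
 * the shared bridge_rtinfo plus its own bridge_rtnode, then the handler
 * above passes the same message to the next CPU with ifnet_forwardmsg();
 * each CPU installs a per-cpu rtnode that points at the single shared
 * rtinfo.  After the last CPU the message is replied, and in the async
 * case the netisr_afree_rport reply port frees it.
 */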
2620
2621 /*
2622  * bridge_rtupdate:
2623  *
2624  *      Add/Update a bridge routing entry.
2625  */
2626 static int
2627 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2628                 struct ifnet *dst_if, uint8_t flags)
2629 {
2630         struct bridge_rtnode *brt;
2631
2632         /*
2633          * A route for this destination might already exist.  If so,
2634          * update it, otherwise create a new one.
2635          */
2636         if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2637                 struct netmsg_brsaddr *brmsg;
2638
2639                 if (sc->sc_brtcnt >= sc->sc_brtmax)
2640                         return ENOSPC;
2641
2642                 brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
2643                 if (brmsg == NULL)
2644                         return ENOMEM;
2645
2646                 netmsg_init(&brmsg->br_nmsg, &netisr_afree_rport, 0,
2647                             bridge_rtinstall_handler);
2648                 memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2649                 brmsg->br_dst_if = dst_if;
2650                 brmsg->br_flags = flags;
2651                 brmsg->br_setflags = 0;
2652                 brmsg->br_softc = sc;
2653                 brmsg->br_rtinfo = NULL;
2654
2655                 ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
2656                 return 0;
2657         }
2658         bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
2659                              sc->sc_brttimeout);
2660         return 0;
2661 }
2662
2663 static int
2664 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2665                struct ifnet *dst_if, uint8_t flags)
2666 {
2667         struct netmsg_brsaddr brmsg;
2668
2669         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2670
2671         netmsg_init(&brmsg.br_nmsg, &curthread->td_msgport, 0,
2672                     bridge_rtinstall_handler);
2673         memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2674         brmsg.br_dst_if = dst_if;
2675         brmsg.br_flags = flags;
2676         brmsg.br_setflags = 1;
2677         brmsg.br_softc = sc;
2678         brmsg.br_rtinfo = NULL;
2679
2680         return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
2681 }
2682
2683 /*
2684  * bridge_rtlookup:
2685  *
2686  *      Lookup the destination interface for an address.
2687  */
2688 static struct ifnet *
2689 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2690 {
2691         struct bridge_rtnode *brt;
2692
2693         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2694                 return NULL;
2695         return brt->brt_info->bri_ifp;
2696 }
2697
2698 static void
2699 bridge_rtreap_handler(struct netmsg *nmsg)
2700 {
2701         struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2702         struct bridge_rtnode *brt, *nbrt;
2703
2704         LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2705                 if (brt->brt_info->bri_dead)
2706                         bridge_rtnode_destroy(sc, brt);
2707         }
2708         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2709 }
2710
2711 static void
2712 bridge_rtreap(struct bridge_softc *sc)
2713 {
2714         struct netmsg nmsg;
2715
2716         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2717
2718         netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_rtreap_handler);
2719         nmsg.nm_lmsg.u.ms_resultp = sc;
2720
2721         ifnet_domsg(&nmsg.nm_lmsg, 0);
2722 }
2723
2724 static void
2725 bridge_rtreap_async(struct bridge_softc *sc)
2726 {
2727         struct netmsg *nmsg;
2728
2729         nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
2730
2731         netmsg_init(nmsg, &netisr_afree_rport, 0, bridge_rtreap_handler);
2732         nmsg->nm_lmsg.u.ms_resultp = sc;
2733
2734         ifnet_sendmsg(&nmsg->nm_lmsg, 0);
2735 }
2736
2737 /*
2738  * bridge_rttrim:
2739  *
2740  *      Trim the routing table so that the number of
2741  *      routing entries is less than or equal to the
2742  *      maximum number.
2743  */
2744 static void
2745 bridge_rttrim(struct bridge_softc *sc)
2746 {
2747         struct bridge_rtnode *brt;
2748         int dead;
2749
2750         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2751
2752         /* Make sure we actually need to do this. */
2753         if (sc->sc_brtcnt <= sc->sc_brtmax)
2754                 return;
2755
2756         /*
2757          * Find out how many rtnodes are dead
2758          */
2759         dead = bridge_rtage_finddead(sc);
2760         KKASSERT(dead <= sc->sc_brtcnt);
2761
2762         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2763                 /* Enough dead rtnodes are found */
2764                 bridge_rtreap(sc);
2765                 return;
2766         }
2767
2768         /*
2769          * Kill some dynamic rtnodes to meet the brtmax
2770          */
2771         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2772                 struct bridge_rtinfo *bri = brt->brt_info;
2773
2774                 if (bri->bri_dead) {
2775                         /*
2776                          * We have counted this rtnode in
2777                          * bridge_rtage_finddead()
2778                          */
2779                         continue;
2780                 }
2781
2782                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2783                         bri->bri_dead = 1;
2784                         ++dead;
2785                         KKASSERT(dead <= sc->sc_brtcnt);
2786
2787                         if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2788                                 /* Enough rtnodes are collected */
2789                                 break;
2790                         }
2791                 }
2792         }
2793         if (dead)
2794                 bridge_rtreap(sc);
2795 }
2796
2797 /*
2798  * bridge_timer:
2799  *
2800  *      Aging timer for the bridge.
2801  */
2802 static void
2803 bridge_timer(void *arg)
2804 {
2805         struct bridge_softc *sc = arg;
2806         struct lwkt_msg *lmsg;
2807
2808         KKASSERT(mycpuid == BRIDGE_CFGCPU);
2809
2810         crit_enter();
2811
2812         if (callout_pending(&sc->sc_brcallout) ||
2813             !callout_active(&sc->sc_brcallout)) {
2814                 crit_exit();
2815                 return;
2816         }
2817         callout_deactivate(&sc->sc_brcallout);
2818
2819         lmsg = &sc->sc_brtimemsg.nm_lmsg;
2820         KKASSERT(lmsg->ms_flags & MSGF_DONE);
2821         lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);
2822
2823         crit_exit();
2824 }
2825
2826 static void
2827 bridge_timer_handler(struct netmsg *nmsg)
2828 {
2829         struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2830
2831         KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
2832
2833         crit_enter();
2834         /* Reply ASAP */
2835         lwkt_replymsg(&nmsg->nm_lmsg, 0);
2836         crit_exit();
2837
2838         bridge_rtage(sc);
2839         if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2840                 callout_reset(&sc->sc_brcallout,
2841                     bridge_rtable_prune_period * hz, bridge_timer, sc);
2842         }
2843 }
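
/*
 * Timer flow (descriptive note): bridge_timer() runs from the callout on
 * BRIDGE_CFGCPU and only sends sc_brtimemsg to BRIDGE_CFGPORT.  The
 * handler above replies right away, so the message (embedded in the
 * softc) is marked done and can be reused by the next callout tick; the
 * aging cycle and the re-arming of the callout then happen outside the
 * critical section.
 */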
2844
2845 static int
2846 bridge_rtage_finddead(struct bridge_softc *sc)
2847 {
2848         struct bridge_rtnode *brt;
2849         int dead = 0;
2850
2851         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2852                 struct bridge_rtinfo *bri = brt->brt_info;
2853
2854                 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2855                     time_second >= bri->bri_expire) {
2856                         bri->bri_dead = 1;
2857                         ++dead;
2858                         KKASSERT(dead <= sc->sc_brtcnt);
2859                 }
2860         }
2861         return dead;
2862 }
2863
2864 /*
2865  * bridge_rtage:
2866  *
2867  *      Perform an aging cycle.
2868  */
2869 static void
2870 bridge_rtage(struct bridge_softc *sc)
2871 {
2872         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2873
2874         if (bridge_rtage_finddead(sc))
2875                 bridge_rtreap(sc);
2876 }
2877
2878 /*
2879  * bridge_rtflush:
2880  *
2881  *      Remove all dynamic addresses from the bridge.
2882  */
2883 static void
2884 bridge_rtflush(struct bridge_softc *sc, int bf)
2885 {
2886         struct bridge_rtnode *brt;
2887         int reap;
2888
2889         reap = 0;
2890         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2891                 struct bridge_rtinfo *bri = brt->brt_info;
2892
2893                 if ((bf & IFBF_FLUSHALL) ||
2894                     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2895                         bri->bri_dead = 1;
2896                         reap = 1;
2897                 }
2898         }
2899         if (reap) {
2900                 if (bf & IFBF_FLUSHSYNC)
2901                         bridge_rtreap(sc);
2902                 else
2903                         bridge_rtreap_async(sc);
2904         }
2905 }
2906
2907 /*
2908  * bridge_rtdaddr:
2909  *
2910  *      Remove an address from the table.
2911  */
2912 static int
2913 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2914 {
2915         struct bridge_rtnode *brt;
2916
2917         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2918
2919         if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2920                 return (ENOENT);
2921
2922         /* TODO: add a cheaper delete operation */
2923         brt->brt_info->bri_dead = 1;
2924         bridge_rtreap(sc);
2925         return (0);
2926 }
2927
2928 /*
2929  * bridge_rtdelete:
2930  *
2931  *      Delete routes to a specific member interface.
2932  */
2933 void
2934 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
2935 {
2936         struct bridge_rtnode *brt;
2937         int reap;
2938
2939         reap = 0;
2940         LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2941                 struct bridge_rtinfo *bri = brt->brt_info;
2942
2943                 if (bri->bri_ifp == ifp &&
2944                     ((bf & IFBF_FLUSHALL) ||
2945                      (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2946                         bri->bri_dead = 1;
2947                         reap = 1;
2948                 }
2949         }
2950         if (reap) {
2951                 if (bf & IFBF_FLUSHSYNC)
2952                         bridge_rtreap(sc);
2953                 else
2954                         bridge_rtreap_async(sc);
2955         }
2956 }
2957
2958 /*
2959  * bridge_rtable_init:
2960  *
2961  *      Initialize the route table for this bridge.
2962  */
2963 static void
2964 bridge_rtable_init(struct bridge_softc *sc)
2965 {
2966         int cpu;
2967
2968         /*
2969          * Initialize per-cpu hash tables
2970          */
2971         sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2972                                  M_DEVBUF, M_WAITOK);
2973         for (cpu = 0; cpu < ncpus; ++cpu) {
2974                 int i;
2975
2976                 sc->sc_rthashs[cpu] =
2977                 kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2978                         M_DEVBUF, M_WAITOK);
2979
2980                 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2981                         LIST_INIT(&sc->sc_rthashs[cpu][i]);
2982         }
2983         sc->sc_rthash_key = karc4random();
2984
2985         /*
2986          * Initialize per-cpu lists
2987          */
2988         sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2989                                  M_DEVBUF, M_WAITOK);
2990         for (cpu = 0; cpu < ncpus; ++cpu)
2991                 LIST_INIT(&sc->sc_rtlists[cpu]);
2992 }
2993
2994 /*
2995  * bridge_rtable_fini:
2996  *
2997  *      Deconstruct the route table for this bridge.
2998  */
2999 static void
3000 bridge_rtable_fini(struct bridge_softc *sc)
3001 {
3002         int cpu;
3003
3004         /*
3005          * Free per-cpu hash tables
3006          */
3007         for (cpu = 0; cpu < ncpus; ++cpu)
3008                 kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3009         kfree(sc->sc_rthashs, M_DEVBUF);
3010
3011         /*
3012          * Free per-cpu lists
3013          */
3014         kfree(sc->sc_rtlists, M_DEVBUF);
3015 }
3016
3017 /*
3018  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3019  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3020  */
3021 #define mix(a, b, c)                                                    \
3022 do {                                                                    \
3023         a -= b; a -= c; a ^= (c >> 13);                                 \
3024         b -= c; b -= a; b ^= (a << 8);                                  \
3025         c -= a; c -= b; c ^= (b >> 13);                                 \
3026         a -= b; a -= c; a ^= (c >> 12);                                 \
3027         b -= c; b -= a; b ^= (a << 16);                                 \
3028         c -= a; c -= b; c ^= (b >> 5);                                  \
3029         a -= b; a -= c; a ^= (c >> 3);                                  \
3030         b -= c; b -= a; b ^= (a << 10);                                 \
3031         c -= a; c -= b; c ^= (b >> 15);                                 \
3032 } while (/*CONSTCOND*/0)
3033
3034 static __inline uint32_t
3035 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3036 {
3037         uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3038
3039         b += addr[5] << 8;
3040         b += addr[4];
3041         a += addr[3] << 24;
3042         a += addr[2] << 16;
3043         a += addr[1] << 8;
3044         a += addr[0];
3045
3046         mix(a, b, c);
3047
3048         return (c & BRIDGE_RTHASH_MASK);
3049 }
3050
3051 #undef mix
3052
3053 static int
3054 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3055 {
3056         int i, d;
3057
3058         for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3059                 d = ((int)a[i]) - ((int)b[i]);
3060         }
3061
3062         return (d);
3063 }
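
/*
 * Example (for illustration): comparing 00:11:22:33:44:55 with
 * 00:0f:22:33:44:55 returns a positive value because the first differing
 * octet has 0x11 > 0x0f.  bridge_rtnode_insert() below uses this ordering
 * to keep each hash chain sorted with the largest address first, which is
 * what lets bridge_rtnode_lookup() stop early as soon as the address being
 * looked up compares greater than the current entry.
 */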
3064
3065 /*
3066  * bridge_rtnode_lookup:
3067  *
3068  *      Look up a bridge route node for the specified destination.
3069  */
3070 static struct bridge_rtnode *
3071 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3072 {
3073         struct bridge_rtnode *brt;
3074         uint32_t hash;
3075         int dir;
3076
3077         hash = bridge_rthash(sc, addr);
3078         LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3079                 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3080                 if (dir == 0)
3081                         return (brt);
3082                 if (dir > 0)
3083                         return (NULL);
3084         }
3085
3086         return (NULL);
3087 }
3088
3089 /*
3090  * bridge_rtnode_insert:
3091  *
3092  *      Insert the specified bridge node into the route table.
3093  *      Caller has to make sure that rtnode does not exist.
3094  */
3095 static void
3096 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3097 {
3098         struct bridge_rtnode *lbrt;
3099         uint32_t hash;
3100         int dir;
3101
3102         hash = bridge_rthash(sc, brt->brt_addr);
3103
3104         lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3105         if (lbrt == NULL) {
3106                 LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3107                 goto out;
3108         }
3109
3110         do {
3111                 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3112                 KASSERT(dir != 0, ("rtnode already exists\n"));
3113
3114                 if (dir > 0) {
3115                         LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3116                         goto out;
3117                 }
3118                 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3119                         LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3120                         goto out;
3121                 }
3122                 lbrt = LIST_NEXT(lbrt, brt_hash);
3123         } while (lbrt != NULL);
3124
3125         panic("no suitable position found for rtnode\n");
3126 out:
3127         LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3128         if (mycpuid == 0) {
3129                 /*
3130                  * Update the brtcnt.
3131                  * We only need to do it once and we do it on CPU0.
3132                  */
3133                 sc->sc_brtcnt++;
3134         }
3135 }
3136
3137 /*
3138  * bridge_rtnode_destroy:
3139  *
3140  *      Destroy a bridge rtnode.
3141  */
3142 static void
3143 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3144 {
3145         LIST_REMOVE(brt, brt_hash);
3146         LIST_REMOVE(brt, brt_list);
3147
3148         if (mycpuid + 1 == ncpus) {
3149                 /* Free rtinfo associated with rtnode on the last cpu */
3150                 kfree(brt->brt_info, M_DEVBUF);
3151         }
3152         kfree(brt, M_DEVBUF);
3153
3154         if (mycpuid == 0) {
3155                 /* Update brtcnt only on CPU0 */
3156                 sc->sc_brtcnt--;
3157         }
3158 }
3159
3160 static __inline int
3161 bridge_post_pfil(struct mbuf *m)
3162 {
3163         if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3164                 return EOPNOTSUPP;
3165
3166         /* Not yet */
3167         if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3168                 return EOPNOTSUPP;
3169
3170         return 0;
3171 }
3172
3173 /*
3174  * Send bridge packets through pfil if they are one of the types pfil can deal
3175  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3176  * question.)  If bifp or ifp is NULL, packet filtering is skipped for
3177  * that interface.
3178  */
3179 static int
3180 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3181 {
3182         int snap, error, i, hlen;
3183         struct ether_header *eh1, eh2;
3184         struct ip *ip;
3185         struct llc llc1;
3186         u_int16_t ether_type;
3187
3188         snap = 0;
3189         error = -1;     /* Default to failure unless set to 0 below */
3190
3191         if (pfil_bridge == 0 && pfil_member == 0)
3192                 return (0); /* filtering is disabled */
3193
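             /*
              * Make sure the headers we are about to inspect are
              * contiguous in the first mbuf.
              */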
3194         i = min((*mp)->m_pkthdr.len, max_protohdr);
3195         if ((*mp)->m_len < i) {
3196                 *mp = m_pullup(*mp, i);
3197                 if (*mp == NULL) {
3198                         kprintf("%s: m_pullup failed\n", __func__);
3199                         return (-1);
3200                 }
3201         }
3202
3203         eh1 = mtod(*mp, struct ether_header *);
3204         ether_type = ntohs(eh1->ether_type);
3205
3206         /*
3207          * Check for SNAP/LLC.
3208          */
3209         if (ether_type < ETHERMTU) {
3210                 struct llc *llc2 = (struct llc *)(eh1 + 1);
3211
3212                 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3213                     llc2->llc_dsap == LLC_SNAP_LSAP &&
3214                     llc2->llc_ssap == LLC_SNAP_LSAP &&
3215                     llc2->llc_control == LLC_UI) {
3216                         ether_type = htons(llc2->llc_un.type_snap.ether_type);
3217                         snap = 1;
3218                 }
3219         }
3220
3221         /*
3222          * If we're trying to filter bridge traffic, don't look at anything
3223          * other than IP and ARP traffic.  If the filter doesn't understand
3224          * IPv6, don't allow IPv6 through the bridge either.  This is lame
3225          * since if we really wanted, say, an AppleTalk filter, we are hosed,
3226          * but of course we don't have an AppleTalk filter to begin with.
3227          * (Note that since pfil doesn't understand ARP it will pass *ALL*
3228          * ARP traffic.)
3229          */
3230         switch (ether_type) {
3231         case ETHERTYPE_ARP:
3232         case ETHERTYPE_REVARP:
3233                 return (0); /* Automatically pass */
3234
3235         case ETHERTYPE_IP:
3236 #ifdef INET6
3237         case ETHERTYPE_IPV6:
3238 #endif /* INET6 */
3239                 break;
3240
3241         default:
3242                 /*
3243                  * Check to see if the user wants to pass non-IP packets;
3244                  * these will not be checked by pfil(9) and would pass
3245                  * unconditionally, so the default is to drop them.
3246                  */
3247                 if (pfil_onlyip)
3248                         goto bad;
3249         }
3250
3251         /* Strip off the Ethernet header and keep a copy. */
3252         m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3253         m_adj(*mp, ETHER_HDR_LEN);
3254
3255         /* Strip off snap header, if present */
3256         if (snap) {
3257                 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3258                 m_adj(*mp, sizeof(struct llc));
3259         }
3260
3261         /*
3262          * Check the IP header for alignment and errors
3263          */
3264         if (dir == PFIL_IN) {
3265                 switch (ether_type) {
3266                 case ETHERTYPE_IP:
3267                         error = bridge_ip_checkbasic(mp);
3268                         break;
3269 #ifdef INET6
3270                 case ETHERTYPE_IPV6:
3271                         error = bridge_ip6_checkbasic(mp);
3272                         break;
3273 #endif /* INET6 */
3274                 default:
3275                         error = 0;
3276                 }
3277                 if (error)
3278                         goto bad;
3279         }
3280
3281         error = 0;
3282
3283         /*
3284          * Run the packet through pfil
3285          */
3286         switch (ether_type) {
3287         case ETHERTYPE_IP:
3288                 /*
3289                  * Before calling the firewall, swap fields to host order
3290                  * the same way the IP stack does; we assume the header is contiguous.
3291                  */
3292                 ip = mtod(*mp, struct ip *);
3293
3294                 ip->ip_len = ntohs(ip->ip_len);
3295                 ip->ip_off = ntohs(ip->ip_off);
3296
3297                 /*
3298                  * Run pfil on the member interface and the bridge, both can
3299                  * be skipped by clearing pfil_member or pfil_bridge.
3300                  *
3301                  * Keep the order:
3302                  *   in_if -> bridge_if -> out_if
3303                  */
3304                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3305                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3306                         if (*mp == NULL || error != 0) /* filter may consume */
3307                                 break;
3308                         error = bridge_post_pfil(*mp);
3309                         if (error)
3310                                 break;
3311                 }
3312
3313                 if (pfil_member && ifp != NULL) {
3314                         error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3315                         if (*mp == NULL || error != 0) /* filter may consume */
3316                                 break;
3317                         error = bridge_post_pfil(*mp);
3318                         if (error)
3319                                 break;
3320                 }
3321
3322                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3323                         error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3324                         if (*mp == NULL || error != 0) /* filter may consume */
3325                                 break;
3326                         error = bridge_post_pfil(*mp);
3327                         if (error)
3328                                 break;
3329                 }
3330
3331                 /* check if we need to fragment the packet */
3332                 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3333                         i = (*mp)->m_pkthdr.len;
3334                         if (i > ifp->if_mtu) {
3335                                 error = bridge_fragment(ifp, *mp, &eh2, snap,
3336                                             &llc1);
3337                                 return (error);
3338                         }
3339                 }
3340
3341                 /* Recalculate the ip checksum and restore byte ordering */
3342                 ip = mtod(*mp, struct ip *);
3343                 hlen = ip->ip_hl << 2;
3344                 if (hlen < sizeof(struct ip))
3345                         goto bad;
3346                 if (hlen > (*mp)->m_len) {
3347                         if ((*mp = m_pullup(*mp, hlen)) == NULL)
3348                                 goto bad;
3349                         ip = mtod(*mp, struct ip *);
3350                         if (ip == NULL)
3351                                 goto bad;
3352                 }
3353                 ip->ip_len = htons(ip->ip_len);
3354                 ip->ip_off = htons(ip->ip_off);
3355                 ip->ip_sum = 0;
3356                 if (hlen == sizeof(struct ip))
3357                         ip->ip_sum = in_cksum_hdr(ip);
3358                 else
3359                         ip->ip_sum = in_cksum(*mp, hlen);
3360
3361                 break;
3362 #ifdef INET6
3363         case ETHERTYPE_IPV6:
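                     /*
                      * Same in_if -> bridge_if -> out_if ordering as the
                      * IPv4 case above.
                      */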
3364                 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3365                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3366                                         dir);
3367
3368                 if (*mp == NULL || error != 0) /* filter may consume */
3369                         break;
3370
3371                 if (pfil_member && ifp != NULL)
3372                         error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3373                                         dir);
3374
3375                 if (*mp == NULL || error != 0) /* filter may consume */
3376                         break;
3377
3378                 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3379                         error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3380                                         dir);
3381                 break;
3382 #endif
3383         default:
3384                 error = 0;
3385                 break;
3386         }
3387
3388         if (*mp == NULL)
3389                 return (error);
3390         if (error != 0)
3391                 goto bad;
3392
3393         error = -1;
3394
3395         /*
3396          * Finally, put everything back the way it was and return
3397          */
3398         if (snap) {
3399                 M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3400                 if (*mp == NULL)
3401                         return (error);
3402                 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3403         }
3404
3405         M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3406         if (*mp == NULL)
3407                 return (error);
3408         bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3409
3410         return (0);
3411
3412 bad:
3413         m_freem(*mp);
3414         *mp = NULL;
3415         return (error);
3416 }
3417
3418 /*
3419  * Perform basic sanity checks on the IP header, since
3420  * pfil assumes ip_input has already done them for it.
3421  * Cut-and-pasted from ip_input.c.
3422  * Given how simple the IPv6 version is,
3423  * does the IPv4 version really need to be
3424  * this complicated?
3425  *
3426  * XXX Should we update ipstat here, or not?
3427  * XXX Right now we update ipstat but not
3428  * XXX csum_counter.
3429  */
3430 static int
3431 bridge_ip_checkbasic(struct mbuf **mp)
3432 {
3433         struct mbuf *m = *mp;
3434         struct ip *ip;
3435         int len, hlen;
3436         u_short sum;
3437
3438         if (*mp == NULL)
3439                 return (-1);
3440 #if notyet
3441         if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3442                 if ((m = m_copyup(m, sizeof(struct ip),
3443                         (max_linkhdr + 3) & ~3)) == NULL) {
3444                         /* XXXJRT new stat, please */
3445                         ipstat.ips_toosmall++;
3446                         goto bad;
3447                 }
3448         } else
3449 #endif
3450 #ifndef __predict_false
3451 #define __predict_false(x) x
3452 #endif
3453          if (__predict_false(m->m_len < sizeof (struct ip))) {
3454                 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3455                         ipstat.ips_toosmall++;
3456                         goto bad;
3457                 }
3458         }
3459         ip = mtod(m, struct ip *);
3460         if (ip == NULL) goto bad;
3461
3462         if (ip->ip_v != IPVERSION) {
3463                 ipstat.ips_badvers++;
3464                 goto bad;
3465         }
3466         hlen = ip->ip_hl << 2;
3467         if (hlen < sizeof(struct ip)) { /* minimum header length */
3468                 ipstat.ips_badhlen++;
3469                 goto bad;
3470         }
3471         if (hlen > m->m_len) {
3472                 if ((m = m_pullup(m, hlen)) == NULL) {
3473                         ipstat.ips_badhlen++;
3474                         goto bad;
3475                 }
3476                 ip = mtod(m, struct ip *);
3477                 if (ip == NULL) goto bad;
3478         }
3479
3480         if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3481                 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3482         } else {
3483                 if (hlen == sizeof(struct ip)) {
3484                         sum = in_cksum_hdr(ip);
3485                 } else {
3486                         sum = in_cksum(m, hlen);
3487                 }
3488         }
3489         if (sum) {
3490                 ipstat.ips_badsum++;
3491                 goto bad;
3492         }
3493
3494         /* Retrieve the packet length. */
3495         len = ntohs(ip->ip_len);
3496
3497         /*
3498          * Check for additional length bogosity
3499          */
3500         if (len < hlen) {
3501                 ipstat.ips_badlen++;
3502                 goto bad;
3503         }
3504
3505         /*
3506          * Check that the amount of data in the buffers
3507          * is at least as much as the IP header would have us expect.
3508          * Drop packet if shorter than we expect.
3509          */
3510         if (m->m_pkthdr.len < len) {
3511                 ipstat.ips_tooshort++;
3512                 goto bad;
3513         }
3514
3515         /* Checks out, proceed */
3516         *mp = m;
3517         return (0);
3518
3519 bad:
3520         *mp = m;
3521         return (-1);
3522 }
3523
3524 #ifdef INET6
3525 /*
3526  * Same as above, but for IPv6.
3527  * Cut-and-pasted from ip6_input.c.
3528  * XXX Should we update ip6stat, or not?
3529  */
3530 static int
3531 bridge_ip6_checkbasic(struct mbuf **mp)
3532 {
3533         struct mbuf *m = *mp;
3534         struct ip6_hdr *ip6;
3535
3536         /*
3537          * If the IPv6 header is not aligned, slurp it up into a new
3538          * mbuf with space for link headers, in the event we forward
3539          * it.  Otherwise, if it is aligned, make sure the entire base
3540          * IPv6 header is in the first mbuf of the chain.
3541          */
3542 #if notyet
3543         if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3544                 struct ifnet *inifp = m->m_pkthdr.rcvif;
3545                 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3546                             (max_linkhdr + 3) & ~3)) == NULL) {
3547                         /* XXXJRT new stat, please */
3548                         ip6stat.ip6s_toosmall++;
3549                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3550                         goto bad;
3551                 }
3552         } else
3553 #endif
3554         if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3555                 struct ifnet *inifp = m->m_pkthdr.rcvif;
3556                 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3557                         ip6stat.ip6s_toosmall++;
3558                         in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3559                         goto bad;
3560                 }
3561         }
3562
3563         ip6 = mtod(m, struct ip6_hdr *);
3564
3565         if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3566                 ip6stat.ip6s_badvers++;
3567                 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3568                 goto bad;
3569         }
3570
3571         /* Checks out, proceed */
3572         *mp = m;
3573         return (0);
3574
3575 bad:
3576         *mp = m;
3577         return (-1);
3578 }
3579 #endif /* INET6 */
3580
3581 /*
3582  * bridge_fragment:
3583  *
3584  *      Return a fragmented mbuf chain.
3585  */
3586 static int
3587 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3588     int snap, struct llc *llc)
3589 {
3590         struct mbuf *m0;
3591         struct ip *ip;
3592         int error = -1;
3593
3594         if (m->m_len < sizeof(struct ip) &&
3595             (m = m_pullup(m, sizeof(struct ip))) == NULL)
3596                 goto out;
3597         ip = mtod(m, struct ip *);
3598
3599         error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3600                     CSUM_DELAY_IP);
3601         if (error)
3602                 goto out;
3603
3604         /* walk the chain and re-add the Ethernet header */
3605         for (m0 = m; m0; m0 = m0->m_nextpkt) {
3606                 if (error == 0) {
3607                         if (snap) {
3608                                 M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3609                                 if (m0 == NULL) {
3610                                         error = ENOBUFS;
3611                                         continue;
3612                                 }
3613                                 bcopy(llc, mtod(m0, caddr_t),
3614                                     sizeof(struct llc));
3615                         }
3616                         M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3617                         if (m0 == NULL) {
3618                                 error = ENOBUFS;
3619                                 continue;
3620                         }
3621                         bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3622                 } else
3623                         m_freem(m0);
3624         }
3625
3626         if (error == 0)
3627                 ipstat.ips_fragmented++;
3628
3629         return (error);
3630
3631 out:
3632         if (m != NULL)
3633                 m_freem(m);
3634         return (error);
3635 }
3636
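/*
 * bridge_enqueue_handler:
 *
 *      Netmsg handler which hands a queued packet to its destination
 *      interface.
 */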
3637 static void
3638 bridge_enqueue_handler(struct netmsg *nmsg)
3639 {
3640         struct netmsg_packet *nmp;
3641         struct ifnet *dst_ifp;
3642         struct mbuf *m;
3643
3644         nmp = (struct netmsg_packet *)nmsg;
3645         m = nmp->nm_packet;
3646         dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3647
3648         bridge_handoff(dst_ifp, m);
3649 }
3650
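/*
 * bridge_handoff:
 *
 *      Dispatch a packet, or a chain of fragments, to the destination
 *      interface's send queue, classifying it for ALTQ if necessary.
 */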
3651 static void
3652 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3653 {
3654         struct mbuf *m0;
3655
3656         /* We may be sending a fragment so traverse the mbuf */
3657         for (; m; m = m0) {
3658                 struct altq_pktattr pktattr;
3659
3660                 m0 = m->m_nextpkt;
3661                 m->m_nextpkt = NULL;
3662
3663                 if (ifq_is_enabled(&dst_ifp->if_snd))
3664                         altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3665
3666                 ifq_dispatch(dst_ifp, m, &pktattr);
3667         }
3668 }
3669
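/*
 * bridge_control_dispatch:
 *
 *      Netmsg handler for bridge_control(); runs the requested control
 *      function with the bridge interface serialized and replies with
 *      its error status.
 */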
3670 static void
3671 bridge_control_dispatch(struct netmsg *nmsg)
3672 {
3673         struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3674         struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3675         int error;
3676
3677         lwkt_serialize_enter(bifp->if_serializer);
3678         error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3679         lwkt_serialize_exit(bifp->if_serializer);
3680
3681         lwkt_replymsg(&nmsg->nm_lmsg, error);
3682 }
3683
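/*
 * bridge_control:
 *
 *      Dispatch a bridge configuration request to the BRIDGE_CFGPORT
 *      message port and wait for it to complete.
 */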
3684 static int
3685 bridge_control(struct bridge_softc *sc, u_long cmd,
3686                bridge_ctl_t bc_func, void *bc_arg)
3687 {
3688         struct ifnet *bifp = sc->sc_ifp;
3689         struct netmsg_brctl bc_msg;
3690         struct netmsg *nmsg;
3691         int error;
3692
3693         ASSERT_SERIALIZED(bifp->if_serializer);
3694
3695         bzero(&bc_msg, sizeof(bc_msg));
3696         nmsg = &bc_msg.bc_nmsg;
3697
3698         netmsg_init(nmsg, &curthread->td_msgport, 0, bridge_control_dispatch);
3699         bc_msg.bc_func = bc_func;
3700         bc_msg.bc_sc = sc;
3701         bc_msg.bc_arg = bc_arg;
3702
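             /*
              * Release the serializer while the message is being handled;
              * bridge_control_dispatch() re-acquires it.
              */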
3703         lwkt_serialize_exit(bifp->if_serializer);
3704         error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
3705         lwkt_serialize_enter(bifp->if_serializer);
3706         return error;
3707 }
3708
3709 static void
3710 bridge_add_bif_handler(struct netmsg *nmsg)
3711 {
3712         struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3713         struct bridge_softc *sc;
3714         struct bridge_iflist *bif;
3715
3716         sc = amsg->br_softc;
3717
3718         bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3719         bif->bif_ifp = amsg->br_bif_ifp;
3720         bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3721         bif->bif_onlist = 1;
3722         bif->bif_info = amsg->br_bif_info;
3723
3724         LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3725
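             /* Propagate the addition to the next cpu's per-cpu iflist */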
3726         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3727 }
3728
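/*
 * bridge_add_bif:
 *
 *      Add an interface to the bridge's per-cpu interface lists on
 *      all cpus.
 */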
3729 static void
3730 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3731                struct ifnet *ifp)
3732 {
3733         struct netmsg_braddbif amsg;
3734
3735         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3736
3737         netmsg_init(&amsg.br_nmsg, &curthread->td_msgport, 0,
3738                     bridge_add_bif_handler);
3739         amsg.br_softc = sc;
3740         amsg.br_bif_info = bif_info;
3741         amsg.br_bif_ifp = ifp;
3742
3743         ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
3744 }
3745
3746 static void
3747 bridge_del_bif_handler(struct netmsg *nmsg)
3748 {
3749         struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3750         struct bridge_softc *sc;
3751         struct bridge_iflist *bif;
3752
3753         sc = dmsg->br_softc;
3754
3755         /*
3756          * Locate the bif associated with the br_bif_info
3757          * on the current CPU
3758          */
3759         bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3760         KKASSERT(bif != NULL && bif->bif_onlist);
3761
3762         /* Remove the bif from the current CPU's iflist */
3763         bif->bif_onlist = 0;
3764         LIST_REMOVE(bif, bif_next);
3765
3766         /* Save the removed bif for later freeing */
3767         LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3768
3769         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3770 }
3771
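/*
 * bridge_del_bif:
 *
 *      Remove an interface from the bridge's per-cpu interface lists
 *      on all cpus; the removed bifs are collected on 'saved_bifs'
 *      for the caller to free.
 */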
3772 static void
3773 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3774                struct bridge_iflist_head *saved_bifs)
3775 {
3776         struct netmsg_brdelbif dmsg;
3777
3778         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3779
3780         netmsg_init(&dmsg.br_nmsg, &curthread->td_msgport, 0,
3781                     bridge_del_bif_handler);
3782         dmsg.br_softc = sc;
3783         dmsg.br_bif_info = bif_info;
3784         dmsg.br_bif_list = saved_bifs;
3785
3786         ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
3787 }
3788
3789 static void
3790 bridge_set_bifflags_handler(struct netmsg *nmsg)
3791 {
3792         struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3793         struct bridge_softc *sc;
3794         struct bridge_iflist *bif;
3795
3796         sc = smsg->br_softc;
3797
3798         /*
3799          * Locate the bif associated with the br_bif_info
3800          * on the current CPU
3801          */
3802         bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3803         KKASSERT(bif != NULL && bif->bif_onlist);
3804
3805         bif->bif_flags = smsg->br_bif_flags;
3806
3807         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3808 }
3809
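/*
 * bridge_set_bifflags:
 *
 *      Update a member interface's flags on all cpus.
 */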
3810 static void
3811 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3812                     uint32_t bif_flags)
3813 {
3814         struct netmsg_brsflags smsg;
3815
3816         ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3817
3818         netmsg_init(&smsg.br_nmsg, &curthread->td_msgport, 0,
3819                     bridge_set_bifflags_handler);
3820         smsg.br_softc = sc;
3821         smsg.br_bif_info = bif_info;
3822         smsg.br_bif_flags = bif_flags;
3823
3824         ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);
3825 }