- Correct 'forward' support
1 /*
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
26  * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.74 2008/08/21 12:11:34 sephe Exp $
27  */
28
29 #define        DEB(x)
30 #define        DDB(x) x
31
32 /*
33  * Implement IP packet firewall (new version)
34  */
35
36 #ifndef KLD_MODULE
37 #include "opt_ipfw.h"
38 #include "opt_ipdn.h"
39 #include "opt_ipdivert.h"
40 #include "opt_inet.h"
41 #ifndef INET
42 #error IPFIREWALL requires INET.
43 #endif /* INET */
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/kernel.h>
51 #include <sys/proc.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/thread2.h>
57 #include <sys/ucred.h>
58 #include <sys/in_cksum.h>
59 #include <sys/lock.h>
60
61 #include <net/if.h>
62 #include <net/route.h>
63 #include <net/netmsg2.h>
64
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include "ip_fw.h"
73 #include <net/dummynet/ip_dummynet.h>
74 #include <netinet/tcp.h>
75 #include <netinet/tcp_timer.h>
76 #include <netinet/tcp_var.h>
77 #include <netinet/tcpip.h>
78 #include <netinet/udp.h>
79 #include <netinet/udp_var.h>
80
81 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
82
83 /*
84  * Description of per-CPU rule duplication:
85  *
86  * Module loading/unloading and all ioctl operations are serialized
87  * by netisr0, so we don't have any ordering or locking problems.
88  *
89  * The following graph shows how operations on the per-CPU rule lists
90  * are performed [2 CPU case]:
91  *
92  *   CPU0                 CPU1
93  *
94  * netisr0 <------------------------------------+
95  *  domsg                                       |
96  *    |                                         |
97  *    | netmsg                                  |
98  *    |                                         |
99  *    V                                         |
100  *  ifnet0                                      |
101  *    :                                         | netmsg
102  *    :(delete/add...)                          |
103  *    :                                         |
104  *    :         netmsg                          |
105  *  forwardmsg---------->ifnet1                 |
106  *                          :                   |
107  *                          :(delete/add...)    |
108  *                          :                   |
109  *                          :                   |
110  *                        replymsg--------------+
111  *
112  *
113  *
114  *
115  * Rules which will not create states (non-dyn rules) [2 CPU case]
116  *
117  *    CPU0               CPU1
118  * layer3_chain       layer3_chain
119  *     |                  |
120  *     V                  V
121  * +-------+ sibling  +-------+ sibling
122  * | rule1 |--------->| rule1 |--------->NULL
123  * +-------+          +-------+
124  *     |                  |
125  *     |next              |next
126  *     V                  V
127  * +-------+ sibling  +-------+ sibling
128  * | rule2 |--------->| rule2 |--------->NULL
129  * +-------+          +-------+
130  *
131  * ip_fw.sibling:
132  * 1) Ease statistics calculation during IP_FW_GET.  We only need to
133  *    iterate layer3_chain on CPU0; the current rule's duplicates on
134  *    the other CPUs can safely be accessed read-only by following
135  *    ip_fw.sibling
136  * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
137  *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
138  *       rule1 and rule2.  To make this decision we need to iterate the
139  *       layer3_chain on CPU0.  The netmsg, which is used to insert the
140  *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
141  *       as next_rule
142  *    b) After the insertion on CPU0 is done, we will move on to CPU1.
143  *       But instead of recomputing rule3's position on CPU1 by
144  *       iterating the layer3_chain on CPU1, we set the netmsg's prev_rule
145  *       to rule1->sibling and next_rule to rule2->sibling before the
146  *       netmsg is forwarded to CPU1 from CPU0
147  *
148  *
149  *
150  * Rules which will create states (dyn rules) [2 CPU case]
151  * (unnecessary parts are omitted; they are the same as in the previous figure)
152  *
153  *   CPU0                       CPU1
154  * 
155  * +-------+                  +-------+
156  * | rule1 |                  | rule1 |
157  * +-------+                  +-------+
158  *   ^   |                      |   ^
159  *   |   |stub              stub|   |
160  *   |   |                      |   |
161  *   |   +----+            +----+   |
162  *   |        |            |        |
163  *   |        V            V        |
164  *   |    +--------------------+    |
165  *   |    |     rule_stub      |    |
166  *   |    | (read-only shared) |    |
167  *   |    |                    |    |
168  *   |    | back pointer array |    |
169  *   |    | (indexed by cpuid) |    |
170  *   |    |                    |    |
171  *   +----|---------[0]        |    |
172  *        |         [1]--------|----+
173  *        |                    |
174  *        +--------------------+
175  *          ^            ^
176  *          |            |
177  *  ........|............|............
178  *  :       |            |           :
179  *  :       |stub        |stub       :
180  *  :       |            |           :
181  *  :  +---------+  +---------+      :
182  *  :  | state1a |  | state1b | .... :
183  *  :  +---------+  +---------+      :
184  *  :                                :
185  *  :           states table         :
186  *  :            (shared)            :
187  *  :      (protected by dyn_lock)   :
188  *  ..................................
189  * 
190  * [state1a and state1b are states created by rule1]
191  *
192  * ip_fw_stub:
193  * This structure is introduced so that the shared (locked) state table can
194  * work with the per-CPU (duplicated) static rules.  It mainly bridges states
195  * and static rules and serves as the static rule's placeholder (a read-only
196  * shared part of the duplicated rules) from the states' point of view.
197  *
198  * IPFW_RULE_F_STATE (only for rules which create states):
199  * o  During rule installation, this flag is turned on after the rule's
200  *    duplications reach all CPUs, to avoid at least the following race:
201  *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
202  *    2) rule1 creates state1
203  *    3) state1 is located on CPU1 by check-state
204  *    But rule1 is not duplicated on CPU1 yet
205  * o  During rule deletion, this flag is turned off before deleting states
206  *    created by the rule and before deleting the rule itself, so no
207  *    more states will be created by the to-be-deleted rule even when its
208  *    duplications on certain CPUs have not been eliminated yet.
209  */
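/*
 * As a rough sketch of how ip_fw.sibling can be used for the statistics
 * case above (illustrative pseudo-code, not the actual IP_FW_GET path;
 * pcnt/bcnt stand for the per-rule packet/byte counters):
 *
 *	uint64_t pcnt = 0, bcnt = 0;
 *	const struct ip_fw *r;
 *
 *	for (r = rule_on_cpu0; r != NULL; r = r->sibling) {
 *		pcnt += r->pcnt;
 *		bcnt += r->bcnt;
 *	}
 */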
210
211 #define IPFW_AUTOINC_STEP_MIN   1
212 #define IPFW_AUTOINC_STEP_MAX   1000
213 #define IPFW_AUTOINC_STEP_DEF   100
214
215 #define IPFW_DEFAULT_RULE       65535   /* rulenum for the default rule */
216 #define IPFW_DEFAULT_SET        31      /* set number for the default rule */
217
218 struct netmsg_ipfw {
219         struct netmsg   nmsg;
220         const struct ipfw_ioc_rule *ioc_rule;
221         struct ip_fw    *next_rule;
222         struct ip_fw    *prev_rule;
223         struct ip_fw    *sibling;
224         struct ip_fw_stub *stub;
225 };
226
227 struct netmsg_del {
228         struct netmsg   nmsg;
229         struct ip_fw    *start_rule;
230         struct ip_fw    *prev_rule;
231         uint16_t        rulenum;
232         uint8_t         from_set;
233         uint8_t         to_set;
234 };
235
236 struct netmsg_zent {
237         struct netmsg   nmsg;
238         struct ip_fw    *start_rule;
239         uint16_t        rulenum;
240         uint16_t        log_only;
241 };
242
243 struct ipfw_context {
244         struct ip_fw    *ipfw_layer3_chain;     /* list of rules for layer3 */
245         struct ip_fw    *ipfw_default_rule;     /* default rule */
246         uint64_t        ipfw_norule_counter;    /* counter for ipfw_log(NULL) */
247
248         /*
249          * ipfw_set_disable contains one bit per set value (0..31).
250          * If the bit is set, all rules with the corresponding set
251          * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
252          * default rule and CANNOT be disabled.
253          */
254         uint32_t        ipfw_set_disable;
255         uint32_t        ipfw_gen;               /* generation of rule list */
256 };
257
258 static struct ipfw_context      *ipfw_ctx[MAXCPU];
259
260 #ifdef KLD_MODULE
261 /*
262  * The module cannot be unloaded if there are references to
263  * certain rules of ipfw(4), e.g. from dummynet(4).
264  */
265 static int ipfw_refcnt;
266 #endif
267
268 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
269
270 /*
271  * The following two global variables are accessed and
272  * updated only on CPU0
273  */
274 static uint32_t static_count;   /* # of static rules */
275 static uint32_t static_ioc_len; /* bytes of static rules */
276
277 /*
278  * If 1, then ipfw static rules are being flushed,
279  * ipfw_chk() will skip to the default rule.
280  */
281 static int ipfw_flushing;
282
283 static int fw_verbose;
284 static int verbose_limit;
285
286 static int fw_debug = 1;
287 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
288
289 static int      ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
290 static int      ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
291 static int      ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
292 static int      ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);
293
294 #ifdef SYSCTL_NODE
295 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
296 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, enable, CTLFLAG_RW,
297     &fw_enable, 0, "Enable ipfw");
298 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
299     &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
300     "Rule number autincrement step");
301 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
302     &fw_one_pass, 0,
303     "Only do a single pass through ipfw when using dummynet(4)");
304 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
305     &fw_debug, 0, "Enable printing of debug ip_fw statements");
306 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
307     &fw_verbose, 0, "Log matches to ipfw rules");
308 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
309     &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
310
311 /*
312  * Description of dynamic rules.
313  *
314  * Dynamic rules are stored in lists accessed through a hash table
315  * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
316  * be modified through the sysctl variable dyn_buckets; the change
317  * takes effect when the table becomes empty.
318  *
319  * XXX currently there is only one list, ipfw_dyn.
320  *
321  * When a packet is received, its address fields are first masked
322  * with the mask defined for the rule, then hashed, then matched
323  * against the entries in the corresponding list.
324  * Dynamic rules can be used for different purposes:
325  *  + stateful rules;
326  *  + enforcing limits on the number of sessions;
327  *  + in-kernel NAT (not implemented yet)
328  *
329  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
330  * measured in seconds and depending on the flags.
331  *
332  * The total number of dynamic rules is stored in dyn_count.
333  * The max number of dynamic rules is dyn_max. When we reach
334  * the maximum number of rules we do not create any more. This is
335  * done to avoid consuming too much memory, but also too much
336  * time when searching on each packet (ideally, we should try instead
337  * to put a limit on the length of the list on each bucket...).
338  *
339  * Each dynamic rule holds a pointer to the parent ipfw rule so
340  * we know what action to perform. Dynamic rules are removed when
341  * the parent rule is deleted. XXX we should make them survive.
342  *
343  * There are some limitations with dynamic rules -- we do not
344  * obey the 'randomized match', and we do not do multiple
345  * passes through the firewall. XXX check the latter!!!
346  *
347  * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
348  * Only TCP state transitions will change a dynamic rule's state and ack
349  * sequences, and all packets of one TCP connection only go through
350  * one TCP thread, so it is safe to use a shared lockmgr lock during
351  * dynamic rule lookup.  The keepalive callout uses an exclusive lockmgr
352  * lock when it tries to find suitable dynamic rules to send keepalives,
353  * so it will not see half-updated state and ack sequences.  Though
354  * updating the expire field looks racy for other protocols, the one-second
355  * resolution of the expire field makes this kind of race harmless.
356  * XXX statistics' updating is _not_ MPsafe!!!
357  * XXX once UDP output path is fixed, we could use lockless dynamic rule
358  *     hash table
359  */
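/*
 * Schematically, the lookup described above goes through the following
 * steps (illustrative pseudo-code; apply_mask() and flow_matches() are
 * placeholders for the masking and the bidirectional comparison that the
 * real code performs inline):
 *
 *	id = args->f_id;
 *	apply_mask(&id, mask);                  (mask address/port fields)
 *	i = hash_packet(&id);                   (pick a bucket)
 *	for (q = ipfw_dyn_v[i]; q != NULL; q = q->next)
 *		if (flow_matches(&id, &q->id))
 *			return q;               (forward or reverse match)
 */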
360 static ipfw_dyn_rule **ipfw_dyn_v = NULL;
361 static uint32_t dyn_buckets = 256; /* must be power of 2 */
362 static uint32_t curr_dyn_buckets = 256; /* must be power of 2 */
363 static uint32_t dyn_buckets_gen; /* generation of dyn buckets array */
364 static struct lock dyn_lock; /* dynamic rules' hash table lock */
365 static struct callout ipfw_timeout_h;
366
367 /*
368  * Timeouts for various events in handling dynamic rules.
369  */
370 static uint32_t dyn_ack_lifetime = 300;
371 static uint32_t dyn_syn_lifetime = 20;
372 static uint32_t dyn_fin_lifetime = 1;
373 static uint32_t dyn_rst_lifetime = 1;
374 static uint32_t dyn_udp_lifetime = 10;
375 static uint32_t dyn_short_lifetime = 5;
376
377 /*
378  * Keepalives are sent if dyn_keepalive is set. They are sent every
379  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
380  * seconds of lifetime of a rule.
381  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
382  * than dyn_keepalive_period.
383  */
384
385 static uint32_t dyn_keepalive_interval = 20;
386 static uint32_t dyn_keepalive_period = 5;
387 static uint32_t dyn_keepalive = 1;      /* do send keepalives */
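/*
 * With the defaults above, a dynamic rule about to expire is probed
 * every dyn_keepalive_period (5) seconds during its last
 * dyn_keepalive_interval (20) seconds, i.e. at most 20 / 5 = 4
 * keepalive attempts before the rule is finally removed.
 */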
388
389 static uint32_t dyn_count;              /* # of dynamic rules */
390 static uint32_t dyn_max = 4096;         /* max # of dynamic rules */
391
392 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
393     &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
394 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
395     &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
396 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
397     &dyn_count, 0, "Number of dyn. rules");
398 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
399     &dyn_max, 0, "Max number of dyn. rules");
400 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
401     &static_count, 0, "Number of static rules");
402 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
403     &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
404 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
405     &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
406 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
407     CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
408     "Lifetime of dyn. rules for fin");
409 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
410     CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
411     "Lifetime of dyn. rules for rst");
412 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
413     &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
414 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
415     &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
416 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
417     &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
418
419 #endif /* SYSCTL_NODE */
420
421 static ip_fw_chk_t      ipfw_chk;
422
423 static __inline int
424 ipfw_free_rule(struct ip_fw *rule)
425 {
426         KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
427         KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
428         rule->refcnt--;
429         if (rule->refcnt == 0) {
430                 kfree(rule, M_IPFW);
431                 return 1;
432         }
433         return 0;
434 }
435
436 static void
437 ipfw_unref_rule(void *priv)
438 {
439         ipfw_free_rule(priv);
440 #ifdef KLD_MODULE
441         atomic_subtract_int(&ipfw_refcnt, 1);
442 #endif
443 }
444
445 static __inline void
446 ipfw_ref_rule(struct ip_fw *rule)
447 {
448         KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));
449 #ifdef KLD_MODULE
450         atomic_add_int(&ipfw_refcnt, 1);
451 #endif
452         rule->refcnt++;
453 }
454
455 /*
456  * This macro maps an ip pointer into a layer3 header pointer of type T
457  */
458 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))
459
460 static __inline int
461 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
462 {
463         int type = L3HDR(struct icmp,ip)->icmp_type;
464
465         return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
466 }
467
468 #define TT      ((1 << ICMP_ECHO) | \
469                  (1 << ICMP_ROUTERSOLICIT) | \
470                  (1 << ICMP_TSTAMP) | \
471                  (1 << ICMP_IREQ) | \
472                  (1 << ICMP_MASKREQ))
473
474 static int
475 is_icmp_query(struct ip *ip)
476 {
477         int type = L3HDR(struct icmp, ip)->icmp_type;
478
479         return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
480 }
481
482 #undef TT
483
484 /*
485  * The following checks use two arrays of 8 or 16 bits to store the
486  * bits that we want set or clear, respectively. They are in the
487  * low and high half of cmd->arg1 or cmd->d[0].
488  *
489  * We scan options and store the bits we find set. We succeed if
490  *
491  *      (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
492  *
493  * The code is sometimes optimized not to store additional variables.
494  */
495
496 static int
497 flags_match(ipfw_insn *cmd, uint8_t bits)
498 {
499         u_char want_clear;
500         bits = ~bits;
501
502         if (((cmd->arg1 & 0xff) & bits) != 0)
503                 return 0; /* some bits we want set were clear */
504
505         want_clear = (cmd->arg1 >> 8) & 0xff;
506         if ((want_clear & bits) != want_clear)
507                 return 0; /* some bits we want clear were set */
508         return 1;
509 }
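/*
 * Worked example: with cmd->arg1 = 0x1003 the low byte (0x03) is the
 * "want set" mask and the high byte (0x10) is the "want clear" mask.
 * For observed option bits = 0x0b:
 *
 *	want_set   & ~bits = 0x03 & 0xf4 = 0x00    (all wanted bits are set)
 *	want_clear & ~bits = 0x10 & 0xf4 = 0x10    (no unwanted bit is set)
 *
 * so flags_match() returns 1; observed bits = 0x12 would fail both tests.
 */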
510
511 static int
512 ipopts_match(struct ip *ip, ipfw_insn *cmd)
513 {
514         int optlen, bits = 0;
515         u_char *cp = (u_char *)(ip + 1);
516         int x = (ip->ip_hl << 2) - sizeof(struct ip);
517
518         for (; x > 0; x -= optlen, cp += optlen) {
519                 int opt = cp[IPOPT_OPTVAL];
520
521                 if (opt == IPOPT_EOL)
522                         break;
523
524                 if (opt == IPOPT_NOP) {
525                         optlen = 1;
526                 } else {
527                         optlen = cp[IPOPT_OLEN];
528                         if (optlen <= 0 || optlen > x)
529                                 return 0; /* invalid or truncated */
530                 }
531
532                 switch (opt) {
533                 case IPOPT_LSRR:
534                         bits |= IP_FW_IPOPT_LSRR;
535                         break;
536
537                 case IPOPT_SSRR:
538                         bits |= IP_FW_IPOPT_SSRR;
539                         break;
540
541                 case IPOPT_RR:
542                         bits |= IP_FW_IPOPT_RR;
543                         break;
544
545                 case IPOPT_TS:
546                         bits |= IP_FW_IPOPT_TS;
547                         break;
548
549                 default:
550                         break;
551                 }
552         }
553         return (flags_match(cmd, bits));
554 }
555
556 static int
557 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
558 {
559         int optlen, bits = 0;
560         struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
561         u_char *cp = (u_char *)(tcp + 1);
562         int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
563
564         for (; x > 0; x -= optlen, cp += optlen) {
565                 int opt = cp[0];
566
567                 if (opt == TCPOPT_EOL)
568                         break;
569
570                 if (opt == TCPOPT_NOP) {
571                         optlen = 1;
572                 } else {
573                         optlen = cp[1];
574                         if (optlen <= 0)
575                                 break;
576                 }
577
578                 switch (opt) {
579                 case TCPOPT_MAXSEG:
580                         bits |= IP_FW_TCPOPT_MSS;
581                         break;
582
583                 case TCPOPT_WINDOW:
584                         bits |= IP_FW_TCPOPT_WINDOW;
585                         break;
586
587                 case TCPOPT_SACK_PERMITTED:
588                 case TCPOPT_SACK:
589                         bits |= IP_FW_TCPOPT_SACK;
590                         break;
591
592                 case TCPOPT_TIMESTAMP:
593                         bits |= IP_FW_TCPOPT_TS;
594                         break;
595
596                 case TCPOPT_CC:
597                 case TCPOPT_CCNEW:
598                 case TCPOPT_CCECHO:
599                         bits |= IP_FW_TCPOPT_CC;
600                         break;
601
602                 default:
603                         break;
604                 }
605         }
606         return (flags_match(cmd, bits));
607 }
608
609 static int
610 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
611 {
612         if (ifp == NULL)        /* no iface with this packet, match fails */
613                 return 0;
614
615         /* Check by name or by IP address */
616         if (cmd->name[0] != '\0') { /* match by name */
617                 /* Check name */
618                 if (cmd->p.glob) {
619                         if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
620                                 return(1);
621                 } else {
622                         if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
623                                 return(1);
624                 }
625         } else {
626                 struct ifaddr_container *ifac;
627
628                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
629                         struct ifaddr *ia = ifac->ifa;
630
631                         if (ia->ifa_addr == NULL)
632                                 continue;
633                         if (ia->ifa_addr->sa_family != AF_INET)
634                                 continue;
635                         if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
636                             (ia->ifa_addr))->sin_addr.s_addr)
637                                 return(1);      /* match */
638                 }
639         }
640         return(0);      /* no match, fail ... */
641 }
642
643 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
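/*
 * SNPARGS(buf, len) expands to "buf + len" plus the space left in buf,
 * so a log line can be assembled incrementally, e.g. (sketch of the
 * usage pattern below):
 *
 *	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", ...);
 *	ksnprintf(SNPARGS(proto, len), ":%d", ...);
 */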
644
645 /*
646  * We enter here when we have a rule with O_LOG.
647  * XXX this function alone takes about 2Kbytes of code!
648  */
649 static void
650 ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
651          struct mbuf *m, struct ifnet *oif)
652 {
653         char *action;
654         int limit_reached = 0;
655         char action2[40], proto[48], fragment[28];
656
657         fragment[0] = '\0';
658         proto[0] = '\0';
659
660         if (f == NULL) {        /* bogus pkt */
661                 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
662
663                 if (verbose_limit != 0 &&
664                     ctx->ipfw_norule_counter >= verbose_limit)
665                         return;
666                 ctx->ipfw_norule_counter++;
667                 if (ctx->ipfw_norule_counter == verbose_limit)
668                         limit_reached = verbose_limit;
669                 action = "Refuse";
670         } else {        /* O_LOG is the first action, find the real one */
671                 ipfw_insn *cmd = ACTION_PTR(f);
672                 ipfw_insn_log *l = (ipfw_insn_log *)cmd;
673
674                 if (l->max_log != 0 && l->log_left == 0)
675                         return;
676                 l->log_left--;
677                 if (l->log_left == 0)
678                         limit_reached = l->max_log;
679                 cmd += F_LEN(cmd);      /* point to first action */
680                 if (cmd->opcode == O_PROB)
681                         cmd += F_LEN(cmd);
682
683                 action = action2;
684                 switch (cmd->opcode) {
685                 case O_DENY:
686                         action = "Deny";
687                         break;
688
689                 case O_REJECT:
690                         if (cmd->arg1==ICMP_REJECT_RST) {
691                                 action = "Reset";
692                         } else if (cmd->arg1==ICMP_UNREACH_HOST) {
693                                 action = "Reject";
694                         } else {
695                                 ksnprintf(SNPARGS(action2, 0), "Unreach %d",
696                                           cmd->arg1);
697                         }
698                         break;
699
700                 case O_ACCEPT:
701                         action = "Accept";
702                         break;
703
704                 case O_COUNT:
705                         action = "Count";
706                         break;
707
708                 case O_DIVERT:
709                         ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
710                         break;
711
712                 case O_TEE:
713                         ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
714                         break;
715
716                 case O_SKIPTO:
717                         ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
718                         break;
719
720                 case O_PIPE:
721                         ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
722                         break;
723
724                 case O_QUEUE:
725                         ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
726                         break;
727
728                 case O_FORWARD_IP:
729                         {
730                                 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
731                                 int len;
732
733                                 len = ksnprintf(SNPARGS(action2, 0),
734                                                 "Forward to %s",
735                                                 inet_ntoa(sa->sa.sin_addr));
736                                 if (sa->sa.sin_port) {
737                                         ksnprintf(SNPARGS(action2, len), ":%d",
738                                                   sa->sa.sin_port);
739                                 }
740                         }
741                         break;
742
743                 default:
744                         action = "UNKNOWN";
745                         break;
746                 }
747         }
748
749         if (hlen == 0) {        /* non-ip */
750                 ksnprintf(SNPARGS(proto, 0), "MAC");
751         } else {
752                 struct ip *ip = mtod(m, struct ip *);
753                 /* these three are all aliases to the same thing */
754                 struct icmp *const icmp = L3HDR(struct icmp, ip);
755                 struct tcphdr *const tcp = (struct tcphdr *)icmp;
756                 struct udphdr *const udp = (struct udphdr *)icmp;
757
758                 int ip_off, offset, ip_len;
759                 int len;
760
761                 if (eh != NULL) { /* layer 2 packets are as on the wire */
762                         ip_off = ntohs(ip->ip_off);
763                         ip_len = ntohs(ip->ip_len);
764                 } else {
765                         ip_off = ip->ip_off;
766                         ip_len = ip->ip_len;
767                 }
768                 offset = ip_off & IP_OFFMASK;
769                 switch (ip->ip_p) {
770                 case IPPROTO_TCP:
771                         len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
772                                         inet_ntoa(ip->ip_src));
773                         if (offset == 0) {
774                                 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
775                                           ntohs(tcp->th_sport),
776                                           inet_ntoa(ip->ip_dst),
777                                           ntohs(tcp->th_dport));
778                         } else {
779                                 ksnprintf(SNPARGS(proto, len), " %s",
780                                           inet_ntoa(ip->ip_dst));
781                         }
782                         break;
783
784                 case IPPROTO_UDP:
785                         len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
786                                         inet_ntoa(ip->ip_src));
787                         if (offset == 0) {
788                                 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
789                                           ntohs(udp->uh_sport),
790                                           inet_ntoa(ip->ip_dst),
791                                           ntohs(udp->uh_dport));
792                         } else {
793                                 ksnprintf(SNPARGS(proto, len), " %s",
794                                           inet_ntoa(ip->ip_dst));
795                         }
796                         break;
797
798                 case IPPROTO_ICMP:
799                         if (offset == 0) {
800                                 len = ksnprintf(SNPARGS(proto, 0),
801                                                 "ICMP:%u.%u ",
802                                                 icmp->icmp_type,
803                                                 icmp->icmp_code);
804                         } else {
805                                 len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
806                         }
807                         len += ksnprintf(SNPARGS(proto, len), "%s",
808                                          inet_ntoa(ip->ip_src));
809                         ksnprintf(SNPARGS(proto, len), " %s",
810                                   inet_ntoa(ip->ip_dst));
811                         break;
812
813                 default:
814                         len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
815                                         inet_ntoa(ip->ip_src));
816                         ksnprintf(SNPARGS(proto, len), " %s",
817                                   inet_ntoa(ip->ip_dst));
818                         break;
819                 }
820
821                 if (ip_off & (IP_MF | IP_OFFMASK)) {
822                         ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
823                                   ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
824                                   offset << 3, (ip_off & IP_MF) ? "+" : "");
825                 }
826         }
827
828         if (oif || m->m_pkthdr.rcvif) {
829                 log(LOG_SECURITY | LOG_INFO,
830                     "ipfw: %d %s %s %s via %s%s\n",
831                     f ? f->rulenum : -1,
832                     action, proto, oif ? "out" : "in",
833                     oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
834                     fragment);
835         } else {
836                 log(LOG_SECURITY | LOG_INFO,
837                     "ipfw: %d %s %s [no if info]%s\n",
838                     f ? f->rulenum : -1,
839                     action, proto, fragment);
840         }
841
842         if (limit_reached) {
843                 log(LOG_SECURITY | LOG_NOTICE,
844                     "ipfw: limit %d reached on entry %d\n",
845                     limit_reached, f ? f->rulenum : -1);
846         }
847 }
848
849 #undef SNPARGS
850
851 /*
852  * IMPORTANT: the hash function for dynamic rules must be commutative
853  * in source and destination (ip,port), because rules are bidirectional
854  * and we want to find both in the same bucket.
855  */
856 static __inline int
857 hash_packet(struct ipfw_flow_id *id)
858 {
859         uint32_t i;
860
861         i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
862         i &= (curr_dyn_buckets - 1);
863         return i;
864 }
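/*
 * Since XOR is commutative, swapping (src_ip, src_port) with
 * (dst_ip, dst_port) yields the same bucket, e.g. (sketch):
 *
 *	hash(10.0.0.1:1025 -> 10.0.0.2:80) == hash(10.0.0.2:80 -> 10.0.0.1:1025)
 *
 * which is what allows lookup_dyn_rule() to find a state from either
 * direction of the flow.
 */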
865
866 /**
867  * unlink a dynamic rule from a chain. prev is a pointer to
868  * the previous one, q is a pointer to the rule to delete,
869  * head is a pointer to the head of the queue.
870  * Modifies q and potentially also head.
871  */
872 #define UNLINK_DYN_RULE(prev, head, q)                                  \
873 do {                                                                    \
874         ipfw_dyn_rule *old_q = q;                                       \
875                                                                         \
876         /* remove a refcount to the parent */                           \
877         if (q->dyn_type == O_LIMIT)                                     \
878                 q->parent->count--;                                     \
879         DEB(kprintf("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n", \
880                 (q->id.src_ip), (q->id.src_port),                       \
881                 (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); )      \
882         if (prev != NULL)                                               \
883                 prev->next = q = q->next;                               \
884         else                                                            \
885                 head = q = q->next;                                     \
886         KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count));  \
887         dyn_count--;                                                    \
888         kfree(old_q, M_IPFW);                                           \
889 } while (0)
890
891 #define TIME_LEQ(a, b)  ((int)((a) - (b)) <= 0)
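/*
 * TIME_LEQ() compares through a signed difference, so it stays correct
 * even if the counter wraps, e.g. (sketch):
 *
 *	a = 0xfffffff0, b = 0x00000010
 *	(int)(a - b) = (int)0xffffffe0 < 0   =>   TIME_LEQ(a, b) is true
 */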
892
893 /**
894  * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
895  *
896  * If keep_me == NULL, rules are deleted even if not expired,
897  * otherwise only expired rules are removed.
898  *
899  * The value of the second parameter is also used to identify
900  * a rule we absolutely do not want to remove (e.g. because we are
901  * holding a reference to it -- this is the case with O_LIMIT_PARENT
902  * rules). The pointer is only used for comparison, so any non-null
903  * value will do.
904  */
905 static void
906 remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
907 {
908         static uint32_t last_remove = 0; /* XXX */
909
910 #define FORCE   (keep_me == NULL)
911
912         ipfw_dyn_rule *prev, *q;
913         int i, pass = 0, max_pass = 0, unlinked = 0;
914
915         if (ipfw_dyn_v == NULL || dyn_count == 0)
916                 return;
917         /* do not expire more than once per second, it is useless */
918         if (!FORCE && last_remove == time_second)
919                 return;
920         last_remove = time_second;
921
922         /*
923          * Because O_LIMIT rules refer to parent rules, during the first pass
924          * we only remove child rules and mark any pending O_LIMIT_PARENT
925          * rules, then remove those in a second pass.
926          */
927 next_pass:
928         for (i = 0; i < curr_dyn_buckets; i++) {
929                 for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
930                         /*
931                          * Logic can become complex here, so we split tests.
932                          */
933                         if (q == keep_me)
934                                 goto next;
935                         if (rule != NULL && rule->stub != q->stub)
936                                 goto next; /* not the one we are looking for */
937                         if (q->dyn_type == O_LIMIT_PARENT) {
938                                 /*
939                                  * handle parent in the second pass,
940                                  * record we need one.
941                                  */
942                                 max_pass = 1;
943                                 if (pass == 0)
944                                         goto next;
945                                 if (FORCE && q->count != 0) {
946                                         /* XXX should not happen! */
947                                         kprintf("OUCH! cannot remove rule, "
948                                                 "count %d\n", q->count);
949                                 }
950                         } else {
951                                 if (!FORCE && !TIME_LEQ(q->expire, time_second))
952                                         goto next;
953                         }
954                         unlinked = 1;
955                         UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
956                         continue;
957 next:
958                         prev = q;
959                         q = q->next;
960                 }
961         }
962         if (pass++ < max_pass)
963                 goto next_pass;
964
965         if (unlinked)
966                 ++dyn_buckets_gen;
967
968 #undef FORCE
969 }
970
971 /**
972  * lookup a dynamic rule.
973  */
974 static ipfw_dyn_rule *
975 lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
976                 struct tcphdr *tcp)
977 {
978         /*
979          * stateful ipfw extensions.
980          * Lookup into dynamic session queue
981          */
982 #define MATCH_REVERSE   0
983 #define MATCH_FORWARD   1
984 #define MATCH_NONE      2
985 #define MATCH_UNKNOWN   3
986         int i, dir = MATCH_NONE;
987         ipfw_dyn_rule *prev, *q=NULL;
988
989         if (ipfw_dyn_v == NULL)
990                 goto done;      /* not found */
991
992         i = hash_packet(pkt);
993         for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
994                 if (q->dyn_type == O_LIMIT_PARENT)
995                         goto next;
996
997                 if (TIME_LEQ(q->expire, time_second)) {
998                         /*
999                          * Entry expired; skip.
1000                          * Let ipfw_tick() take care of it
1001                          */
1002                         goto next;
1003                 }
1004
1005                 if (pkt->proto == q->id.proto) {
1006                         if (pkt->src_ip == q->id.src_ip &&
1007                             pkt->dst_ip == q->id.dst_ip &&
1008                             pkt->src_port == q->id.src_port &&
1009                             pkt->dst_port == q->id.dst_port) {
1010                                 dir = MATCH_FORWARD;
1011                                 break;
1012                         }
1013                         if (pkt->src_ip == q->id.dst_ip &&
1014                             pkt->dst_ip == q->id.src_ip &&
1015                             pkt->src_port == q->id.dst_port &&
1016                             pkt->dst_port == q->id.src_port) {
1017                                 dir = MATCH_REVERSE;
1018                                 break;
1019                         }
1020                 }
1021 next:
1022                 prev = q;
1023                 q = q->next;
1024         }
1025         if (q == NULL)
1026                 goto done; /* q = NULL, not found */
1027
1028         if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
1029                 u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
1030
1031 #define BOTH_SYN        (TH_SYN | (TH_SYN << 8))
1032 #define BOTH_FIN        (TH_FIN | (TH_FIN << 8))
1033
1034                 q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
1035                 switch (q->state) {
1036                 case TH_SYN:                            /* opening */
1037                         q->expire = time_second + dyn_syn_lifetime;
1038                         break;
1039
1040                 case BOTH_SYN:                  /* move to established */
1041                 case BOTH_SYN | TH_FIN :        /* one side tries to close */
1042                 case BOTH_SYN | (TH_FIN << 8) :
1043                         if (tcp) {
1044                                 uint32_t ack = ntohl(tcp->th_ack);
1045
1046 #define _SEQ_GE(a, b)   ((int)(a) - (int)(b) >= 0)
1047
1048                                 if (dir == MATCH_FORWARD) {
1049                                         if (q->ack_fwd == 0 ||
1050                                             _SEQ_GE(ack, q->ack_fwd))
1051                                                 q->ack_fwd = ack;
1052                                         else /* ignore out-of-sequence */
1053                                                 break;
1054                                 } else {
1055                                         if (q->ack_rev == 0 ||
1056                                             _SEQ_GE(ack, q->ack_rev))
1057                                                 q->ack_rev = ack;
1058                                         else /* ignore out-of-sequence */
1059                                                 break;
1060                                 }
1061 #undef _SEQ_GE
1062                         }
1063                         q->expire = time_second + dyn_ack_lifetime;
1064                         break;
1065
1066                 case BOTH_SYN | BOTH_FIN:       /* both sides closed */
1067                         KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
1068                         q->expire = time_second + dyn_fin_lifetime;
1069                         break;
1070
1071                 default:
1072 #if 0
1073                         /*
1074                          * reset or some invalid combination, but can also
1075                          * occur if we use keep-state the wrong way.
1076                          */
1077                         if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
1078                                 kprintf("invalid state: 0x%x\n", q->state);
1079 #endif
1080                         KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
1081                         q->expire = time_second + dyn_rst_lifetime;
1082                         break;
1083                 }
1084         } else if (pkt->proto == IPPROTO_UDP) {
1085                 q->expire = time_second + dyn_udp_lifetime;
1086         } else {
1087                 /* other protocols */
1088                 q->expire = time_second + dyn_short_lifetime;
1089         }
1090 done:
1091         if (match_direction)
1092                 *match_direction = dir;
1093         return q;
1094 }
1095
1096 static struct ip_fw *
1097 lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
1098             uint16_t len, int *deny)
1099 {
1100         struct ip_fw *rule = NULL;
1101         ipfw_dyn_rule *q;
1102         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1103         uint32_t gen;
1104
1105         *deny = 0;
1106         gen = ctx->ipfw_gen;
1107
1108         lockmgr(&dyn_lock, LK_SHARED);
1109
1110         if (ctx->ipfw_gen != gen) {
1111                 /*
1112                  * Static rules were changed while we were waiting
1113                  * for the dynamic hash table lock; deny this packet,
1114                  * since it is _not_ known whether it is safe to keep
1115                  * iterating the static rules.
1116                  */
1117                 *deny = 1;
1118                 goto back;
1119         }
1120
1121         q = lookup_dyn_rule(pkt, match_direction, tcp);
1122         if (q == NULL) {
1123                 rule = NULL;
1124         } else {
1125                 rule = q->stub->rule[mycpuid];
1126                 KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);
1127
1128                 /* XXX */
1129                 q->pcnt++;
1130                 q->bcnt += len;
1131         }
1132 back:
1133         lockmgr(&dyn_lock, LK_RELEASE);
1134         return rule;
1135 }
1136
1137 static void
1138 realloc_dynamic_table(void)
1139 {
1140         ipfw_dyn_rule **old_dyn_v;
1141         uint32_t old_curr_dyn_buckets;
1142
1143         KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
1144                 ("invalid dyn_buckets %d\n", dyn_buckets));
1145
1146         /* Save the current buckets array for later error recovery */
1147         old_dyn_v = ipfw_dyn_v;
1148         old_curr_dyn_buckets = curr_dyn_buckets;
1149
1150         curr_dyn_buckets = dyn_buckets;
1151         for (;;) {
1152                 ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
1153                                      M_IPFW, M_NOWAIT | M_ZERO);
1154                 if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
1155                         break;
1156
1157                 curr_dyn_buckets /= 2;
1158                 if (curr_dyn_buckets <= old_curr_dyn_buckets &&
1159                     old_dyn_v != NULL) {
1160                         /*
1161                          * Don't try allocating a smaller buckets array; reuse
1162                          * the old one, which already contains enough buckets.
1163                          */
1164                         break;
1165                 }
1166         }
1167
1168         if (ipfw_dyn_v != NULL) {
1169                 if (old_dyn_v != NULL)
1170                         kfree(old_dyn_v, M_IPFW);
1171         } else {
1172                 /* Allocation failed, restore old buckets array */
1173                 ipfw_dyn_v = old_dyn_v;
1174                 curr_dyn_buckets = old_curr_dyn_buckets;
1175         }
1176
1177         if (ipfw_dyn_v != NULL)
1178                 ++dyn_buckets_gen;
1179 }
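/*
 * Note that curr_dyn_buckets stays a power of 2 here (the requested value
 * is checked by the KASSERT above and the retry loop only halves it), which
 * hash_packet() relies on when it reduces the hash with
 * "i &= curr_dyn_buckets - 1", e.g. a mask of 0xff for 256 buckets.
 */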
1180
1181 /**
1182  * Install state of type 'type' for a dynamic session.
1183  * The hash table contains three types of rules:
1184  * - regular rules (O_KEEP_STATE)
1185  * - rules for sessions with a limited number of sessions per user
1186  *   (O_LIMIT). When they are created, the parent's count is
1187  *   increased by 1, and decreased on delete. In this case,
1188  *   the third parameter is the parent rule and not the chain.
1189  * - "parent" rules for the above (O_LIMIT_PARENT).
1190  */
1191 static ipfw_dyn_rule *
1192 add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
1193 {
1194         ipfw_dyn_rule *r;
1195         int i;
1196
1197         if (ipfw_dyn_v == NULL ||
1198             (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
1199                 realloc_dynamic_table();
1200                 if (ipfw_dyn_v == NULL)
1201                         return NULL; /* failed ! */
1202         }
1203         i = hash_packet(id);
1204
1205         r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
1206         if (r == NULL) {
1207                 kprintf ("sorry cannot allocate state\n");
1208                 return NULL;
1209         }
1210
1211         /* increase refcount on parent, and set pointer */
1212         if (dyn_type == O_LIMIT) {
1213                 ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
1214
1215                 if (parent->dyn_type != O_LIMIT_PARENT)
1216                         panic("invalid parent");
1217                 parent->count++;
1218                 r->parent = parent;
1219                 rule = parent->stub->rule[mycpuid];
1220                 KKASSERT(rule->stub == parent->stub);
1221         }
1222         KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);
1223
1224         r->id = *id;
1225         r->expire = time_second + dyn_syn_lifetime;
1226         r->stub = rule->stub;
1227         r->dyn_type = dyn_type;
1228         r->pcnt = r->bcnt = 0;
1229         r->count = 0;
1230
1231         r->bucket = i;
1232         r->next = ipfw_dyn_v[i];
1233         ipfw_dyn_v[i] = r;
1234         dyn_count++;
1235         dyn_buckets_gen++;
1236         DEB(kprintf("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
1237            dyn_type,
1238            (r->id.src_ip), (r->id.src_port),
1239            (r->id.dst_ip), (r->id.dst_port),
1240            dyn_count );)
1241         return r;
1242 }
1243
1244 /**
1245  * lookup dynamic parent rule using pkt and rule as search keys.
1246  * If the lookup fails, then install one.
1247  */
1248 static ipfw_dyn_rule *
1249 lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
1250 {
1251         ipfw_dyn_rule *q;
1252         int i;
1253
1254         if (ipfw_dyn_v) {
1255                 i = hash_packet(pkt);
1256                 for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
1257                         if (q->dyn_type == O_LIMIT_PARENT &&
1258                             rule->stub == q->stub &&
1259                             pkt->proto == q->id.proto &&
1260                             pkt->src_ip == q->id.src_ip &&
1261                             pkt->dst_ip == q->id.dst_ip &&
1262                             pkt->src_port == q->id.src_port &&
1263                             pkt->dst_port == q->id.dst_port) {
1264                                 q->expire = time_second + dyn_short_lifetime;
1265                                 DEB(kprintf("lookup_dyn_parent found 0x%p\n",q);)
1266                                 return q;
1267                         }
1268                 }
1269         }
1270         return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
1271 }
1272
1273 /**
1274  * Install dynamic state for rule type cmd->o.opcode
1275  *
1276  * Returns 1 (failure) if state is not installed because of errors or because
1277  * session limitations are enforced.
1278  */
1279 static int
1280 install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
1281                      struct ip_fw_args *args)
1282 {
1283         static int last_log; /* XXX */
1284
1285         ipfw_dyn_rule *q;
1286
1287         DEB(kprintf("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
1288             cmd->o.opcode,
1289             (args->f_id.src_ip), (args->f_id.src_port),
1290             (args->f_id.dst_ip), (args->f_id.dst_port) );)
1291
1292         q = lookup_dyn_rule(&args->f_id, NULL, NULL);
1293         if (q != NULL) { /* should never occur */
1294                 if (last_log != time_second) {
1295                         last_log = time_second;
1296                         kprintf(" install_state: entry already present, done\n");
1297                 }
1298                 return 0;
1299         }
1300
1301         if (dyn_count >= dyn_max) {
1302                 /*
1303                  * Run out of slots, try to remove any expired rule.
1304                  */
1305                 remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
1306                 if (dyn_count >= dyn_max) {
1307                         if (last_log != time_second) {
1308                                 last_log = time_second;
1309                                 kprintf("install_state: "
1310                                         "Too many dynamic rules\n");
1311                         }
1312                         return 1; /* cannot install, notify caller */
1313                 }
1314         }
1315
1316         switch (cmd->o.opcode) {
1317         case O_KEEP_STATE: /* bidir rule */
1318                 if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
1319                         return 1;
1320                 break;
1321
1322         case O_LIMIT: /* limit number of sessions */
1323                 {
1324                         uint16_t limit_mask = cmd->limit_mask;
1325                         struct ipfw_flow_id id;
1326                         ipfw_dyn_rule *parent;
1327
1328                         DEB(kprintf("installing dyn-limit rule %d\n",
1329                             cmd->conn_limit);)
1330
1331                         id.dst_ip = id.src_ip = 0;
1332                         id.dst_port = id.src_port = 0;
1333                         id.proto = args->f_id.proto;
1334
1335                         if (limit_mask & DYN_SRC_ADDR)
1336                                 id.src_ip = args->f_id.src_ip;
1337                         if (limit_mask & DYN_DST_ADDR)
1338                                 id.dst_ip = args->f_id.dst_ip;
1339                         if (limit_mask & DYN_SRC_PORT)
1340                                 id.src_port = args->f_id.src_port;
1341                         if (limit_mask & DYN_DST_PORT)
1342                                 id.dst_port = args->f_id.dst_port;
1343
1344                         parent = lookup_dyn_parent(&id, rule);
1345                         if (parent == NULL) {
1346                                 kprintf("add parent failed\n");
1347                                 return 1;
1348                         }
1349
1350                         if (parent->count >= cmd->conn_limit) {
1351                                 /*
1352                                  * See if we can remove some expired rule.
1353                                  */
1354                                 remove_dyn_rule_locked(rule, parent);
1355                                 if (parent->count >= cmd->conn_limit) {
1356                                         if (fw_verbose &&
1357                                             last_log != time_second) {
1358                                                 last_log = time_second;
1359                                                 log(LOG_SECURITY | LOG_DEBUG,
1360                                                     "drop session, "
1361                                                     "too many entries\n");
1362                                         }
1363                                         return 1;
1364                                 }
1365                         }
1366                         if (add_dyn_rule(&args->f_id, O_LIMIT,
1367                                          (struct ip_fw *)parent) == NULL)
1368                                 return 1;
1369                 }
1370                 break;
1371         default:
1372                 kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
1373                 return 1;
1374         }
1375         lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
1376         return 0;
1377 }
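/*
 * Example of the O_LIMIT masking above: for a rule such as
 * "limit src-addr N" only DYN_SRC_ADDR is set in limit_mask, so the
 * parent flow id keeps just the source address; every session from one
 * source then shares a single O_LIMIT_PARENT entry whose count is
 * checked against cmd->conn_limit.
 */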
1378
1379 static int
1380 install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
1381               struct ip_fw_args *args, int *deny)
1382 {
1383         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1384         uint32_t gen;
1385         int ret = 0;
1386
1387         *deny = 0;
1388         gen = ctx->ipfw_gen;
1389
1390         lockmgr(&dyn_lock, LK_EXCLUSIVE);
1391         if (ctx->ipfw_gen != gen) {
1392                 /* See the comment in lookup_rule() */
1393                 *deny = 1;
1394         } else {
1395                 ret = install_state_locked(rule, cmd, args);
1396         }
1397         lockmgr(&dyn_lock, LK_RELEASE);
1398
1399         return ret;
1400 }
1401
1402 /*
1403  * Transmit a TCP packet containing either a RST or a keepalive.
1404  * When flags & TH_RST, we are sending a RST packet because a "reset"
1405  * action matched the packet.  Otherwise we are sending a keepalive,
1406  * and flags & TH_SYN selects its direction (forward when set).
1407  */
1408 static void
1409 send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
1410 {
1411         struct mbuf *m;
1412         struct ip *ip;
1413         struct tcphdr *tcp;
1414         struct route sro;       /* fake route */
1415
1416         MGETHDR(m, MB_DONTWAIT, MT_HEADER);
1417         if (m == NULL)
1418                 return;
1419         m->m_pkthdr.rcvif = NULL;
1420         m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
1421         m->m_data += max_linkhdr;
1422
1423         ip = mtod(m, struct ip *);
1424         bzero(ip, m->m_len);
1425         tcp = (struct tcphdr *)(ip + 1); /* no IP options */
1426         ip->ip_p = IPPROTO_TCP;
1427         tcp->th_off = 5;
1428
1429         /*
1430          * Assume we are sending a RST (or a keepalive in the reverse
1431          * direction), swap src and destination addresses and ports.
1432          */
1433         ip->ip_src.s_addr = htonl(id->dst_ip);
1434         ip->ip_dst.s_addr = htonl(id->src_ip);
1435         tcp->th_sport = htons(id->dst_port);
1436         tcp->th_dport = htons(id->src_port);
1437         if (flags & TH_RST) {   /* we are sending a RST */
1438                 if (flags & TH_ACK) {
1439                         tcp->th_seq = htonl(ack);
1440                         tcp->th_ack = htonl(0);
1441                         tcp->th_flags = TH_RST;
1442                 } else {
1443                         if (flags & TH_SYN)
1444                                 seq++;
1445                         tcp->th_seq = htonl(0);
1446                         tcp->th_ack = htonl(seq);
1447                         tcp->th_flags = TH_RST | TH_ACK;
1448                 }
1449         } else {
1450                 /*
1451                  * We are sending a keepalive. flags & TH_SYN determines
1452                  * the direction, forward if set, reverse if clear.
1453                  * NOTE: seq and ack are always assumed to be correct
1454                  * as set by the caller. This may be confusing...
1455                  */
1456                 if (flags & TH_SYN) {
1457                         /*
1458                          * we have to rewrite the correct addresses!
1459                          */
1460                         ip->ip_dst.s_addr = htonl(id->dst_ip);
1461                         ip->ip_src.s_addr = htonl(id->src_ip);
1462                         tcp->th_dport = htons(id->dst_port);
1463                         tcp->th_sport = htons(id->src_port);
1464                 }
1465                 tcp->th_seq = htonl(seq);
1466                 tcp->th_ack = htonl(ack);
1467                 tcp->th_flags = TH_ACK;
1468         }
1469
1470         /*
1471          * set ip_len to the payload size so we can compute
1472          * the tcp checksum on the pseudoheader
1473          * XXX check this, could save a couple of words ?
1474          */
1475         ip->ip_len = htons(sizeof(struct tcphdr));
1476         tcp->th_sum = in_cksum(m, m->m_pkthdr.len);
1477
1478         /*
1479          * now fill fields left out earlier
1480          */
1481         ip->ip_ttl = ip_defttl;
1482         ip->ip_len = m->m_pkthdr.len;
1483
1484         bzero(&sro, sizeof(sro));
1485         ip_rtaddr(ip->ip_dst, &sro);
1486
1487         m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
1488         ip_output(m, NULL, &sro, 0, NULL, NULL);
1489         if (sro.ro_rt)
1490                 RTFREE(sro.ro_rt);
1491 }
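
/*
 * Example (illustrative): the "reset" path in send_reject() below ends up
 * calling roughly
 *
 *	send_pkt(&args->f_id, ntohl(tcp->th_seq), ntohl(tcp->th_ack),
 *		 tcp->th_flags | TH_RST);
 *
 * while the dynamic-rule keepalive path typically calls send_pkt() twice,
 * once with TH_SYN set (forward direction) and once without (reverse).
 */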
1492
1493 /*
1494  * sends a reject message, consuming the mbuf passed as an argument.
1495  */
1496 static void
1497 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
1498 {
1499         if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
1500                 /* We need the IP header in host order for icmp_error(). */
1501                 if (args->eh != NULL) {
1502                         struct ip *ip = mtod(args->m, struct ip *);
1503
1504                         ip->ip_len = ntohs(ip->ip_len);
1505                         ip->ip_off = ntohs(ip->ip_off);
1506                 }
1507                 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
1508         } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
1509                 struct tcphdr *const tcp =
1510                     L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1511
1512                 if ((tcp->th_flags & TH_RST) == 0) {
1513                         send_pkt(&args->f_id, ntohl(tcp->th_seq),
1514                                  ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
1515                 }
1516                 m_freem(args->m);
1517         } else {
1518                 m_freem(args->m);
1519         }
1520         args->m = NULL;
1521 }
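
/*
 * Illustrative examples of rules that end up here (syntax per ipfw(8)):
 *
 *	ipfw add 300 unreach port tcp from any to me 79
 *	ipfw add 310 reset tcp from any to me 113
 *
 * The first sends an ICMP unreachable with the requested code; the second
 * (code == ICMP_REJECT_RST) answers TCP segments with a RST via send_pkt().
 */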
1522
1523 /**
1524  *
1525  * Given an ip_fw *, lookup_next_rule will return a pointer
1526  * to the next rule, which can be either the jump
1527  * target (for skipto instructions) or the next one in the list (in
1528  * all other cases including a missing jump target).
1529  * The result is also written in the "next_rule" field of the rule.
1530  * Backward jumps are not allowed, so start looking from the next
1531  * rule...
1532  *
1533  * This never returns NULL -- in case we do not have an exact match,
1534  * the next rule is returned. When the ruleset is changed,
1535  * pointers are flushed so we are always correct.
1536  */
1537
1538 static struct ip_fw *
1539 lookup_next_rule(struct ip_fw *me)
1540 {
1541         struct ip_fw *rule = NULL;
1542         ipfw_insn *cmd;
1543
1544         /* look for action, in case it is a skipto */
1545         cmd = ACTION_PTR(me);
1546         if (cmd->opcode == O_LOG)
1547                 cmd += F_LEN(cmd);
1548         if (cmd->opcode == O_SKIPTO) {
1549                 for (rule = me->next; rule; rule = rule->next) {
1550                         if (rule->rulenum >= cmd->arg1)
1551                                 break;
1552                 }
1553         }
1554         if (rule == NULL)                       /* failure or not a skipto */
1555                 rule = me->next;
1556         me->next_rule = rule;
1557         return rule;
1558 }
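
/*
 * Example (illustrative): with rules numbered 100 (skipto 300), 200 and 300,
 * lookup_next_rule() on rule 100 returns the rule numbered 300 (the first
 * rule with rulenum >= the skipto target), while on rule 200 it simply
 * returns rule 300 as the next rule in the list.  The result is cached in
 * 'next_rule' until ipfw_flush_rule_ptrs() clears it again.
 */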
1559
1560 /*
1561  * The main check routine for the firewall.
1562  *
1563  * All arguments are in args so we can modify them and return them
1564  * back to the caller.
1565  *
1566  * Parameters:
1567  *
1568  *      args->m (in/out) The packet; we set to NULL when/if we nuke it.
1569  *              Starts with the IP header.
1570  *      args->eh (in)   MAC header if present, or NULL for layer-3 packet.
1571  *      args->oif       Outgoing interface, or NULL if packet is incoming.
1572  *              The incoming interface is in the mbuf. (in)
1573  *
1574  *      args->rule      Pointer to the last matching rule (in/out)
1575  *      args->next_hop  Socket we are forwarding to (out).
1576  *      args->f_id      Addresses grabbed from the packet (out)
1577  *
1578  * Return value:
1579  *
1580  *      IP_FW_PORT_DENY_FLAG    the packet must be dropped.
1581  *      0       The packet is to be accepted and routed normally OR
1582  *              the packet was denied/rejected and has been dropped;
1583  *              in the latter case, *m is equal to NULL upon return.
1584  *      port    Divert the packet to port, with these caveats:
1585  *
1586  *              - If IP_FW_PORT_TEE_FLAG is set, tee the packet instead
1587  *                of diverting it (ie, 'ipfw tee').
1588  *
1589  *              - If IP_FW_PORT_DYNT_FLAG is set, interpret the lower
1590  *                16 bits as a dummynet pipe number instead of diverting it.
1591  */
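
#if 0
/*
 * Illustrative sketch only (not compiled): how a hypothetical caller could
 * decode the ipfw_chk() return value according to the contract documented
 * above.  The real callers (the ip_input/ip_output and ether hooks) differ
 * in detail; 'args' is assumed to be a fully initialized ip_fw_args.
 */
	int off = ipfw_chk(&args);

	if (off & IP_FW_PORT_DENY_FLAG) {
		if (args.m != NULL)
			m_freem(args.m);	/* denied/rejected */
	} else if (off == 0) {
		/* accept; args.next_hop may have been set by a 'fwd' rule */
	} else if (off & IP_FW_PORT_DYNT_FLAG) {
		/* hand the packet to dummynet; the lower 16 bits of 'off'
		 * select the pipe/queue (cf. ipfw_dummynet_io() below) */
	} else {
		/* divert or tee to port (off & 0xffff); IP_FW_PORT_TEE_FLAG
		 * distinguishes 'tee' from plain 'divert' */
	}
#endif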
1592
1593 static int
1594 ipfw_chk(struct ip_fw_args *args)
1595 {
1596         /*
1597          * Local variables hold state during the processing of a packet.
1598          *
1599          * IMPORTANT NOTE: to speed up the processing of rules, there
1600          * are some assumptions about the values of the variables, which
1601          * are documented here. Should you change them, please check
1602          * the implementation of the various instructions to make sure
1603          * that they still work.
1604          *
1605          * args->eh     The MAC header. It is non-null for a layer2
1606          *      packet, it is NULL for a layer-3 packet.
1607          *
1608          * m | args->m  Pointer to the mbuf, as received from the caller.
1609          *      It may change if ipfw_chk() does an m_pullup, or if it
1610          *      consumes the packet because it calls send_reject().
1611          *      XXX This has to change, so that ipfw_chk() never modifies
1612          *      or consumes the buffer.
1613          * ip   is simply an alias of the value of m, and it is kept
1614          *      in sync with it (the packet is  supposed to start with
1615          *      the ip header).
1616          */
1617         struct mbuf *m = args->m;
1618         struct ip *ip = mtod(m, struct ip *);
1619
1620         /*
1621          * oif | args->oif      If NULL, ipfw_chk has been called on the
1622          *      inbound path (ether_input, ip_input).
1623          *      If non-NULL, ipfw_chk has been called on the outbound path
1624          *      (ether_output, ip_output).
1625          */
1626         struct ifnet *oif = args->oif;
1627
1628         struct ip_fw *f = NULL;         /* matching rule */
1629         int retval = 0;
1630         struct m_tag *mtag;
1631
1632         /*
1633          * hlen The length of the IPv4 header.
1634          *      hlen >0 means we have an IPv4 packet.
1635          */
1636         u_int hlen = 0;         /* hlen >0 means we have an IP pkt */
1637
1638         /*
1639          * offset       The offset of a fragment. offset != 0 means that
1640          *      we have a fragment at this offset of an IPv4 packet.
1641          *      offset == 0 means that (if this is an IPv4 packet)
1642          *      this is the first or only fragment.
1643          */
1644         u_short offset = 0;
1645
1646         /*
1647          * Local copies of addresses. They are only valid if we have
1648          * an IP packet.
1649          *
1650          * proto        The protocol. Set to 0 for non-ip packets,
1651          *      or to the protocol read from the packet otherwise.
1652          *      proto != 0 means that we have an IPv4 packet.
1653          *
1654          * src_port, dst_port   port numbers, in HOST format. Only
1655          *      valid for TCP and UDP packets.
1656          *
1657          * src_ip, dst_ip       ip addresses, in NETWORK format.
1658          *      Only valid for IPv4 packets.
1659          */
1660         uint8_t proto;
1661         uint16_t src_port = 0, dst_port = 0;    /* NOTE: host format    */
1662         struct in_addr src_ip, dst_ip;          /* NOTE: network format */
1663         uint16_t ip_len = 0;
1664
1665         /*
1666          * dyn_dir = MATCH_UNKNOWN when rules unchecked,
1667          *      MATCH_NONE when checked and not matched (dyn_f = NULL),
1668          *      MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
1669          */
1670         int dyn_dir = MATCH_UNKNOWN;
1671         struct ip_fw *dyn_f = NULL;
1672         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1673
1674         if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
1675                 return 0;       /* accept */
1676
1677         if (args->eh == NULL ||         /* layer 3 packet */
1678             (m->m_pkthdr.len >= sizeof(struct ip) &&
1679              ntohs(args->eh->ether_type) == ETHERTYPE_IP))
1680                 hlen = ip->ip_hl << 2;
1681
1682         /*
1683          * Collect parameters into local variables for faster matching.
1684          */
1685         if (hlen == 0) {        /* do not grab addresses for non-ip pkts */
1686                 proto = args->f_id.proto = 0;   /* mark f_id invalid */
1687                 goto after_ip_checks;
1688         }
1689
1690         proto = args->f_id.proto = ip->ip_p;
1691         src_ip = ip->ip_src;
1692         dst_ip = ip->ip_dst;
1693         if (args->eh != NULL) { /* layer 2 packets are as on the wire */
1694                 offset = ntohs(ip->ip_off) & IP_OFFMASK;
1695                 ip_len = ntohs(ip->ip_len);
1696         } else {
1697                 offset = ip->ip_off & IP_OFFMASK;
1698                 ip_len = ip->ip_len;
1699         }
1700
1701 #define PULLUP_TO(len)                          \
1702 do {                                            \
1703         if (m->m_len < (len)) {                 \
1704                 args->m = m = m_pullup(m, (len));\
1705                 if (m == NULL)                  \
1706                         goto pullup_failed;     \
1707                 ip = mtod(m, struct ip *);      \
1708         }                                       \
1709 } while (0)
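
/*
 * NOTE: m_pullup() may return a different mbuf (or NULL on failure), which
 * is why PULLUP_TO() refreshes both args->m and the local 'ip' pointer
 * after a successful pullup.
 */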
1710
1711         if (offset == 0) {
1712                 switch (proto) {
1713                 case IPPROTO_TCP:
1714                         {
1715                                 struct tcphdr *tcp;
1716
1717                                 PULLUP_TO(hlen + sizeof(struct tcphdr));
1718                                 tcp = L3HDR(struct tcphdr, ip);
1719                                 dst_port = tcp->th_dport;
1720                                 src_port = tcp->th_sport;
1721                                 args->f_id.flags = tcp->th_flags;
1722                         }
1723                         break;
1724
1725                 case IPPROTO_UDP:
1726                         {
1727                                 struct udphdr *udp;
1728
1729                                 PULLUP_TO(hlen + sizeof(struct udphdr));
1730                                 udp = L3HDR(struct udphdr, ip);
1731                                 dst_port = udp->uh_dport;
1732                                 src_port = udp->uh_sport;
1733                         }
1734                         break;
1735
1736                 case IPPROTO_ICMP:
1737                         PULLUP_TO(hlen + 4);    /* type, code and checksum. */
1738                         args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
1739                         break;
1740
1741                 default:
1742                         break;
1743                 }
1744         }
1745
1746 #undef PULLUP_TO
1747
1748         args->f_id.src_ip = ntohl(src_ip.s_addr);
1749         args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1750         args->f_id.src_port = src_port = ntohs(src_port);
1751         args->f_id.dst_port = dst_port = ntohs(dst_port);
1752
1753 after_ip_checks:
1754         if (args->rule) {
1755                 /*
1756                  * Packet has already been tagged. Look for the next rule
1757                  * to restart processing.
1758                  *
1759                  * If fw_one_pass != 0 then just accept it.
1760                  * XXX should not happen here, but optimized out in
1761                  * the caller.
1762                  */
1763                 if (fw_one_pass)
1764                         return 0;
1765
1766                 /* This rule is being/has been flushed */
1767                 if (ipfw_flushing)
1768                         return IP_FW_PORT_DENY_FLAG;
1769
1770                 KASSERT(args->rule->cpuid == mycpuid,
1771                         ("rule used on cpu%d\n", mycpuid));
1772
1773                 /* This rule was deleted */
1774                 if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
1775                         return IP_FW_PORT_DENY_FLAG;
1776
1777                 f = args->rule->next_rule;
1778                 if (f == NULL)
1779                         f = lookup_next_rule(args->rule);
1780         } else {
1781                 /*
1782                  * Find the starting rule. It can be either the first
1783                  * one, or the one after divert_rule if asked so.
1784                  */
1785                 int skipto;
1786
1787                 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
1788                 if (mtag != NULL)
1789                         skipto = *(uint16_t *)m_tag_data(mtag);
1790                 else
1791                         skipto = 0;
1792
1793                 f = ctx->ipfw_layer3_chain;
1794                 if (args->eh == NULL && skipto != 0) {
1795                         /* No skipto during rule flushing */
1796                         if (ipfw_flushing)
1797                                 return IP_FW_PORT_DENY_FLAG;
1798
1799                         if (skipto >= IPFW_DEFAULT_RULE)
1800                                 return(IP_FW_PORT_DENY_FLAG); /* invalid */
1801
1802                         while (f && f->rulenum <= skipto)
1803                                 f = f->next;
1804                         if (f == NULL)  /* drop packet */
1805                                 return(IP_FW_PORT_DENY_FLAG);
1806                 } else if (ipfw_flushing) {
1807                         /* Rules are being flushed; skip to default rule */
1808                         f = ctx->ipfw_default_rule;
1809                 }
1810         }
1811         if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
1812                 m_tag_delete(m, mtag);
1813
1814         /*
1815          * Now scan the rules, and parse microinstructions for each rule.
1816          */
1817         for (; f; f = f->next) {
1818                 int l, cmdlen;
1819                 ipfw_insn *cmd;
1820                 int skip_or; /* skip rest of OR block */
1821
1822 again:
1823                 if (ctx->ipfw_set_disable & (1 << f->set))
1824                         continue;
1825
1826                 skip_or = 0;
1827                 for (l = f->cmd_len, cmd = f->cmd; l > 0;
1828                      l -= cmdlen, cmd += cmdlen) {
1829                         int match, deny;
1830
1831                         /*
1832                          * check_body is a jump target used when we find a
1833                          * CHECK_STATE, and need to jump to the body of
1834                          * the target rule.
1835                          */
1836
1837 check_body:
1838                         cmdlen = F_LEN(cmd);
1839                         /*
1840                          * An OR block (insn_1 || .. || insn_n) has the
1841                          * F_OR bit set in all but the last instruction.
1842                          * The first match will set "skip_or", and cause
1843                          * the following instructions to be skipped until
1844                          * past the one with the F_OR bit clear.
1845                          */
1846                         if (skip_or) {          /* skip this instruction */
1847                                 if ((cmd->len & F_OR) == 0)
1848                                         skip_or = 0;    /* next one is good */
1849                                 continue;
1850                         }
1851                         match = 0; /* set to 1 if we succeed */
1852
1853                         switch (cmd->opcode) {
1854                         /*
1855                          * The first set of opcodes compares the packet's
1856                          * fields with some pattern, setting 'match' if a
1857                          * match is found. At the end of the loop there is
1858                          * logic to deal with F_NOT and F_OR flags associated
1859                          * with the opcode.
1860                          */
1861                         case O_NOP:
1862                                 match = 1;
1863                                 break;
1864
1865                         case O_FORWARD_MAC:
1866                                 kprintf("ipfw: opcode %d unimplemented\n",
1867                                         cmd->opcode);
1868                                 break;
1869
1870                         case O_GID:
1871                         case O_UID:
1872                                 /*
1873                                  * We only check offset == 0 && proto != 0,
1874                                  * as this ensures that we have an IPv4
1875                                  * packet with the ports info.
1876                                  */
1877                                 if (offset!=0)
1878                                         break;
1879                             {
1880                                 struct inpcbinfo *pi;
1881                                 int wildcard;
1882                                 struct inpcb *pcb;
1883
1884                                 if (proto == IPPROTO_TCP) {
1885                                         wildcard = 0;
1886                                         pi = &tcbinfo[mycpu->gd_cpuid];
1887                                 } else if (proto == IPPROTO_UDP) {
1888                                         wildcard = 1;
1889                                         pi = &udbinfo;
1890                                 } else
1891                                         break;
1892
1893                                 pcb =  (oif) ?
1894                                         in_pcblookup_hash(pi,
1895                                             dst_ip, htons(dst_port),
1896                                             src_ip, htons(src_port),
1897                                             wildcard, oif) :
1898                                         in_pcblookup_hash(pi,
1899                                             src_ip, htons(src_port),
1900                                             dst_ip, htons(dst_port),
1901                                             wildcard, NULL);
1902
1903                                 if (pcb == NULL || pcb->inp_socket == NULL)
1904                                         break;
1905
1906                                 if (cmd->opcode == O_UID) {
1907 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b))
1908                                         match =
1909                                           !socheckuid(pcb->inp_socket,
1910                                            (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
1911 #undef socheckuid
1912                                 } else  {
1913                                         match = groupmember(
1914                                             (uid_t)((ipfw_insn_u32 *)cmd)->d[0],
1915                                             pcb->inp_socket->so_cred);
1916                                 }
1917                             }
1918                                 break;
1919
1920                         case O_RECV:
1921                                 match = iface_match(m->m_pkthdr.rcvif,
1922                                     (ipfw_insn_if *)cmd);
1923                                 break;
1924
1925                         case O_XMIT:
1926                                 match = iface_match(oif, (ipfw_insn_if *)cmd);
1927                                 break;
1928
1929                         case O_VIA:
1930                                 match = iface_match(oif ? oif :
1931                                     m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
1932                                 break;
1933
1934                         case O_MACADDR2:
1935                                 if (args->eh != NULL) { /* have MAC header */
1936                                         uint32_t *want = (uint32_t *)
1937                                                 ((ipfw_insn_mac *)cmd)->addr;
1938                                         uint32_t *mask = (uint32_t *)
1939                                                 ((ipfw_insn_mac *)cmd)->mask;
1940                                         uint32_t *hdr = (uint32_t *)args->eh;
1941
1942                                         match =
1943                                         (want[0] == (hdr[0] & mask[0]) &&
1944                                          want[1] == (hdr[1] & mask[1]) &&
1945                                          want[2] == (hdr[2] & mask[2]));
1946                                 }
1947                                 break;
1948
1949                         case O_MAC_TYPE:
1950                                 if (args->eh != NULL) {
1951                                         uint16_t t =
1952                                             ntohs(args->eh->ether_type);
1953                                         uint16_t *p =
1954                                             ((ipfw_insn_u16 *)cmd)->ports;
1955                                         int i;
1956
1957                                         /* Special vlan handling */
1958                                         if (m->m_flags & M_VLANTAG)
1959                                                 t = ETHERTYPE_VLAN;
1960
1961                                         for (i = cmdlen - 1; !match && i > 0;
1962                                              i--, p += 2) {
1963                                                 match =
1964                                                 (t >= p[0] && t <= p[1]);
1965                                         }
1966                                 }
1967                                 break;
1968
1969                         case O_FRAG:
1970                                 match = (hlen > 0 && offset != 0);
1971                                 break;
1972
1973                         case O_IN:      /* "out" is "not in" */
1974                                 match = (oif == NULL);
1975                                 break;
1976
1977                         case O_LAYER2:
1978                                 match = (args->eh != NULL);
1979                                 break;
1980
1981                         case O_PROTO:
1982                                 /*
1983                                  * We do not allow an arg of 0 so the
1984                                  * check of "proto" only suffices.
1985                                  */
1986                                 match = (proto == cmd->arg1);
1987                                 break;
1988
1989                         case O_IP_SRC:
1990                                 match = (hlen > 0 &&
1991                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1992                                     src_ip.s_addr);
1993                                 break;
1994
1995                         case O_IP_SRC_MASK:
1996                                 match = (hlen > 0 &&
1997                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1998                                      (src_ip.s_addr &
1999                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
2000                                 break;
2001
2002                         case O_IP_SRC_ME:
2003                                 if (hlen > 0) {
2004                                         struct ifnet *tif;
2005
2006                                         tif = INADDR_TO_IFP(&src_ip);
2007                                         match = (tif != NULL);
2008                                 }
2009                                 break;
2010
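                        /*
                         * Address-set match: d[0] holds the base address (in
                         * host order), arg1 the number of addresses covered,
                         * and the following 32-bit words form a bitmap with
                         * one bit per address starting at that base.
                         */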
2011                         case O_IP_DST_SET:
2012                         case O_IP_SRC_SET:
2013                                 if (hlen > 0) {
2014                                         uint32_t *d = (uint32_t *)(cmd + 1);
2015                                         uint32_t addr =
2016                                             cmd->opcode == O_IP_DST_SET ?
2017                                                 args->f_id.dst_ip :
2018                                                 args->f_id.src_ip;
2019
2020                                         if (addr < d[0])
2021                                                 break;
2022                                         addr -= d[0]; /* subtract base */
2023                                         match =
2024                                         (addr < cmd->arg1) &&
2025                                          (d[1 + (addr >> 5)] &
2026                                           (1 << (addr & 0x1f)));
2027                                 }
2028                                 break;
2029
2030                         case O_IP_DST:
2031                                 match = (hlen > 0 &&
2032                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2033                                     dst_ip.s_addr);
2034                                 break;
2035
2036                         case O_IP_DST_MASK:
2037                                 match = (hlen > 0) &&
2038                                     (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2039                                      (dst_ip.s_addr &
2040                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
2041                                 break;
2042
2043                         case O_IP_DST_ME:
2044                                 if (hlen > 0) {
2045                                         struct ifnet *tif;
2046
2047                                         tif = INADDR_TO_IFP(&dst_ip);
2048                                         match = (tif != NULL);
2049                                 }
2050                                 break;
2051
2052                         case O_IP_SRCPORT:
2053                         case O_IP_DSTPORT:
2054                                 /*
2055                                  * offset == 0 && proto != 0 is enough
2056                                  * to guarantee that we have an IPv4
2057                                  * packet with port info.
2058                                  */
2059                                 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
2060                                     && offset == 0) {
2061                                         uint16_t x =
2062                                             (cmd->opcode == O_IP_SRCPORT) ?
2063                                                 src_port : dst_port ;
2064                                         uint16_t *p =
2065                                             ((ipfw_insn_u16 *)cmd)->ports;
2066                                         int i;
2067
2068                                         for (i = cmdlen - 1; !match && i > 0;
2069                                              i--, p += 2) {
2070                                                 match =
2071                                                 (x >= p[0] && x <= p[1]);
2072                                         }
2073                                 }
2074                                 break;
2075
2076                         case O_ICMPTYPE:
2077                                 match = (offset == 0 && proto==IPPROTO_ICMP &&
2078                                     icmptype_match(ip, (ipfw_insn_u32 *)cmd));
2079                                 break;
2080
2081                         case O_IPOPT:
2082                                 match = (hlen > 0 && ipopts_match(ip, cmd));
2083                                 break;
2084
2085                         case O_IPVER:
2086                                 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
2087                                 break;
2088
2089                         case O_IPTTL:
2090                                 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
2091                                 break;
2092
2093                         case O_IPID:
2094                                 match = (hlen > 0 &&
2095                                     cmd->arg1 == ntohs(ip->ip_id));
2096                                 break;
2097
2098                         case O_IPLEN:
2099                                 match = (hlen > 0 && cmd->arg1 == ip_len);
2100                                 break;
2101
2102                         case O_IPPRECEDENCE:
2103                                 match = (hlen > 0 &&
2104                                     (cmd->arg1 == (ip->ip_tos & 0xe0)));
2105                                 break;
2106
2107                         case O_IPTOS:
2108                                 match = (hlen > 0 &&
2109                                     flags_match(cmd, ip->ip_tos));
2110                                 break;
2111
2112                         case O_TCPFLAGS:
2113                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2114                                     flags_match(cmd,
2115                                         L3HDR(struct tcphdr,ip)->th_flags));
2116                                 break;
2117
2118                         case O_TCPOPTS:
2119                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2120                                     tcpopts_match(ip, cmd));
2121                                 break;
2122
2123                         case O_TCPSEQ:
2124                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2125                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
2126                                         L3HDR(struct tcphdr,ip)->th_seq);
2127                                 break;
2128
2129                         case O_TCPACK:
2130                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2131                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
2132                                         L3HDR(struct tcphdr,ip)->th_ack);
2133                                 break;
2134
2135                         case O_TCPWIN:
2136                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2137                                     cmd->arg1 ==
2138                                         L3HDR(struct tcphdr,ip)->th_win);
2139                                 break;
2140
2141                         case O_ESTAB:
2142                                 /* reject packets which have SYN only */
2143                                 /* XXX should i also check for TH_ACK ? */
2144                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2145                                     (L3HDR(struct tcphdr,ip)->th_flags &
2146                                      (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2147                                 break;
2148
2149                         case O_LOG:
2150                                 if (fw_verbose)
2151                                         ipfw_log(f, hlen, args->eh, m, oif);
2152                                 match = 1;
2153                                 break;
2154
2155                         case O_PROB:
2156                                 match = (krandom() <
2157                                         ((ipfw_insn_u32 *)cmd)->d[0]);
2158                                 break;
2159
2160                         /*
2161                          * The second set of opcodes represents 'actions',
2162                          * i.e. the terminal part of a rule once the packet
2163                          * matches all previous patterns.
2164                          * Typically there is only one action for each rule,
2165                          * and the opcode is stored at the end of the rule
2166                          * (but there are exceptions -- see below).
2167                          *
2168                          * In general, here we set retval and terminate the
2169                          * outer loop (would be a 'break 3' in some language,
2170                          * but we need to do a 'goto done').
2171                          *
2172                          * Exceptions:
2173                          * O_COUNT and O_SKIPTO actions:
2174                          *   instead of terminating, we jump to the next rule
2175                          *   ('goto next_rule', equivalent to a 'break 2'),
2176                          *   or to the SKIPTO target ('goto again' after
2177                          *   having set f, cmd and l), respectively.
2178                          *
2179                          * O_LIMIT and O_KEEP_STATE: these opcodes are
2180                          *   not real 'actions', and are stored right
2181                          *   before the 'action' part of the rule.
2182                          *   These opcodes try to install an entry in the
2183                          *   state tables; if successful, we continue with
2184                          *   the next opcode (match=1; break;), otherwise
2185                          *   the packet must be dropped ('goto done' after
2186                          *   setting retval).  If static rules are changed
2187                          *   during the state installation, the packet will
2188                          *   be dropped ('return IP_FW_PORT_DENY_FLAG').
2189                          *
2190                          * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2191                          *   cause a lookup of the state table, and a jump
2192                          *   to the 'action' part of the parent rule
2193                          *   ('goto check_body') if an entry is found, or
2194                          *   (CHECK_STATE only) a jump to the next rule if
2195                          *   the entry is not found ('goto next_rule').
2196                          *   The result of the lookup is cached, so that
2197                          *   further instances of these opcodes are
2198                          *   effectively NOPs.  If static rules are changed
2199                          *   during the state lookup, the packet will
2200                          *   be dropped ('return IP_FW_PORT_DENY_FLAG').
2201                          */
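                        /*
                         * Illustrative example: a rule such as
                         *   ipfw add 100 allow tcp from any to any setup keep-state
                         * is compiled by the ipfw(8) frontend with an
                         * O_PROBE_STATE instruction near the start of the
                         * rule body and the O_KEEP_STATE opcode right before
                         * the O_ACCEPT action, so the state table is
                         * consulted before a new entry is installed.
                         */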
2202                         case O_LIMIT:
2203                         case O_KEEP_STATE:
2204                                 if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
2205                                         kprintf("%s rule (%d) is not ready "
2206                                                 "on cpu%d\n",
2207                                                 cmd->opcode == O_LIMIT ?
2208                                                 "limit" : "keep state",
2209                                                 f->rulenum, f->cpuid);
2210                                         goto next_rule;
2211                                 }
2212                                 if (install_state(f,
2213                                     (ipfw_insn_limit *)cmd, args, &deny)) {
2214                                         if (deny)
2215                                                 return IP_FW_PORT_DENY_FLAG;
2216
2217                                         retval = IP_FW_PORT_DENY_FLAG;
2218                                         goto done; /* error/limit violation */
2219                                 }
2220                                 if (deny)
2221                                         return IP_FW_PORT_DENY_FLAG;
2222                                 match = 1;
2223                                 break;
2224
2225                         case O_PROBE_STATE:
2226                         case O_CHECK_STATE:
2227                                 /*
2228                                  * dynamic rules are checked at the first
2229                                  * keep-state or check-state occurrence,
2230                                  * with the result being stored in dyn_dir.
2231                                  * The compiler introduces a PROBE_STATE
2232                                  * instruction for us when we have a
2233                                  * KEEP_STATE (because PROBE_STATE needs
2234                                  * to be run first).
2235                                  */
2236                                 if (dyn_dir == MATCH_UNKNOWN) {
2237                                         dyn_f = lookup_rule(&args->f_id,
2238                                                 &dyn_dir,
2239                                                 proto == IPPROTO_TCP ?
2240                                                 L3HDR(struct tcphdr, ip) : NULL,
2241                                                 ip_len, &deny);
2242                                         if (deny)
2243                                                 return IP_FW_PORT_DENY_FLAG;
2244                                         if (dyn_f != NULL) {
2245                                                 /*
2246                                                  * Found a rule from a dynamic
2247                                                  * entry; jump to the 'action'
2248                                                  * part of the rule.
2249                                                  */
2250                                                 f = dyn_f;
2251                                                 cmd = ACTION_PTR(f);
2252                                                 l = f->cmd_len - f->act_ofs;
2253                                                 goto check_body;
2254                                         }
2255                                 }
2256                                 /*
2257                                  * Dynamic entry not found. If CHECK_STATE,
2258                                  * skip to next rule, if PROBE_STATE just
2259                                  * ignore and continue with next opcode.
2260                                  */
2261                                 if (cmd->opcode == O_CHECK_STATE)
2262                                         goto next_rule;
2263                                 else if (!(f->rule_flags & IPFW_RULE_F_STATE))
2264                                         goto next_rule; /* not ready yet */
2265                                 match = 1;
2266                                 break;
2267
2268                         case O_ACCEPT:
2269                                 retval = 0;     /* accept */
2270                                 goto done;
2271
2272                         case O_PIPE:
2273                         case O_QUEUE:
2274                                 args->rule = f; /* report matching rule */
2275                                 retval = cmd->arg1 | IP_FW_PORT_DYNT_FLAG;
2276                                 goto done;
2277
2278                         case O_DIVERT:
2279                         case O_TEE:
2280                                 if (args->eh) /* not on layer 2 */
2281                                         break;
2282
2283                                 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
2284                                                  sizeof(uint16_t), MB_DONTWAIT);
2285                                 if (mtag == NULL) {
2286                                         retval = IP_FW_PORT_DENY_FLAG;
2287                                         goto done;
2288                                 }
2289                                 *(uint16_t *)m_tag_data(mtag) = f->rulenum;
2290                                 m_tag_prepend(m, mtag);
2291                                 retval = (cmd->opcode == O_DIVERT) ?
2292                                     cmd->arg1 :
2293                                     cmd->arg1 | IP_FW_PORT_TEE_FLAG;
2294                                 goto done;
2295
2296                         case O_COUNT:
2297                         case O_SKIPTO:
2298                                 f->pcnt++;      /* update stats */
2299                                 f->bcnt += ip_len;
2300                                 f->timestamp = time_second;
2301                                 if (cmd->opcode == O_COUNT)
2302                                         goto next_rule;
2303                                 /* handle skipto */
2304                                 if (f->next_rule == NULL)
2305                                         lookup_next_rule(f);
2306                                 f = f->next_rule;
2307                                 goto again;
2308
2309                         case O_REJECT:
2310                                 /*
2311                                  * Drop the packet and send a reject notice
2312                                  * if the packet is not ICMP (or is an ICMP
2313                                  * query), and it is not multicast/broadcast.
2314                                  */
2315                                 if (hlen > 0 &&
2316                                     (proto != IPPROTO_ICMP ||
2317                                      is_icmp_query(ip)) &&
2318                                     !(m->m_flags & (M_BCAST|M_MCAST)) &&
2319                                     !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
2320                                         /*
2321                                          * Update statistics before the possible
2322                                          * blocking 'send_reject'
2323                                          */
2324                                         f->pcnt++;
2325                                         f->bcnt += ip_len;
2326                                         f->timestamp = time_second;
2327
2328                                         send_reject(args, cmd->arg1,
2329                                             offset,ip_len);
2330                                         m = args->m;
2331
2332                                         /*
2333                                          * Return directly here, rule stats
2334                                          * have been updated above.
2335                                          */
2336                                         return IP_FW_PORT_DENY_FLAG;
2337                                 }
2338                                 /* FALLTHROUGH */
2339                         case O_DENY:
2340                                 retval = IP_FW_PORT_DENY_FLAG;
2341                                 goto done;
2342
2343                         case O_FORWARD_IP:
2344                                 if (args->eh)   /* not valid on layer2 pkts */
2345                                         break;
2346                                 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
2347                                         args->next_hop =
2348                                             &((ipfw_insn_sa *)cmd)->sa;
2349                                 }
2350                                 retval = 0;
2351                                 goto done;
2352
2353                         default:
2354                                 panic("-- unknown opcode %d\n", cmd->opcode);
2355                         } /* end of switch() on opcodes */
2356
2357                         if (cmd->len & F_NOT)
2358                                 match = !match;
2359
2360                         if (match) {
2361                                 if (cmd->len & F_OR)
2362                                         skip_or = 1;
2363                         } else {
2364                                 if (!(cmd->len & F_OR)) /* not an OR block, */
2365                                         break;          /* try next rule    */
2366                         }
2367
2368                 }       /* end of inner for, scan opcodes */
2369
2370 next_rule:;             /* try next rule                */
2371
2372         }               /* end of outer for, scan rules */
2373         kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
2374         return(IP_FW_PORT_DENY_FLAG);
2375
2376 done:
2377         /* Update statistics */
2378         f->pcnt++;
2379         f->bcnt += ip_len;
2380         f->timestamp = time_second;
2381         return retval;
2382
2383 pullup_failed:
2384         if (fw_verbose)
2385                 kprintf("pullup failed\n");
2386         return(IP_FW_PORT_DENY_FLAG);
2387 }
2388
2389 static void
2390 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
2391 {
2392         struct m_tag *mtag;
2393         struct dn_pkt *pkt;
2394         ipfw_insn *cmd;
2395         const struct ipfw_flow_id *id;
2396         struct dn_flow_id *fid;
2397
2398         M_ASSERTPKTHDR(m);
2399
2400         mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
2401         if (mtag == NULL) {
2402                 m_freem(m);
2403                 return;
2404         }
2405         m_tag_prepend(m, mtag);
2406
2407         pkt = m_tag_data(mtag);
2408         bzero(pkt, sizeof(*pkt));
2409
2410         cmd = fwa->rule->cmd + fwa->rule->act_ofs;
2411         if (cmd->opcode == O_LOG)
2412                 cmd += F_LEN(cmd);
2413         KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
2414                 ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));
2415
2416         pkt->dn_m = m;
2417         pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
2418         pkt->ifp = fwa->oif;
2419         pkt->cpuid = mycpu->gd_cpuid;
2420         pkt->pipe_nr = pipe_nr;
2421
2422         id = &fwa->f_id;
2423         fid = &pkt->id;
2424         fid->fid_dst_ip = id->dst_ip;
2425         fid->fid_src_ip = id->src_ip;
2426         fid->fid_dst_port = id->dst_port;
2427         fid->fid_src_port = id->src_port;
2428         fid->fid_proto = id->proto;
2429         fid->fid_flags = id->flags;
2430
2431         ipfw_ref_rule(fwa->rule);
2432         pkt->dn_priv = fwa->rule;
2433         pkt->dn_unref_priv = ipfw_unref_rule;
2434
2435         if (cmd->opcode == O_PIPE)
2436                 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
2437
2438         if (dir == DN_TO_IP_OUT) {
2439                 /*
2440                  * We need to copy *ro because for ICMP pkts (and maybe
2441                  * others) the caller passed a pointer into the stack;
2442                  * dst might also be a pointer into *ro so it needs to
2443                  * be updated.
2444                  */
2445                 pkt->ro = *(fwa->ro);
2446                 if (fwa->ro->ro_rt)
2447                         fwa->ro->ro_rt->rt_refcnt++;
2448                 if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) {
2449                         /* 'dst' points into 'ro' */
2450                         fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst);
2451                 }
2452                 pkt->dn_dst = fwa->dst;
2453                 pkt->flags = fwa->flags;
2454         }
2455
2456         m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
2457         ip_dn_queue(m);
2458 }
2459
2460 /*
2461  * When a rule is added/deleted, clear the next_rule pointers in all rules.
2462  * These will be reconstructed on the fly as packets are matched.
2463  * Must be called at splimp().
2464  */
2465 static void
2466 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
2467 {
2468         struct ip_fw *rule;
2469
2470         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
2471                 rule->next_rule = NULL;
2472 }
2473
2474 static __inline void
2475 ipfw_inc_static_count(struct ip_fw *rule)
2476 {
2477         KKASSERT(mycpuid == 0);
2478
2479         static_count++;
2480         static_ioc_len += IOC_RULESIZE(rule);
2481 }
2482
2483 static __inline void
2484 ipfw_dec_static_count(struct ip_fw *rule)
2485 {
2486         int l = IOC_RULESIZE(rule);
2487
2488         KKASSERT(mycpuid == 0);
2489
2490         KASSERT(static_count > 0, ("invalid static count %u\n", static_count));
2491         static_count--;
2492
2493         KASSERT(static_ioc_len >= l,
2494                 ("invalid static len %u\n", static_ioc_len));
2495         static_ioc_len -= l;
2496 }
2497
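/*
 * Chain together the per-CPU copies of the same logical rule: each copy's
 * 'sibling' pointer refers to the copy created on the next CPU, so code
 * that must visit every duplicate (e.g. ipfw_enable_state_dispatch() below)
 * can simply follow the chain while the message hops from CPU to CPU.
 */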
2498 static void
2499 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
2500 {
2501         if (fwmsg->sibling != NULL) {
2502                 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
2503                 fwmsg->sibling->sibling = rule;
2504         }
2505         fwmsg->sibling = rule;
2506 }
2507
2508 static struct ip_fw *
2509 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
2510 {
2511         struct ip_fw *rule;
2512
2513         rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
2514
2515         rule->act_ofs = ioc_rule->act_ofs;
2516         rule->cmd_len = ioc_rule->cmd_len;
2517         rule->rulenum = ioc_rule->rulenum;
2518         rule->set = ioc_rule->set;
2519         rule->usr_flags = ioc_rule->usr_flags;
2520
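        /* cmd_len is expressed in 32-bit words, hence the "* 4" to get bytes */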
2521         bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
2522
2523         rule->refcnt = 1;
2524         rule->cpuid = mycpuid;
2525
2526         rule->stub = stub;
2527         if (stub != NULL)
2528                 stub->rule[mycpuid] = rule;
2529
2530         return rule;
2531 }
2532
2533 static void
2534 ipfw_add_rule_dispatch(struct netmsg *nmsg)
2535 {
2536         struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
2537         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2538         struct ip_fw *rule;
2539
2540         rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);
2541
2542         /*
2543          * Bump generation after ipfw_create_rule(),
2544          * since this function is blocking
2545          */
2546         ctx->ipfw_gen++;
2547
2548         /*
2549          * Insert rule into the pre-determined position
2550          */
2551         if (fwmsg->prev_rule != NULL) {
2552                 struct ip_fw *prev, *next;
2553
2554                 prev = fwmsg->prev_rule;
2555                 KKASSERT(prev->cpuid == mycpuid);
2556
2557                 next = fwmsg->next_rule;
2558                 KKASSERT(next->cpuid == mycpuid);
2559
2560                 rule->next = next;
2561                 prev->next = rule;
2562
2563                 /*
2564                  * Move to the position on the next CPU
2565                  * before the msg is forwarded.
2566                  */
2567                 fwmsg->prev_rule = prev->sibling;
2568                 fwmsg->next_rule = next->sibling;
2569         } else {
2570                 KKASSERT(fwmsg->next_rule == NULL);
2571                 rule->next = ctx->ipfw_layer3_chain;
2572                 ctx->ipfw_layer3_chain = rule;
2573         }
2574
2575         /* Link rule CPU sibling */
2576         ipfw_link_sibling(fwmsg, rule);
2577
2578         ipfw_flush_rule_ptrs(ctx);
2579
2580         if (mycpuid == 0) {
2581                 /* Statistics only need to be updated once */
2582                 ipfw_inc_static_count(rule);
2583
2584                 /* Return the rule on CPU0 */
2585                 nmsg->nm_lmsg.u.ms_resultp = rule;
2586         }
2587
2588         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2589 }
2590
2591 static void
2592 ipfw_enable_state_dispatch(struct netmsg *nmsg)
2593 {
2594         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
2595         struct ip_fw *rule = lmsg->u.ms_resultp;
2596
2597         KKASSERT(rule->cpuid == mycpuid);
2598         KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
2599         KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
2600         rule->rule_flags |= IPFW_RULE_F_STATE;
2601         lmsg->u.ms_resultp = rule->sibling;
2602
2603         ifnet_forwardmsg(lmsg, mycpuid + 1);
2604 }
2605
2606 /*
2607  * Add a new rule to the list.  Copy the rule into a malloc'ed area,
2608  * then possibly create a rule number and add the rule to the list.
2609  * Update the rule_number in the input struct so the caller knows
2610  * it as well.
2611  */
2612 static void
2613 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
2614 {
2615         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2616         struct netmsg_ipfw fwmsg;
2617         struct netmsg *nmsg;
2618         struct ip_fw *f, *prev, *rule;
2619         struct ip_fw_stub *stub;
2620
2621         IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2622
2623         crit_enter();
2624
2625         /*
2626          * If rulenum is 0, find the highest numbered rule before the
2627          * default rule and add the auto-increment step to it.
2628          */
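        /*
         * Illustrative example (editorial addition): with autoinc_step = 100
         * and the highest non-default rule numbered 500, a rule submitted
         * with rulenum 0 is assigned 600, provided 600 is still below
         * IPFW_DEFAULT_RULE.
         */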
2629         if (ioc_rule->rulenum == 0) {
2630                 int step = autoinc_step;
2631
2632                 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
2633                          step <= IPFW_AUTOINC_STEP_MAX);
2634
2635                 /*
2636                  * Locate the highest numbered rule before default
2637                  */
2638                 for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
2639                         if (f->rulenum == IPFW_DEFAULT_RULE)
2640                                 break;
2641                         ioc_rule->rulenum = f->rulenum;
2642                 }
2643                 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
2644                         ioc_rule->rulenum += step;
2645         }
2646         KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
2647                 ioc_rule->rulenum != 0,
2648                 ("invalid rule num %d\n", ioc_rule->rulenum));
2649
2650         /*
2651          * Now find the right place for the new rule in the sorted list.
2652          */
2653         for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
2654              prev = f, f = f->next) {
2655                 if (f->rulenum > ioc_rule->rulenum) {
2656                         /* Found the location */
2657                         break;
2658                 }
2659         }
2660         KASSERT(f != NULL, ("no default rule?!\n"));
2661
2662         if (rule_flags & IPFW_RULE_F_STATE) {
2663                 int size;
2664
2665                 /*
2666                  * If the new rule will create states, then allocate
2667                  * a rule stub, which will be referenced by states
2668                  * (dyn rules)
2669                  */
2670                 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
2671                 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
2672         } else {
2673                 stub = NULL;
2674         }
2675
2676         /*
2677          * Duplicate the rule onto each CPU.
2678          * The rule duplicated on CPU0 will be returned.
2679          */
2680         bzero(&fwmsg, sizeof(fwmsg));
2681         nmsg = &fwmsg.nmsg;
2682         netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_add_rule_dispatch);
2683         fwmsg.ioc_rule = ioc_rule;
2684         fwmsg.prev_rule = prev;
2685         fwmsg.next_rule = prev == NULL ? NULL : f;
2686         fwmsg.stub = stub;
2687
2688         ifnet_domsg(&nmsg->nm_lmsg, 0);
2689         KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);
2690
2691         rule = nmsg->nm_lmsg.u.ms_resultp;
2692         KKASSERT(rule != NULL && rule->cpuid == mycpuid);
2693
2694         if (rule_flags & IPFW_RULE_F_STATE) {
2695                 /*
2696                  * Turn on the state flag _after_ everything on all
2697                  * CPUs has been set up.
2698                  */
2699                 bzero(nmsg, sizeof(*nmsg));
2700                 netmsg_init(nmsg, &curthread->td_msgport, 0,
2701                             ipfw_enable_state_dispatch);
2702                 nmsg->nm_lmsg.u.ms_resultp = rule;
2703
2704                 ifnet_domsg(&nmsg->nm_lmsg, 0);
2705                 KKASSERT(nmsg->nm_lmsg.u.ms_resultp == NULL);
2706         }
2707
2708         crit_exit();
2709
2710         DEB(kprintf("++ installed rule %d, static count now %d\n",
2711                 rule->rulenum, static_count);)
2712 }
2713
2714 /**
2715  * Free storage associated with a static rule (including derived
2716  * dynamic rules).
2717  * The caller is in charge of clearing rule pointers to avoid
2718  * dangling pointers.
2719  * @return a pointer to the next entry.
2720  * Arguments are not checked, so they better be correct.
2721  * Must be called at splimp().
2722  */
2723 static struct ip_fw *
2724 ipfw_delete_rule(struct ipfw_context *ctx,
2725                  struct ip_fw *prev, struct ip_fw *rule)
2726 {
2727         struct ip_fw *n;
2728         struct ip_fw_stub *stub;
2729
2730         ctx->ipfw_gen++;
2731
2732         /* STATE flag should have been cleared before we reach here */
2733         KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);
2734
2735         stub = rule->stub;
2736         n = rule->next;
2737         if (prev == NULL)
2738                 ctx->ipfw_layer3_chain = n;
2739         else
2740                 prev->next = n;
2741
2742         /* Mark the rule as invalid */
2743         rule->rule_flags |= IPFW_RULE_F_INVALID;
2744         rule->next_rule = NULL;
2745         rule->sibling = NULL;
2746         rule->stub = NULL;
2747 #ifdef foo
2748         /* Don't reset cpuid here; keep various assertions working */
2749         rule->cpuid = -1;
2750 #endif
2751
2752         /* Statistics only need to be updated once */
2753         if (mycpuid == 0)
2754                 ipfw_dec_static_count(rule);
2755
2756         /* Free 'stub' on the last CPU */
2757         if (stub != NULL && mycpuid == ncpus - 1)
2758                 kfree(stub, M_IPFW);
2759
2760         /* Try to free this rule */
2761         ipfw_free_rule(rule);
2762
2763         /* Return the next rule */
2764         return n;
2765 }
2766
2767 static void
2768 ipfw_flush_dispatch(struct netmsg *nmsg)
2769 {
2770         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
2771         int kill_default = lmsg->u.ms_result;
2772         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2773         struct ip_fw *rule;
2774
2775         ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */
2776
2777         while ((rule = ctx->ipfw_layer3_chain) != NULL &&
2778                (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
2779                 ipfw_delete_rule(ctx, NULL, rule);
2780
2781         ifnet_forwardmsg(lmsg, mycpuid + 1);
2782 }
2783
2784 static void
2785 ipfw_disable_rule_state_dispatch(struct netmsg *nmsg)
2786 {
2787         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2788         struct ip_fw *rule;
2789
2790         rule = dmsg->start_rule;
2791         if (rule != NULL) {
2792                 KKASSERT(rule->cpuid == mycpuid);
2793
2794                 /*
2795                  * Move to the position on the next CPU
2796                  * before the msg is forwarded.
2797                  */
2798                 dmsg->start_rule = rule->sibling;
2799         } else {
2800                 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2801
2802                 KKASSERT(dmsg->rulenum == 0);
2803                 rule = ctx->ipfw_layer3_chain;
2804         }
2805
2806         while (rule != NULL) {
2807                 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
2808                         break;
2809                 rule->rule_flags &= ~IPFW_RULE_F_STATE;
2810                 rule = rule->next;
2811         }
2812
2813         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2814 }
2815
2816 /*
2817  * Deletes all rules from a chain (including the default rule
2818  * if the second argument is set).
2819  * Must be called at splimp().
2820  */
2821 static void
2822 ipfw_flush(int kill_default)
2823 {
2824         struct netmsg_del dmsg;
2825         struct netmsg nmsg;
2826         struct lwkt_msg *lmsg;
2827         struct ip_fw *rule;
2828         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2829
2830         IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2831
2832         /*
2833          * If 'kill_default' is set, the caller has already done the
2834          * necessary msgport syncing; no need to do it again.
2835          */
2836         if (!kill_default) {
2837                 /*
2838                  * Let ipfw_chk() know the rules are going to
2839                  * be flushed, so it can jump directly to
2840                  * the default rule.
2841                  */
2842                 ipfw_flushing = 1;
2843                 netmsg_service_sync();
2844         }
2845
2846         /*
2847          * Clear STATE flag on rules, so no more states (dyn rules)
2848          * will be created.
2849          */
2850         bzero(&dmsg, sizeof(dmsg));
2851         netmsg_init(&dmsg.nmsg, &curthread->td_msgport, 0,
2852                     ipfw_disable_rule_state_dispatch);
2853         ifnet_domsg(&dmsg.nmsg.nm_lmsg, 0);
2854
2855         /*
2856          * This actually nukes all states (dyn rules)
2857          */
2858         lockmgr(&dyn_lock, LK_EXCLUSIVE);
2859         for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
2860                 /*
2861                  * Can't check IPFW_RULE_F_STATE here,
2862                  * since it has been cleared previously.
2863                  * Check 'stub' instead.
2864                  */
2865                 if (rule->stub != NULL) {
2866                         /* Force removal */
2867                         remove_dyn_rule_locked(rule, NULL);
2868                 }
2869         }
2870         lockmgr(&dyn_lock, LK_RELEASE);
2871
2872         /*
2873          * Press the 'flush' button
2874          */
2875         bzero(&nmsg, sizeof(nmsg));
2876         netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_flush_dispatch);
2877         lmsg = &nmsg.nm_lmsg;
2878         lmsg->u.ms_result = kill_default;
2879         ifnet_domsg(lmsg, 0);
2880
2881         KASSERT(dyn_count == 0, ("%u dyn rule remains\n", dyn_count));
2882
2883         if (kill_default) {
2884                 if (ipfw_dyn_v != NULL) {
2885                         /*
2886                          * Free dynamic rules(state) hash table
2887                          */
2888                         kfree(ipfw_dyn_v, M_IPFW);
2889                         ipfw_dyn_v = NULL;
2890                 }
2891
2892                 KASSERT(static_count == 0,
2893                         ("%u static rules remains\n", static_count));
2894                 KASSERT(static_ioc_len == 0,
2895                         ("%u bytes of static rules remains\n", static_ioc_len));
2896         } else {
2897                 KASSERT(static_count == 1,
2898                         ("%u static rules remains\n", static_count));
2899                 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
2900                         ("%u bytes of static rules remains, should be %u\n",
2901                          static_ioc_len, IOC_RULESIZE(ctx->ipfw_default_rule)));
2902         }
2903
2904         /* Flush is done */
2905         ipfw_flushing = 0;
2906 }
2907
2908 static void
2909 ipfw_alt_delete_rule_dispatch(struct netmsg *nmsg)
2910 {
2911         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2912         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2913         struct ip_fw *rule, *prev;
2914
2915         rule = dmsg->start_rule;
2916         KKASSERT(rule->cpuid == mycpuid);
2917         dmsg->start_rule = rule->sibling;
2918
2919         prev = dmsg->prev_rule;
2920         if (prev != NULL) {
2921                 KKASSERT(prev->cpuid == mycpuid);
2922
2923                 /*
2924                  * Move to the position on the next CPU
2925                  * before the msg is forwarded.
2926                  */
2927                 dmsg->prev_rule = prev->sibling;
2928         }
2929
2930         /*
2931          * Flush rule pointers outside the loop, then delete all matching
2932          * rules.  'prev' remains the same throughout the cycle.
2933          */
2934         ipfw_flush_rule_ptrs(ctx);
2935         while (rule && rule->rulenum == dmsg->rulenum)
2936                 rule = ipfw_delete_rule(ctx, prev, rule);
2937
2938         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2939 }
2940
2941 static int
2942 ipfw_alt_delete_rule(uint16_t rulenum)
2943 {
2944         struct ip_fw *prev, *rule, *f;
2945         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2946         struct netmsg_del dmsg;
2947         struct netmsg *nmsg;
2948         int state;
2949
2950         /*
2951          * Locate first rule to delete
2952          */
2953         for (prev = NULL, rule = ctx->ipfw_layer3_chain;
2954              rule && rule->rulenum < rulenum;
2955              prev = rule, rule = rule->next)
2956                 ; /* EMPTY */
2957         if (rule->rulenum != rulenum)
2958                 return EINVAL;
2959
2960         /*
2961          * Check whether any rules with the given number will
2962          * create states.
2963          */
2964         state = 0;
2965         for (f = rule; f && f->rulenum == rulenum; f = f->next) {
2966                 if (f->rule_flags & IPFW_RULE_F_STATE) {
2967                         state = 1;
2968                         break;
2969                 }
2970         }
2971
2972         if (state) {
2973                 /*
2974                  * Clear the STATE flag, so no more states will be
2975                  * created based on the rules numbered 'rulenum'.
2976                  */
2977                 bzero(&dmsg, sizeof(dmsg));
2978                 nmsg = &dmsg.nmsg;
2979                 netmsg_init(nmsg, &curthread->td_msgport, 0,
2980                             ipfw_disable_rule_state_dispatch);
2981                 dmsg.start_rule = rule;
2982                 dmsg.rulenum = rulenum;
2983
2984                 ifnet_domsg(&nmsg->nm_lmsg, 0);
2985                 KKASSERT(dmsg.start_rule == NULL);
2986
2987                 /*
2988                  * Nuke all related states
2989                  */
2990                 lockmgr(&dyn_lock, LK_EXCLUSIVE);
2991                 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
2992                         /*
2993                          * Can't check IPFW_RULE_F_STATE here,
2994                          * since it has been cleared previously.
2995                          * Check 'stub' instead.
2996                          */
2997                         if (f->stub != NULL) {
2998                                 /* Force removal */
2999                                 remove_dyn_rule_locked(f, NULL);
3000                         }
3001                 }
3002                 lockmgr(&dyn_lock, LK_RELEASE);
3003         }
3004
3005         /*
3006          * Get rid of the rule duplications on all CPUs
3007          */
3008         bzero(&dmsg, sizeof(dmsg));
3009         nmsg = &dmsg.nmsg;
3010         netmsg_init(nmsg, &curthread->td_msgport, 0,
3011                     ipfw_alt_delete_rule_dispatch);
3012         dmsg.prev_rule = prev;
3013         dmsg.start_rule = rule;
3014         dmsg.rulenum = rulenum;
3015
3016         ifnet_domsg(&nmsg->nm_lmsg, 0);
3017         KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
3018         return 0;
3019 }
3020
3021 static void
3022 ipfw_alt_delete_ruleset_dispatch(struct netmsg *nmsg)
3023 {
3024         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3025         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3026         struct ip_fw *prev, *rule;
3027 #ifdef INVARIANTS
3028         int del = 0;
3029 #endif
3030
3031         ipfw_flush_rule_ptrs(ctx);
3032
3033         prev = NULL;
3034         rule = ctx->ipfw_layer3_chain;
3035         while (rule != NULL) {
3036                 if (rule->set == dmsg->from_set) {
3037                         rule = ipfw_delete_rule(ctx, prev, rule);
3038 #ifdef INVARIANTS
3039                         del = 1;
3040 #endif
3041                 } else {
3042                         prev = rule;
3043                         rule = rule->next;
3044                 }
3045         }
3046         KASSERT(del, ("no match set?!\n"));
3047
3048         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3049 }
3050
3051 static void
3052 ipfw_disable_ruleset_state_dispatch(struct netmsg *nmsg)
3053 {
3054         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3055         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3056         struct ip_fw *rule;
3057 #ifdef INVARIANTS
3058         int cleared = 0;
3059 #endif
3060
3061         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3062                 if (rule->set == dmsg->from_set) {
3063 #ifdef INVARIANTS
3064                         cleared = 1;
3065 #endif
3066                         rule->rule_flags &= ~IPFW_RULE_F_STATE;
3067                 }
3068         }
3069         KASSERT(cleared, ("no match set?!\n"));
3070
3071         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3072 }
3073
3074 static int
3075 ipfw_alt_delete_ruleset(uint8_t set)
3076 {
3077         struct netmsg_del dmsg;
3078         struct netmsg *nmsg;
3079         int state, del;
3080         struct ip_fw *rule;
3081         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3082
3083         /*
3084          * Check whether the 'set' exists.  If it exists,
3085          * then check whether any rules within the set will
3086          * try to create states.
3087          */
3088         state = 0;
3089         del = 0;
3090         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3091                 if (rule->set == set) {
3092                         del = 1;
3093                         if (rule->rule_flags & IPFW_RULE_F_STATE) {
3094                                 state = 1;
3095                                 break;
3096                         }
3097                 }
3098         }
3099         if (!del)
3100                 return 0; /* XXX EINVAL? */
3101
3102         if (state) {
3103                 /*
3104                  * Clear the STATE flag, so no more states will be
3105                  * created based on the rules in this set.
3106                  */
3107                 bzero(&dmsg, sizeof(dmsg));
3108                 nmsg = &dmsg.nmsg;
3109                 netmsg_init(nmsg, &curthread->td_msgport, 0,
3110                             ipfw_disable_ruleset_state_dispatch);
3111                 dmsg.from_set = set;
3112
3113                 ifnet_domsg(&nmsg->nm_lmsg, 0);
3114
3115                 /*
3116                  * Nuke all related states
3117                  */
3118                 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3119                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3120                         if (rule->set != set)
3121                                 continue;
3122
3123                         /*
3124                          * Can't check IPFW_RULE_F_STATE here,
3125                          * since it has been cleared previously.
3126                          * Check 'stub' instead.
3127                          */
3128                         if (rule->stub != NULL) {
3129                                 /* Force removal */
3130                                 remove_dyn_rule_locked(rule, NULL);
3131                         }
3132                 }
3133                 lockmgr(&dyn_lock, LK_RELEASE);
3134         }
3135
3136         /*
3137          * Delete this set
3138          */
3139         bzero(&dmsg, sizeof(dmsg));
3140         nmsg = &dmsg.nmsg;
3141         netmsg_init(nmsg, &curthread->td_msgport, 0,
3142                     ipfw_alt_delete_ruleset_dispatch);
3143         dmsg.from_set = set;
3144
3145         ifnet_domsg(&nmsg->nm_lmsg, 0);
3146         return 0;
3147 }
3148
3149 static void
3150 ipfw_alt_move_rule_dispatch(struct netmsg *nmsg)
3151 {
3152         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3153         struct ip_fw *rule;
3154
3155         rule = dmsg->start_rule;
3156         KKASSERT(rule->cpuid == mycpuid);
3157
3158         /*
3159          * Move to the position on the next CPU
3160          * before the msg is forwarded.
3161          */
3162         dmsg->start_rule = rule->sibling;
3163
3164         while (rule && rule->rulenum <= dmsg->rulenum) {
3165                 if (rule->rulenum == dmsg->rulenum)
3166                         rule->set = dmsg->to_set;
3167                 rule = rule->next;
3168         }
3169         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3170 }
3171
3172 static int
3173 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
3174 {
3175         struct netmsg_del dmsg;
3176         struct netmsg *nmsg;
3177         struct ip_fw *rule;
3178         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3179
3180         /*
3181          * Locate first rule to move
3182          */
3183         for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
3184              rule = rule->next) {
3185                 if (rule->rulenum == rulenum && rule->set != set)
3186                         break;
3187         }
3188         if (rule == NULL || rule->rulenum > rulenum)
3189                 return 0; /* XXX error? */
3190
3191         bzero(&dmsg, sizeof(dmsg));
3192         nmsg = &dmsg.nmsg;
3193         netmsg_init(nmsg, &curthread->td_msgport, 0,
3194                     ipfw_alt_move_rule_dispatch);
3195         dmsg.start_rule = rule;
3196         dmsg.rulenum = rulenum;
3197         dmsg.to_set = set;
3198
3199         ifnet_domsg(&nmsg->nm_lmsg, 0);
3200         KKASSERT(dmsg.start_rule == NULL);
3201         return 0;
3202 }
3203
3204 static void
3205 ipfw_alt_move_ruleset_dispatch(struct netmsg *nmsg)
3206 {
3207         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3208         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3209         struct ip_fw *rule;
3210
3211         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3212                 if (rule->set == dmsg->from_set)
3213                         rule->set = dmsg->to_set;
3214         }
3215         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3216 }
3217
3218 static int
3219 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
3220 {
3221         struct netmsg_del dmsg;
3222         struct netmsg *nmsg;
3223
3224         bzero(&dmsg, sizeof(dmsg));
3225         nmsg = &dmsg.nmsg;
3226         netmsg_init(nmsg, &curthread->td_msgport, 0,
3227                     ipfw_alt_move_ruleset_dispatch);
3228         dmsg.from_set = from_set;
3229         dmsg.to_set = to_set;
3230
3231         ifnet_domsg(&nmsg->nm_lmsg, 0);
3232         return 0;
3233 }
3234
3235 static void
3236 ipfw_alt_swap_ruleset_dispatch(struct netmsg *nmsg)
3237 {
3238         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3239         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3240         struct ip_fw *rule;
3241
3242         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3243                 if (rule->set == dmsg->from_set)
3244                         rule->set = dmsg->to_set;
3245                 else if (rule->set == dmsg->to_set)
3246                         rule->set = dmsg->from_set;
3247         }
3248         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3249 }
3250
3251 static int
3252 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
3253 {
3254         struct netmsg_del dmsg;
3255         struct netmsg *nmsg;
3256
3257         bzero(&dmsg, sizeof(dmsg));
3258         nmsg = &dmsg.nmsg;
3259         netmsg_init(nmsg, &curthread->td_msgport, 0,
3260                     ipfw_alt_swap_ruleset_dispatch);
3261         dmsg.from_set = set1;
3262         dmsg.to_set = set2;
3263
3264         ifnet_domsg(&nmsg->nm_lmsg, 0);
3265         return 0;
3266 }
3267
3268 /**
3269  * Remove all rules with given number, and also do set manipulation.
3270  *
3271  * The argument is a uint32_t. The low 16 bits are the rule or set number,
3272  * the next 8 bits are the new set, the top 8 bits are the command:
3273  *
3274  *      0       delete rules with given number
3275  *      1       delete rules with given set number
3276  *      2       move rules with given number to new set
3277  *      3       move rules with given set number to new set
3278  *      4       swap sets with given numbers
3279  */
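/*
 * Hypothetical userland sketch (editorial addition, not part of this file):
 * to move rules numbered 100 into set 3, the argument passed through the
 * IP_FW_DEL sockopt could be composed roughly as
 *
 *      uint32_t arg = (2 << 24) | (3 << 16) | 100;
 *      setsockopt(s, IPPROTO_IP, IP_FW_DEL, &arg, sizeof(arg));
 *
 * where 's' is assumed to be a raw IP socket and command 2 selects
 * "move rules with given number to new set".
 */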
3280 static int
3281 ipfw_ctl_alter(uint32_t arg)
3282 {
3283         uint16_t rulenum;
3284         uint8_t cmd, new_set;
3285         int error = 0;
3286
3287         rulenum = arg & 0xffff;
3288         cmd = (arg >> 24) & 0xff;
3289         new_set = (arg >> 16) & 0xff;
3290
3291         if (cmd > 4)
3292                 return EINVAL;
3293         if (new_set >= IPFW_DEFAULT_SET)
3294                 return EINVAL;
3295         if (cmd == 0 || cmd == 2) {
3296                 if (rulenum == IPFW_DEFAULT_RULE)
3297                         return EINVAL;
3298         } else {
3299                 if (rulenum >= IPFW_DEFAULT_SET)
3300                         return EINVAL;
3301         }
3302
3303         switch (cmd) {
3304         case 0: /* delete rules with given number */
3305                 error = ipfw_alt_delete_rule(rulenum);
3306                 break;
3307
3308         case 1: /* delete all rules with given set number */
3309                 error = ipfw_alt_delete_ruleset(rulenum);
3310                 break;
3311
3312         case 2: /* move rules with given number to new set */
3313                 error = ipfw_alt_move_rule(rulenum, new_set);
3314                 break;
3315
3316         case 3: /* move rules with given set number to new set */
3317                 error = ipfw_alt_move_ruleset(rulenum, new_set);
3318                 break;
3319
3320         case 4: /* swap two sets */
3321                 error = ipfw_alt_swap_ruleset(rulenum, new_set);
3322                 break;
3323         }
3324         return error;
3325 }
3326
3327 /*
3328  * Clear counters for a specific rule.
3329  */
3330 static void
3331 clear_counters(struct ip_fw *rule, int log_only)
3332 {
3333         ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
3334
3335         if (log_only == 0) {
3336                 rule->bcnt = rule->pcnt = 0;
3337                 rule->timestamp = 0;
3338         }
3339         if (l->o.opcode == O_LOG)
3340                 l->log_left = l->max_log;
3341 }
3342
3343 static void
3344 ipfw_zero_entry_dispatch(struct netmsg *nmsg)
3345 {
3346         struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
3347         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3348         struct ip_fw *rule;
3349
3350         if (zmsg->rulenum == 0) {
3351                 KKASSERT(zmsg->start_rule == NULL);
3352
3353                 ctx->ipfw_norule_counter = 0;
3354                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3355                         clear_counters(rule, zmsg->log_only);
3356         } else {
3357                 struct ip_fw *start = zmsg->start_rule;
3358
3359                 KKASSERT(start->cpuid == mycpuid);
3360                 KKASSERT(start->rulenum == zmsg->rulenum);
3361
3362                 /*
3363                  * We can have multiple rules with the same number, so we
3364                  * need to clear them all.
3365                  */
3366                 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
3367                      rule = rule->next)
3368                         clear_counters(rule, zmsg->log_only);
3369
3370                 /*
3371                  * Move to the position on the next CPU
3372                  * before the msg is forwarded.
3373                  */
3374                 zmsg->start_rule = start->sibling;
3375         }
3376         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3377 }
3378
3379 /**
3380  * Reset some or all counters on firewall rules.
3381  * @arg rulenum is 0 to clear all entries, or contains a specific
3382  * rule number.
3383  * @arg log_only is 1 if we only want to reset logs, zero otherwise.
3384  */
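/*
 * Editorial example (not from the original source): ipfw_ctl_zero_entry(0, 1)
 * resets the logging counters of every rule, while ipfw_ctl_zero_entry(100, 0)
 * clears the packet/byte counters and timestamps of all rules numbered 100 on
 * every CPU (and returns EINVAL if no such rule exists).
 */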
3385 static int
3386 ipfw_ctl_zero_entry(int rulenum, int log_only)
3387 {
3388         struct netmsg_zent zmsg;
3389         struct netmsg *nmsg;
3390         const char *msg;
3391         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3392
3393         bzero(&zmsg, sizeof(zmsg));
3394         nmsg = &zmsg.nmsg;
3395         netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_zero_entry_dispatch);
3396         zmsg.log_only = log_only;
3397
3398         if (rulenum == 0) {
3399                 msg = log_only ? "ipfw: All logging counts reset.\n"
3400                                : "ipfw: Accounting cleared.\n";
3401         } else {
3402                 struct ip_fw *rule;
3403
3404                 /*
3405                  * Locate the first rule with 'rulenum'
3406                  */
3407                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3408                         if (rule->rulenum == rulenum)
3409                                 break;
3410                 }
3411                 if (rule == NULL) /* we did not find any matching rules */
3412                         return (EINVAL);
3413                 zmsg.start_rule = rule;
3414                 zmsg.rulenum = rulenum;
3415
3416                 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
3417                                : "ipfw: Entry %d cleared.\n";
3418         }
3419         ifnet_domsg(&nmsg->nm_lmsg, 0);
3420         KKASSERT(zmsg.start_rule == NULL);
3421
3422         if (fw_verbose)
3423                 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
3424         return (0);
3425 }
3426
3427 /*
3428  * Check validity of the structure before insert.
3429  * Fortunately rules are simple, so this mostly needs to check rule sizes.
3430  */
3431 static int
3432 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
3433 {
3434         int l, cmdlen = 0;
3435         int have_action = 0;
3436         ipfw_insn *cmd;
3437
3438         *rule_flags = 0;
3439
3440         /* Check for valid size */
3441         if (size < sizeof(*rule)) {
3442                 kprintf("ipfw: rule too short\n");
3443                 return EINVAL;
3444         }
3445         l = IOC_RULESIZE(rule);
3446         if (l != size) {
3447                 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
3448                 return EINVAL;
3449         }
3450
3451         /* Check rule number */
3452         if (rule->rulenum == IPFW_DEFAULT_RULE) {
3453                 kprintf("ipfw: invalid rule number\n");
3454                 return EINVAL;
3455         }
3456
3457         /*
3458          * Now go through the individual checks. These are very simple,
3459          * basically only instruction sizes.
3460          */
3461         for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
3462              l -= cmdlen, cmd += cmdlen) {
3463                 cmdlen = F_LEN(cmd);
3464                 if (cmdlen > l) {
3465                         kprintf("ipfw: opcode %d size truncated\n",
3466                                 cmd->opcode);
3467                         return EINVAL;
3468                 }
3469
3470                 DEB(kprintf("ipfw: opcode %d\n", cmd->opcode);)
3471
3472                 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
3473                         /* This rule will create states */
3474                         *rule_flags |= IPFW_RULE_F_STATE;
3475                 }
3476
3477                 switch (cmd->opcode) {
3478                 case O_NOP:
3479                 case O_PROBE_STATE:
3480                 case O_KEEP_STATE:
3481                 case O_PROTO:
3482                 case O_IP_SRC_ME:
3483                 case O_IP_DST_ME:
3484                 case O_LAYER2:
3485                 case O_IN:
3486                 case O_FRAG:
3487                 case O_IPOPT:
3488                 case O_IPLEN:
3489                 case O_IPID:
3490                 case O_IPTOS:
3491                 case O_IPPRECEDENCE:
3492                 case O_IPTTL:
3493                 case O_IPVER:
3494                 case O_TCPWIN:
3495                 case O_TCPFLAGS:
3496                 case O_TCPOPTS:
3497                 case O_ESTAB:
3498                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
3499                                 goto bad_size;
3500                         break;
3501
3502                 case O_UID:
3503                 case O_GID:
3504                 case O_IP_SRC:
3505                 case O_IP_DST:
3506                 case O_TCPSEQ:
3507                 case O_TCPACK:
3508                 case O_PROB:
3509                 case O_ICMPTYPE:
3510                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
3511                                 goto bad_size;
3512                         break;
3513
3514                 case O_LIMIT:
3515                         if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
3516                                 goto bad_size;
3517                         break;
3518
3519                 case O_LOG:
3520                         if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
3521                                 goto bad_size;
3522
3523                         ((ipfw_insn_log *)cmd)->log_left =
3524                             ((ipfw_insn_log *)cmd)->max_log;
3525
3526                         break;
3527
3528                 case O_IP_SRC_MASK:
3529                 case O_IP_DST_MASK:
3530                         if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
3531                                 goto bad_size;
3532                         if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
3533                                 kprintf("ipfw: opcode %d, useless rule\n",
3534                                         cmd->opcode);
3535                                 return EINVAL;
3536                         }
3537                         break;
3538
3539                 case O_IP_SRC_SET:
3540                 case O_IP_DST_SET:
3541                         if (cmd->arg1 == 0 || cmd->arg1 > 256) {
3542                                 kprintf("ipfw: invalid set size %d\n",
3543                                         cmd->arg1);
3544                                 return EINVAL;
3545                         }
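                        /*
                         * Editorial note: the address set is stored as a
                         * bitmap of arg1 bits appended to the instruction,
                         * rounded up to whole 32-bit words, hence the
                         * (cmd->arg1 + 31) / 32 term below.
                         */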
3546                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
3547                             (cmd->arg1+31)/32 )
3548                                 goto bad_size;
3549                         break;
3550
3551                 case O_MACADDR2:
3552                         if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
3553                                 goto bad_size;
3554                         break;
3555
3556                 case O_MAC_TYPE:
3557                 case O_IP_SRCPORT:
3558                 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
3559                         if (cmdlen < 2 || cmdlen > 31)
3560                                 goto bad_size;
3561                         break;
3562
3563                 case O_RECV:
3564                 case O_XMIT:
3565                 case O_VIA:
3566                         if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
3567                                 goto bad_size;
3568                         break;
3569
3570                 case O_PIPE:
3571                 case O_QUEUE:
3572                         if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
3573                                 goto bad_size;
3574                         goto check_action;
3575
3576                 case O_FORWARD_IP:
3577                         if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
3578                                 goto bad_size;
3579                         goto check_action;
3580
3581                 case O_FORWARD_MAC: /* XXX not implemented yet */
3582                 case O_CHECK_STATE:
3583                 case O_COUNT:
3584                 case O_ACCEPT:
3585                 case O_DENY:
3586                 case O_REJECT:
3587                 case O_SKIPTO:
3588                 case O_DIVERT:
3589                 case O_TEE:
3590                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
3591                                 goto bad_size;
3592 check_action:
3593                         if (have_action) {
3594                                 kprintf("ipfw: opcode %d, multiple actions"
3595                                         " not allowed\n",
3596                                         cmd->opcode);
3597                                 return EINVAL;
3598                         }
3599                         have_action = 1;
3600                         if (l != cmdlen) {
3601                                 kprintf("ipfw: opcode %d, action must be"
3602                                         " last opcode\n",
3603                                         cmd->opcode);
3604                                 return EINVAL;
3605                         }
3606                         break;
3607                 default:
3608                         kprintf("ipfw: opcode %d, unknown opcode\n",
3609                                 cmd->opcode);
3610                         return EINVAL;
3611                 }
3612         }
3613         if (have_action == 0) {
3614                 kprintf("ipfw: missing action\n");
3615                 return EINVAL;
3616         }
3617         return 0;
3618
3619 bad_size:
3620         kprintf("ipfw: opcode %d size %d wrong\n",
3621                 cmd->opcode, cmdlen);
3622         return EINVAL;
3623 }
3624
3625 static int
3626 ipfw_ctl_add_rule(struct sockopt *sopt)
3627 {
3628         struct ipfw_ioc_rule *ioc_rule;
3629         size_t size;
3630         uint32_t rule_flags;
3631         int error;
3632         
3633         size = sopt->sopt_valsize;
3634         if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
3635             size < sizeof(*ioc_rule)) {
3636                 return EINVAL;
3637         }
3638         if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
3639                 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
3640                                           IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
3641         }
3642         ioc_rule = sopt->sopt_val;
3643
3644         error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
3645         if (error)
3646                 return error;
3647
3648         ipfw_add_rule(ioc_rule, rule_flags);
3649
3650         if (sopt->sopt_dir == SOPT_GET)
3651                 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
3652         return 0;
3653 }
3654
3655 static void *
3656 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
3657 {
3658         const struct ip_fw *sibling;
3659 #ifdef INVARIANTS
3660         int i;
3661 #endif
3662
3663         KKASSERT(rule->cpuid == 0);
3664
3665         ioc_rule->act_ofs = rule->act_ofs;
3666         ioc_rule->cmd_len = rule->cmd_len;
3667         ioc_rule->rulenum = rule->rulenum;
3668         ioc_rule->set = rule->set;
3669         ioc_rule->usr_flags = rule->usr_flags;
3670
3671         ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
3672         ioc_rule->static_count = static_count;
3673         ioc_rule->static_len = static_ioc_len;
3674
3675         /*
3676          * Visit (read-only) all of the rule's duplications to get
3677          * the necessary statistics
3678          */
3679 #ifdef INVARIANTS
3680         i = 0;
3681 #endif
3682         ioc_rule->pcnt = 0;
3683         ioc_rule->bcnt = 0;
3684         ioc_rule->timestamp = 0;
3685         for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
3686                 ioc_rule->pcnt += sibling->pcnt;
3687                 ioc_rule->bcnt += sibling->bcnt;
3688                 if (sibling->timestamp > ioc_rule->timestamp)
3689                         ioc_rule->timestamp = sibling->timestamp;
3690 #ifdef INVARIANTS
3691                 ++i;
3692 #endif
3693         }
3694         KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n"));
3695
3696         bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
3697
3698         return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
3699 }
3700
3701 static void
3702 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
3703                 struct ipfw_ioc_state *ioc_state)
3704 {
3705         const struct ipfw_flow_id *id;
3706         struct ipfw_ioc_flowid *ioc_id;
3707
3708         ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
3709                             0 : dyn_rule->expire - time_second;
3710         ioc_state->pcnt = dyn_rule->pcnt;
3711         ioc_state->bcnt = dyn_rule->bcnt;
3712
3713         ioc_state->dyn_type = dyn_rule->dyn_type;
3714         ioc_state->count = dyn_rule->count;
3715
3716         ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;
3717
3718         id = &dyn_rule->id;
3719         ioc_id = &ioc_state->id;
3720
3721         ioc_id->type = ETHERTYPE_IP;
3722         ioc_id->u.ip.dst_ip = id->dst_ip;
3723         ioc_id->u.ip.src_ip = id->src_ip;
3724         ioc_id->u.ip.dst_port = id->dst_port;
3725         ioc_id->u.ip.src_port = id->src_port;
3726         ioc_id->u.ip.proto = id->proto;
3727 }
3728
3729 static int
3730 ipfw_ctl_get_rules(struct sockopt *sopt)
3731 {
3732         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3733         struct ip_fw *rule;
3734         void *bp;
3735         size_t size;
3736         uint32_t dcount = 0;
3737
3738         /*
3739          * Pass up a copy of the current rules. Static rules
3740          * come first (the last of which has number IPFW_DEFAULT_RULE),
3741          * followed by a possibly empty list of dynamic rules.
3742          */
3743         crit_enter();
3744
3745         size = static_ioc_len;  /* size of static rules */
3746         if (ipfw_dyn_v) {       /* add size of dyn.rules */
3747                 dcount = dyn_count;
3748                 size += dcount * sizeof(struct ipfw_ioc_state);
3749         }
3750
3751         if (sopt->sopt_valsize < size) {
3752                 /* short length, no need to return incomplete rules */
3753                 /* XXX: if superuser, no need to zero buffer */
3754                 bzero(sopt->sopt_val, sopt->sopt_valsize); 
3755                 return 0;
3756         }
3757         bp = sopt->sopt_val;
3758
3759         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3760                 bp = ipfw_copy_rule(rule, bp);
3761
3762         if (ipfw_dyn_v && dcount != 0) {
3763                 struct ipfw_ioc_state *ioc_state = bp;
3764                 uint32_t dcount2 = 0;
3765 #ifdef INVARIANTS
3766                 size_t old_size = size;
3767 #endif
3768                 int i;
3769
3770                 lockmgr(&dyn_lock, LK_SHARED);
3771
3772                 /* Check 'ipfw_dyn_v' again with lock held */
3773                 if (ipfw_dyn_v == NULL)
3774                         goto skip;
3775
3776                 for (i = 0; i < curr_dyn_buckets; i++) {
3777                         ipfw_dyn_rule *p;
3778
3779                         /*
3780                          * The # of dynamic rules may have grown after the
3781                          * snapshot of 'dyn_count' was taken, so we will have
3782                          * to check 'dcount' (snapshot of dyn_count) here to
3783                          * make sure that we don't overflow the pre-allocated
3784                          * buffer.
3785                          */
3786                         for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
3787                              p = p->next, ioc_state++, dcount--, dcount2++)
3788                                 ipfw_copy_state(p, ioc_state);
3789                 }
3790 skip:
3791                 lockmgr(&dyn_lock, LK_RELEASE);
3792
3793                 /*
3794                  * The # of dynamic rules may have shrunk after the
3795                  * snapshot of 'dyn_count' was taken.  To give the user a
3796                  * correct dynamic rule count, we use the 'dcount2'
3797                  * calculated above (with shared lockmgr lock held).
3798                  */
3799                 size = static_ioc_len +
3800                        (dcount2 * sizeof(struct ipfw_ioc_state));
3801                 KKASSERT(size <= old_size);
3802         }
3803
3804         crit_exit();
3805
3806         sopt->sopt_valsize = size;
3807         return 0;
3808 }
3809
3810 static void
3811 ipfw_set_disable_dispatch(struct netmsg *nmsg)
3812 {
3813         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
3814         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3815
3816         ctx->ipfw_gen++;
3817         ctx->ipfw_set_disable = lmsg->u.ms_result32;
3818
3819         ifnet_forwardmsg(lmsg, mycpuid + 1);
3820 }
3821
3822 static void
3823 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
3824 {
3825         struct netmsg nmsg;
3826         struct lwkt_msg *lmsg;
3827         uint32_t set_disable;
3828
3829         /* IPFW_DEFAULT_SET is always enabled */
3830         enable |= (1 << IPFW_DEFAULT_SET);
3831         set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
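        /*
         * Editorial example: if sets 1 and 2 are currently disabled and the
         * caller passes disable = (1 << 3) and enable = (1 << 1), the new
         * mask leaves sets 2 and 3 disabled and re-enables set 1.
         */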
3832
3833         bzero(&nmsg, sizeof(nmsg));
3834         netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_set_disable_dispatch);
3835         lmsg = &nmsg.nm_lmsg;
3836         lmsg->u.ms_result32 = set_disable;
3837
3838         ifnet_domsg(lmsg, 0);
3839 }
3840
3841 /**
3842  * {set|get}sockopt parser.
3843  */
3844 static int
3845 ipfw_ctl(struct sockopt *sopt)
3846 {
3847         int error, rulenum;
3848         uint32_t *masks;
3849         size_t size;
3850
3851         error = 0;
3852
3853         switch (sopt->sopt_name) {
3854         case IP_FW_GET:
3855                 error = ipfw_ctl_get_rules(sopt);
3856                 break;
3857
3858         case IP_FW_FLUSH:
3859                 /*
3860                  * Normally we cannot release the lock on each iteration.
3861                  * We could do it here only because we always start from the
3862                  * head, so there is no risk of missing some entries.
3863                  * On the other hand, the risk is that we end up with
3864                  * a very inconsistent ruleset, so better keep the lock
3865                  * around the whole cycle.
3866                  *
3867                  * XXX this code can be improved by resetting the head of
3868                  * the list to point to the default rule, and then freeing
3869                  * the old list without the need for a lock.
3870                  */
3871
3872                 crit_enter();
3873                 ipfw_flush(0 /* keep default rule */);
3874                 crit_exit();
3875                 break;
3876
3877         case IP_FW_ADD:
3878                 error = ipfw_ctl_add_rule(sopt);
3879                 break;
3880
3881         case IP_FW_DEL:
3882                 /*
3883                  * IP_FW_DEL is used for deleting single rules or sets,
3884                  * and (ab)used to atomically manipulate sets.
3885                  * Argument size is used to distinguish between the two:
3886                  *    sizeof(uint32_t)
3887                  *      delete single rule or set of rules,
3888                  *      or reassign rules (or sets) to a different set.
3889                  *    2 * sizeof(uint32_t)
3890                  *      atomic disable/enable sets.
3891                  *      first uint32_t contains sets to be disabled,
3892                  *      second uint32_t contains sets to be enabled.
3893                  */
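                /*
                 * Hedged userland sketch (editorial addition): to atomically
                 * disable set 1 and enable set 2, a caller could pass two
                 * masks, e.g.
                 *
                 *      uint32_t masks[2] = { 1 << 1, 1 << 2 };
                 *      setsockopt(s, IPPROTO_IP, IP_FW_DEL,
                 *                 masks, sizeof(masks));
                 *
                 * which is handled by ipfw_ctl_set_disable() below.
                 */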
3894                 masks = sopt->sopt_val;
3895                 size = sopt->sopt_valsize;
3896                 if (size == sizeof(*masks)) {
3897                         /*
3898                          * Delete or reassign static rule
3899                          */
3900                         error = ipfw_ctl_alter(masks[0]);
3901                 } else if (size == (2 * sizeof(*masks))) {
3902                         /*
3903                          * Set enable/disable
3904                          */
3905                         ipfw_ctl_set_disable(masks[0], masks[1]);
3906                 } else {
3907                         error = EINVAL;
3908                 }
3909                 break;
3910
3911         case IP_FW_ZERO:
3912         case IP_FW_RESETLOG: /* argument is an int, the rule number */
3913                 rulenum = 0;
3914
3915                 if (sopt->sopt_val != 0) {
3916                     error = soopt_to_kbuf(sopt, &rulenum,
3917                             sizeof(int), sizeof(int));
3918                     if (error)
3919                         break;
3920                 }
3921                 error = ipfw_ctl_zero_entry(rulenum,
3922                         sopt->sopt_name == IP_FW_RESETLOG);
3923                 break;
3924
3925         default:
3926                 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
3927                 error = EINVAL;
3928         }
3929         return error;
3930 }
3931
3932 /*
3933  * This procedure is only used to handle keepalives. It is invoked
3934  * every dyn_keepalive_period seconds.
3935  */
3936 static void
3937 ipfw_tick(void *dummy __unused)
3938 {
3939         time_t keep_alive;
3940         uint32_t gen;
3941         int i;
3942
3943         if (ipfw_dyn_v == NULL || dyn_count == 0)
3944                 goto done;
3945
3946         keep_alive = time_second;
3947
3948         lockmgr(&dyn_lock, LK_EXCLUSIVE);
3949 again:
3950         if (ipfw_dyn_v == NULL || dyn_count == 0) {
3951                 lockmgr(&dyn_lock, LK_RELEASE);
3952                 goto done;
3953         }
3954         gen = dyn_buckets_gen;
3955
3956         for (i = 0; i < curr_dyn_buckets; i++) {
3957                 ipfw_dyn_rule *q, *prev;
3958
3959                 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
3960                         uint32_t ack_rev, ack_fwd;
3961                         struct ipfw_flow_id id;
3962
3963                         if (q->dyn_type == O_LIMIT_PARENT)
3964                                 goto next;
3965
3966                         if (TIME_LEQ(q->expire, time_second)) {
3967                                 /* State expired */
3968                                 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
3969                                 continue;
3970                         }
3971
3972                         /*
3973                          * Keep alive processing
3974                          */
3975
3976                         if (!dyn_keepalive)
3977                                 goto next;
3978                         if (q->id.proto != IPPROTO_TCP)
3979                                 goto next;
3980                         if ((q->state & BOTH_SYN) != BOTH_SYN)
3981                                 goto next;
3982                         if (TIME_LEQ(time_second + dyn_keepalive_interval,
3983                             q->expire))
3984                                 goto next;      /* too early */
3985                         if (q->keep_alive == keep_alive)
3986                                 goto next;      /* already done */
3987
3988                         /*
3989                          * Save the necessary information, so that it
3990                          * survives possible blocking in send_pkt().
3991                          */
3992                         id = q->id;
3993                         ack_rev = q->ack_rev;
3994                         ack_fwd = q->ack_fwd;
3995
3996                         /* Sending has been started */
3997                         q->keep_alive = keep_alive;
3998
3999                         /* Release the lock to avoid a possible deadlock */
4000                         lockmgr(&dyn_lock, LK_RELEASE);
4001                         send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
4002                         send_pkt(&id, ack_fwd - 1, ack_rev, 0);
4003                         lockmgr(&dyn_lock, LK_EXCLUSIVE);
4004
4005                         if (gen != dyn_buckets_gen) {
4006                                 /*
4007                                  * Dyn bucket array has been changed during
4008                                  * the two sends above; restart the scan.
4009                                  */
4010                                 goto again;
4011                         }
4012 next:
4013                         prev = q;
4014                         q = q->next;
4015                 }
4016         }
4017         lockmgr(&dyn_lock, LK_RELEASE);
4018 done:
4019         callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
4020                       ipfw_tick, NULL);
4021 }
4022
4023 static int
4024 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
4025 {
4026         return sysctl_int_range(oidp, arg1, arg2, req,
4027                IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
4028 }
4029
4030 static int
4031 ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
4032 {
4033         int error, value;
4034
4035         lockmgr(&dyn_lock, LK_EXCLUSIVE);
4036
4037         value = dyn_buckets;
4038         error = sysctl_handle_int(oidp, &value, 0, req);
4039         if (error || !req->newptr)
4040                 goto back;
4041
4042         /*
4043          * Make sure we have a power of 2 and
4044          * do not allow more than 64k entries.
4045          */
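        /*
         * Editorial example: 256 or 4096 pass the checks below, while
         * 1000 is rejected because (1000 & 999) != 0.
         */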
4046         error = EINVAL;
4047         if (value <= 1 || value > 65536)
4048                 goto back;
4049         if ((value & (value - 1)) != 0)
4050                 goto back;
4051
4052         error = 0;
4053         dyn_buckets = value;
4054 back:
4055         lockmgr(&dyn_lock, LK_RELEASE);
4056         return error;
4057 }
4058
4059 static int
4060 ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
4061 {
4062         return sysctl_int_range(oidp, arg1, arg2, req,
4063                                 1, dyn_keepalive_period - 1);
4064 }
4065
4066 static int
4067 ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
4068 {
4069         return sysctl_int_range(oidp, arg1, arg2, req,
4070                                 1, dyn_keepalive_period - 1);
4071 }
4072
4073 static void
4074 ipfw_ctx_init_dispatch(struct netmsg *nmsg)
4075 {
4076         struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
4077         struct ipfw_context *ctx;
4078         struct ip_fw *def_rule;
4079
4080         ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
4081         ipfw_ctx[mycpuid] = ctx;
4082
4083         def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);
4084
4085         def_rule->act_ofs = 0;
4086         def_rule->rulenum = IPFW_DEFAULT_RULE;
4087         def_rule->cmd_len = 1;
4088         def_rule->set = IPFW_DEFAULT_SET;
4089
4090         def_rule->cmd[0].len = 1;
4091 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
4092         def_rule->cmd[0].opcode = O_ACCEPT;
4093 #else
4094         def_rule->cmd[0].opcode = O_DENY;
4095 #endif
4096
4097         def_rule->refcnt = 1;
4098         def_rule->cpuid = mycpuid;
4099
4100         /* Install the default rule */
4101         ctx->ipfw_default_rule = def_rule;
4102         ctx->ipfw_layer3_chain = def_rule;
4103
4104         /* Link rule CPU sibling */
4105         ipfw_link_sibling(fwmsg, def_rule);
4106
4107         /* Statistics only need to be updated once */
4108         if (mycpuid == 0)
4109                 ipfw_inc_static_count(def_rule);
4110
4111         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
4112 }
4113
4114 static void
4115 ipfw_init_dispatch(struct netmsg *nmsg)
4116 {
4117         struct netmsg_ipfw fwmsg;
4118         int error = 0;
4119
4120         crit_enter();
4121
4122         if (IPFW_LOADED) {
4123                 kprintf("IP firewall already loaded\n");
4124                 error = EEXIST;
4125                 goto reply;
4126         }
4127
4128         bzero(&fwmsg, sizeof(fwmsg));
4129         netmsg_init(&fwmsg.nmsg, &curthread->td_msgport, 0,
4130                     ipfw_ctx_init_dispatch);
4131         ifnet_domsg(&fwmsg.nmsg.nm_lmsg, 0);
4132
4133         ip_fw_chk_ptr = ipfw_chk;
4134         ip_fw_ctl_ptr = ipfw_ctl;
4135         ip_fw_dn_io_ptr = ipfw_dummynet_io;
4136
4137         kprintf("ipfw2 initialized, divert %s, "
4138                 "rule-based forwarding enabled, default to %s, logging ",
4139 #ifdef IPDIVERT
4140                 "enabled",
4141 #else
4142                 "disabled",
4143 #endif
4144                 ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
4145                 O_ACCEPT ? "accept" : "deny");
4146
4147 #ifdef IPFIREWALL_VERBOSE
4148         fw_verbose = 1;
4149 #endif
4150 #ifdef IPFIREWALL_VERBOSE_LIMIT
4151         verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
4152 #endif
4153         if (fw_verbose == 0) {
4154                 kprintf("disabled\n");
4155         } else if (verbose_limit == 0) {
4156                 kprintf("unlimited\n");
4157         } else {
4158                 kprintf("limited to %d packets/entry by default\n",
4159                         verbose_limit);
4160         }
4161
4162         callout_init(&ipfw_timeout_h);
4163         lockinit(&dyn_lock, "ipfw_dyn", 0, 0);
4164
4165         ip_fw_loaded = 1;
4166         callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);
4167 reply:
4168         crit_exit();
4169         lwkt_replymsg(&nmsg->nm_lmsg, error);
4170 }
4171
4172 static int
4173 ipfw_init(void)
4174 {
4175         struct netmsg smsg;
4176
4177         netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_init_dispatch);
4178         return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
4179 }
4180
4181 #ifdef KLD_MODULE
4182
4183 static void
4184 ipfw_fini_dispatch(struct netmsg *nmsg)
4185 {
4186         int error = 0, cpu;
4187
4188         crit_enter();
4189
4190         if (ipfw_refcnt != 0) {
4191                 error = EBUSY;
4192                 goto reply;
4193         }
4194
4195         callout_stop(&ipfw_timeout_h);
4196
4197         ip_fw_loaded = 0;
4198         netmsg_service_sync();
4199
4200         ip_fw_chk_ptr = NULL;
4201         ip_fw_ctl_ptr = NULL;
4202         ip_fw_dn_io_ptr = NULL;
4203         ipfw_flush(1 /* kill default rule */);
4204
4205         /* Free the per-cpu contexts */
4206         for (cpu = 0; cpu < ncpus; ++cpu)
4207                 kfree(ipfw_ctx[cpu], M_IPFW);
4208
4209         kprintf("IP firewall unloaded\n");
4210 reply:
4211         crit_exit();
4212         lwkt_replymsg(&nmsg->nm_lmsg, error);
4213 }
4214
4215 static int
4216 ipfw_fini(void)
4217 {
4218         struct netmsg smsg;
4219
4220         netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_fini_dispatch);
4221         return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
4222 }
4223
4224 #endif  /* KLD_MODULE */
4225
4226 static int
4227 ipfw_modevent(module_t mod, int type, void *unused)
4228 {
4229         int err = 0;
4230
4231         switch (type) {
4232         case MOD_LOAD:
4233                 err = ipfw_init();
4234                 break;
4235
4236         case MOD_UNLOAD:
4237 #ifndef KLD_MODULE
4238                 kprintf("ipfw statically compiled, cannot unload\n");
4239                 err = EBUSY;
4240 #else
4241                 err = ipfw_fini();
4242 #endif
4243                 break;
4244         default:
4245                 break;
4246         }
4247         return err;
4248 }
4249
4250 static moduledata_t ipfwmod = {
4251         "ipfw",
4252         ipfw_modevent,
4253         0
4254 };
4255 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
4256 MODULE_VERSION(ipfw, 1);