1 /*
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
26  * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.77 2008/08/26 11:42:40 sephe Exp $
27  */
28
29 #define        DEB(x)
30 #define        DDB(x) x
31
32 /*
33  * Implement IP packet firewall (new version)
34  */
35
36 #ifndef KLD_MODULE
37 #include "opt_ipfw.h"
38 #include "opt_ipdn.h"
39 #include "opt_ipdivert.h"
40 #include "opt_inet.h"
41 #ifndef INET
42 #error IPFIREWALL requires INET.
43 #endif /* INET */
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/kernel.h>
51 #include <sys/proc.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/thread2.h>
57 #include <sys/ucred.h>
58 #include <sys/in_cksum.h>
59 #include <sys/lock.h>
60
61 #include <net/if.h>
62 #include <net/route.h>
63 #include <net/netmsg2.h>
64
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include "ip_fw.h"
73 #include <net/dummynet/ip_dummynet.h>
74 #include <netinet/tcp.h>
75 #include <netinet/tcp_timer.h>
76 #include <netinet/tcp_var.h>
77 #include <netinet/tcpip.h>
78 #include <netinet/udp.h>
79 #include <netinet/udp_var.h>
80
81 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
82
83 /*
84  * Description of per-CPU rule duplication:
85  *
86  * Module loading/unloading and all ioctl operations are serialized
87  * by netisr0, so we don't have any ordering or locking problems.
88  *
89  * The following graph shows how an operation on the per-CPU rule
90  * lists is performed [2 CPU case]:
91  *
92  *   CPU0                 CPU1
93  *
94  * netisr0 <------------------------------------+
95  *  domsg                                       |
96  *    |                                         |
97  *    | netmsg                                  |
98  *    |                                         |
99  *    V                                         |
100  *  ifnet0                                      |
101  *    :                                         | netmsg
102  *    :(delete/add...)                          |
103  *    :                                         |
104  *    :         netmsg                          |
105  *  forwardmsg---------->ifnet1                 |
106  *                          :                   |
107  *                          :(delete/add...)    |
108  *                          :                   |
109  *                          :                   |
110  *                        replymsg--------------+
111  *
112  *
113  *
114  *
115  * Rules which will not create states (dyn rules) [2 CPU case]
116  *
117  *    CPU0               CPU1
118  * layer3_chain       layer3_chain
119  *     |                  |
120  *     V                  V
121  * +-------+ sibling  +-------+ sibling
122  * | rule1 |--------->| rule1 |--------->NULL
123  * +-------+          +-------+
124  *     |                  |
125  *     |next              |next
126  *     V                  V
127  * +-------+ sibling  +-------+ sibling
128  * | rule2 |--------->| rule2 |--------->NULL
129  * +-------+          +-------+
130  *
131  * ip_fw.sibling:
132  * 1) Ease statistics calculation during IP_FW_GET.  We only need to
133  *    iterate the layer3_chain on CPU0; the current rule's duplicates on
134  *    the other CPUs can safely be accessed read-only by following
135  *    ip_fw.sibling
136  * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
137  *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
138  *       rule1 and rule2.  To make this decision we need to iterate the
139  *       layer3_chain on CPU0.  The netmsg, which is used to insert the
140  *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
141  *       as next_rule
142  *    b) After the insertion on CPU0 is done, we will move on to CPU1.
143  *       But instead of locating rule3's position on CPU1 by iterating
144  *       the layer3_chain on CPU1, we set the netmsg's prev_rule
145  *       to rule1->sibling and next_rule to rule2->sibling before the
146  *       netmsg is forwarded to CPU1 from CPU0
147  *       
148  *    
149  *
150  * Rules which will create states (dyn rules) [2 CPU case]
151  * (unnecessary parts are omitted; they are the same as in the previous figure)
152  *
153  *   CPU0                       CPU1
154  * 
155  * +-------+                  +-------+
156  * | rule1 |                  | rule1 |
157  * +-------+                  +-------+
158  *   ^   |                      |   ^
159  *   |   |stub              stub|   |
160  *   |   |                      |   |
161  *   |   +----+            +----+   |
162  *   |        |            |        |
163  *   |        V            V        |
164  *   |    +--------------------+    |
165  *   |    |     rule_stub      |    |
166  *   |    | (read-only shared) |    |
167  *   |    |                    |    |
168  *   |    | back pointer array |    |
169  *   |    | (indexed by cpuid) |    |
170  *   |    |                    |    |
171  *   +----|---------[0]        |    |
172  *        |         [1]--------|----+
173  *        |                    |
174  *        +--------------------+
175  *          ^            ^
176  *          |            |
177  *  ........|............|............
178  *  :       |            |           :
179  *  :       |stub        |stub       :
180  *  :       |            |           :
181  *  :  +---------+  +---------+      :
182  *  :  | state1a |  | state1b | .... :
183  *  :  +---------+  +---------+      :
184  *  :                                :
185  *  :           states table         :
186  *  :            (shared)            :
187  *  :      (protected by dyn_lock)   :
188  *  ..................................
189  * 
190  * [state1a and state1b are states created by rule1]
191  *
192  * ip_fw_stub:
193  * This structure is introduced so that the shared (locked) state table
194  * can work with the per-CPU (duplicated) static rules.  It bridges
195  * states and static rules and serves as the static rule's placeholder
196  * (a read-only shared part of the duplicated rules) from the states' point of view.
197  *
198  * IPFW_RULE_F_STATE (only for rules which create states):
199  * o  During rule installation, this flag is turned on after rule's
200  *    duplications reach all CPUs, to avoid at least the following race:
201  *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
202  *    2) rule1 creates state1
203  *    3) state1 is located on CPU1 by check-state
204  *    But rule1 is not duplicated on CPU1 yet
205  * o  During rule deletion, this flag is turned off before deleting states
206  *    created by the rule and before deleting the rule itself, so no
207  *    more states will be created by the to-be-deleted rule even when
208  *    its duplicates on some CPUs have not been eliminated yet.
209  */
210
211 #define IPFW_AUTOINC_STEP_MIN   1
212 #define IPFW_AUTOINC_STEP_MAX   1000
213 #define IPFW_AUTOINC_STEP_DEF   100
214
215 #define IPFW_DEFAULT_RULE       65535   /* rulenum for the default rule */
216 #define IPFW_DEFAULT_SET        31      /* set number for the default rule */
217
218 struct netmsg_ipfw {
219         struct netmsg   nmsg;
220         const struct ipfw_ioc_rule *ioc_rule;
221         struct ip_fw    *next_rule;
222         struct ip_fw    *prev_rule;
223         struct ip_fw    *sibling;
224         struct ip_fw_stub *stub;
225 };
226
227 struct netmsg_del {
228         struct netmsg   nmsg;
229         struct ip_fw    *start_rule;
230         struct ip_fw    *prev_rule;
231         uint16_t        rulenum;
232         uint8_t         from_set;
233         uint8_t         to_set;
234 };
235
236 struct netmsg_zent {
237         struct netmsg   nmsg;
238         struct ip_fw    *start_rule;
239         uint16_t        rulenum;
240         uint16_t        log_only;
241 };
242
243 struct ipfw_context {
244         struct ip_fw    *ipfw_layer3_chain;     /* list of rules for layer3 */
245         struct ip_fw    *ipfw_default_rule;     /* default rule */
246         uint64_t        ipfw_norule_counter;    /* counter for ipfw_log(NULL) */
247
248         /*
249          * ipfw_set_disable contains one bit per set value (0..31).
250          * If the bit is set, all rules with the corresponding set
251  * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
252          * default rule and CANNOT be disabled.
253          */
254         uint32_t        ipfw_set_disable;
255         uint32_t        ipfw_gen;               /* generation of rule list */
256 };
257
258 static struct ipfw_context      *ipfw_ctx[MAXCPU];
259
260 #ifdef KLD_MODULE
261 /*
262  * The module can not be unloaded if there are references to
263  * certain rules of ipfw(4), e.g. from dummynet(4)
264  */
265 static int ipfw_refcnt;
266 #endif
267
268 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
269
270 /*
271  * The following two global variables are accessed and
272  * updated only on CPU0
273  */
274 static uint32_t static_count;   /* # of static rules */
275 static uint32_t static_ioc_len; /* bytes of static rules */
276
277 /*
278  * If 1, the ipfw static rules are being flushed and
279  * ipfw_chk() will skip to the default rule.
280  */
281 static int ipfw_flushing;
282
283 static int fw_verbose;
284 static int verbose_limit;
285
286 static int fw_debug = 1;
287 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
288
289 static int      ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
290 static int      ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
291 static int      ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
292 static int      ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);
293
294 #ifdef SYSCTL_NODE
295 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
296 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, enable, CTLFLAG_RW,
297     &fw_enable, 0, "Enable ipfw");
298 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
299     &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
300     "Rule number autincrement step");
301 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO,one_pass,CTLFLAG_RW,
302     &fw_one_pass, 0,
303     "Only do a single pass through ipfw when using dummynet(4)");
304 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
305     &fw_debug, 0, "Enable printing of debug ip_fw statements");
306 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
307     &fw_verbose, 0, "Log matches to ipfw rules");
308 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
309     &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
310
311 /*
312  * Description of dynamic rules.
313  *
314  * Dynamic rules are stored in lists accessed through a hash table
315  * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
316  * be modified through the sysctl variable dyn_buckets; the change
317  * takes effect when the table becomes empty.
318  *
319  * XXX currently there is only one list, ipfw_dyn.
320  *
321  * When a packet is received, its address fields are first masked
322  * with the mask defined for the rule, then hashed, then matched
323  * against the entries in the corresponding list.
324  * Dynamic rules can be used for different purposes:
325  *  + stateful rules;
326  *  + enforcing limits on the number of sessions;
327  *  + in-kernel NAT (not implemented yet)
328  *
329  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
330  * measured in seconds and depending on the flags.
331  *
332  * The total number of dynamic rules is stored in dyn_count.
333  * The maximum number of dynamic rules is dyn_max.  Once we reach
334  * the maximum we do not create any more.  This is done to avoid
335  * consuming too much memory, but also too much time when searching
336  * on each packet (ideally, we should instead try to put a limit on
337  * the length of the list in each bucket...).
338  *
339  * Each dynamic rule holds a pointer to the parent ipfw rule so
340  * we know what action to perform. Dynamic rules are removed when
341  * the parent rule is deleted. XXX we should make them survive.
342  *
343  * There are some limitations with dynamic rules -- we do not
344  * obey the 'randomized match', and we do not do multiple
345  * passes through the firewall. XXX check the latter!!!
346  *
347  * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
348  * Only TCP state transitions change a dynamic rule's state and ack
349  * sequences, and all packets of one TCP connection go through the same
350  * TCP thread, so it is safe to hold the shared lockmgr lock during
351  * dynamic rule lookup.  The keepalive callout takes the exclusive
352  * lockmgr lock when it searches for dynamic rules that need keepalives,
353  * so it will not see half-updated state and ack sequences.  Though
354  * updating the expire field looks racy for other protocols, the
355  * one-second resolution of that field makes this kind of race harmless.
356  * XXX statistics' updating is _not_ MPsafe!!!
357  * XXX once UDP output path is fixed, we could use lockless dynamic rule
358  *     hash table
359  */
360 static ipfw_dyn_rule **ipfw_dyn_v = NULL;
361 static uint32_t dyn_buckets = 256; /* must be power of 2 */
362 static uint32_t curr_dyn_buckets = 256; /* must be power of 2 */
363 static uint32_t dyn_buckets_gen; /* generation of dyn buckets array */
364 static struct lock dyn_lock; /* dynamic rules' hash table lock */
365 static struct callout ipfw_timeout_h;
366
367 /*
368  * Timeouts for various events in handling dynamic rules.
369  */
370 static uint32_t dyn_ack_lifetime = 300;
371 static uint32_t dyn_syn_lifetime = 20;
372 static uint32_t dyn_fin_lifetime = 1;
373 static uint32_t dyn_rst_lifetime = 1;
374 static uint32_t dyn_udp_lifetime = 10;
375 static uint32_t dyn_short_lifetime = 5;
376
377 /*
378  * Keepalives are sent if dyn_keepalive is set. They are sent every
379  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
380  * seconds of lifetime of a rule.
381  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
382  * than dyn_keepalive_period.
383  */
384
385 static uint32_t dyn_keepalive_interval = 20;
386 static uint32_t dyn_keepalive_period = 5;
387 static uint32_t dyn_keepalive = 1;      /* do send keepalives */
388
389 static uint32_t dyn_count;              /* # of dynamic rules */
390 static uint32_t dyn_max = 4096;         /* max # of dynamic rules */
391
392 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
393     &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
394 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
395     &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
396 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
397     &dyn_count, 0, "Number of dyn. rules");
398 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
399     &dyn_max, 0, "Max number of dyn. rules");
400 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
401     &static_count, 0, "Number of static rules");
402 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
403     &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
404 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
405     &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
406 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
407     CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
408     "Lifetime of dyn. rules for fin");
409 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
410     CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
411     "Lifetime of dyn. rules for rst");
412 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
413     &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
414 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
415     &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
416 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
417     &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
418
419 #endif /* SYSCTL_NODE */
420
421 static ip_fw_chk_t      ipfw_chk;
422
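/*
 * Rule reference counting: rules are duplicated per CPU and are only
 * referenced and freed on their owning CPU (see the cpuid assertions
 * below), so rule->refcnt needs no atomic operations; only the shared
 * module reference count (ipfw_refcnt) is manipulated atomically.
 */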
423 static __inline int
424 ipfw_free_rule(struct ip_fw *rule)
425 {
426         KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
427         KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
428         rule->refcnt--;
429         if (rule->refcnt == 0) {
430                 kfree(rule, M_IPFW);
431                 return 1;
432         }
433         return 0;
434 }
435
436 static void
437 ipfw_unref_rule(void *priv)
438 {
439         ipfw_free_rule(priv);
440 #ifdef KLD_MODULE
441         atomic_subtract_int(&ipfw_refcnt, 1);
442 #endif
443 }
444
445 static __inline void
446 ipfw_ref_rule(struct ip_fw *rule)
447 {
448         KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));
449 #ifdef KLD_MODULE
450         atomic_add_int(&ipfw_refcnt, 1);
451 #endif
452         rule->refcnt++;
453 }
454
455 /*
456  * This macro maps an ip pointer into a pointer of type T to the header that follows the IP header and its options
457  */
458 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))
459
460 static __inline int
461 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
462 {
463         int type = L3HDR(struct icmp,ip)->icmp_type;
464
465         return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
466 }
467
468 #define TT      ((1 << ICMP_ECHO) | \
469                  (1 << ICMP_ROUTERSOLICIT) | \
470                  (1 << ICMP_TSTAMP) | \
471                  (1 << ICMP_IREQ) | \
472                  (1 << ICMP_MASKREQ))
473
474 static int
475 is_icmp_query(struct ip *ip)
476 {
477         int type = L3HDR(struct icmp, ip)->icmp_type;
478
479         return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
480 }
481
482 #undef TT
483
484 /*
485  * The following checks use two arrays of 8 or 16 bits to store the
486  * bits that we want set or clear, respectively. They are in the
487  * low and high half of cmd->arg1 or cmd->d[0].
488  *
489  * We scan options and store the bits we find set. We succeed if
490  *
491  *      (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
492  *
493  * The code is sometimes optimized not to store additional variables.
494  */
495
496 static int
497 flags_match(ipfw_insn *cmd, uint8_t bits)
498 {
499         u_char want_clear;
500         bits = ~bits;
501
502         if (((cmd->arg1 & 0xff) & bits) != 0)
503                 return 0; /* some bits we want set were clear */
504
505         want_clear = (cmd->arg1 >> 8) & 0xff;
506         if ((want_clear & bits) != want_clear)
507                 return 0; /* some bits we want clear were set */
508         return 1;
509 }
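/*
 * Illustrative example of the encoding above: a match such as ipfw's
 * "tcpflags syn,!ack" would be encoded with TH_SYN in the low byte of
 * cmd->arg1 (bits that must be set) and TH_ACK in the high byte (bits
 * that must be clear).  For a packet carrying only TH_SYN, ~bits keeps
 * TH_ACK set and clears TH_SYN, so both tests above pass and
 * flags_match() returns 1.
 */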
510
511 static int
512 ipopts_match(struct ip *ip, ipfw_insn *cmd)
513 {
514         int optlen, bits = 0;
515         u_char *cp = (u_char *)(ip + 1);
516         int x = (ip->ip_hl << 2) - sizeof(struct ip);
517
518         for (; x > 0; x -= optlen, cp += optlen) {
519                 int opt = cp[IPOPT_OPTVAL];
520
521                 if (opt == IPOPT_EOL)
522                         break;
523
524                 if (opt == IPOPT_NOP) {
525                         optlen = 1;
526                 } else {
527                         optlen = cp[IPOPT_OLEN];
528                         if (optlen <= 0 || optlen > x)
529                                 return 0; /* invalid or truncated */
530                 }
531
532                 switch (opt) {
533                 case IPOPT_LSRR:
534                         bits |= IP_FW_IPOPT_LSRR;
535                         break;
536
537                 case IPOPT_SSRR:
538                         bits |= IP_FW_IPOPT_SSRR;
539                         break;
540
541                 case IPOPT_RR:
542                         bits |= IP_FW_IPOPT_RR;
543                         break;
544
545                 case IPOPT_TS:
546                         bits |= IP_FW_IPOPT_TS;
547                         break;
548
549                 default:
550                         break;
551                 }
552         }
553         return (flags_match(cmd, bits));
554 }
555
556 static int
557 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
558 {
559         int optlen, bits = 0;
560         struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
561         u_char *cp = (u_char *)(tcp + 1);
562         int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
563
564         for (; x > 0; x -= optlen, cp += optlen) {
565                 int opt = cp[0];
566
567                 if (opt == TCPOPT_EOL)
568                         break;
569
570                 if (opt == TCPOPT_NOP) {
571                         optlen = 1;
572                 } else {
573                         optlen = cp[1];
574                         if (optlen <= 0)
575                                 break;
576                 }
577
578                 switch (opt) {
579                 case TCPOPT_MAXSEG:
580                         bits |= IP_FW_TCPOPT_MSS;
581                         break;
582
583                 case TCPOPT_WINDOW:
584                         bits |= IP_FW_TCPOPT_WINDOW;
585                         break;
586
587                 case TCPOPT_SACK_PERMITTED:
588                 case TCPOPT_SACK:
589                         bits |= IP_FW_TCPOPT_SACK;
590                         break;
591
592                 case TCPOPT_TIMESTAMP:
593                         bits |= IP_FW_TCPOPT_TS;
594                         break;
595
596                 case TCPOPT_CC:
597                 case TCPOPT_CCNEW:
598                 case TCPOPT_CCECHO:
599                         bits |= IP_FW_TCPOPT_CC;
600                         break;
601
602                 default:
603                         break;
604                 }
605         }
606         return (flags_match(cmd, bits));
607 }
608
609 static int
610 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
611 {
612         if (ifp == NULL)        /* no iface with this packet, match fails */
613                 return 0;
614
615         /* Check by name or by IP address */
616         if (cmd->name[0] != '\0') { /* match by name */
617                 /* Check name */
618                 if (cmd->p.glob) {
619                         if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
620                                 return(1);
621                 } else {
622                         if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
623                                 return(1);
624                 }
625         } else {
626                 struct ifaddr_container *ifac;
627
628                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
629                         struct ifaddr *ia = ifac->ifa;
630
631                         if (ia->ifa_addr == NULL)
632                                 continue;
633                         if (ia->ifa_addr->sa_family != AF_INET)
634                                 continue;
635                         if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
636                             (ia->ifa_addr))->sin_addr.s_addr)
637                                 return(1);      /* match */
638                 }
639         }
640         return(0);      /* no match, fail ... */
641 }
642
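/*
 * SNPARGS(buf, len) expands to the (pointer, size) argument pair for
 * ksnprintf(), so that formatting continues at offset 'len' into 'buf'
 * without writing past the end of the buffer.
 */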
643 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
644
645 /*
646  * We enter here when we have a rule with O_LOG.
647  * XXX this function alone takes about 2Kbytes of code!
648  */
649 static void
650 ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
651          struct mbuf *m, struct ifnet *oif)
652 {
653         char *action;
654         int limit_reached = 0;
655         char action2[40], proto[48], fragment[28];
656
657         fragment[0] = '\0';
658         proto[0] = '\0';
659
660         if (f == NULL) {        /* bogus pkt */
661                 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
662
663                 if (verbose_limit != 0 &&
664                     ctx->ipfw_norule_counter >= verbose_limit)
665                         return;
666                 ctx->ipfw_norule_counter++;
667                 if (ctx->ipfw_norule_counter == verbose_limit)
668                         limit_reached = verbose_limit;
669                 action = "Refuse";
670         } else {        /* O_LOG is the first action, find the real one */
671                 ipfw_insn *cmd = ACTION_PTR(f);
672                 ipfw_insn_log *l = (ipfw_insn_log *)cmd;
673
674                 if (l->max_log != 0 && l->log_left == 0)
675                         return;
676                 l->log_left--;
677                 if (l->log_left == 0)
678                         limit_reached = l->max_log;
679                 cmd += F_LEN(cmd);      /* point to first action */
680                 if (cmd->opcode == O_PROB)
681                         cmd += F_LEN(cmd);
682
683                 action = action2;
684                 switch (cmd->opcode) {
685                 case O_DENY:
686                         action = "Deny";
687                         break;
688
689                 case O_REJECT:
690                         if (cmd->arg1==ICMP_REJECT_RST) {
691                                 action = "Reset";
692                         } else if (cmd->arg1==ICMP_UNREACH_HOST) {
693                                 action = "Reject";
694                         } else {
695                                 ksnprintf(SNPARGS(action2, 0), "Unreach %d",
696                                           cmd->arg1);
697                         }
698                         break;
699
700                 case O_ACCEPT:
701                         action = "Accept";
702                         break;
703
704                 case O_COUNT:
705                         action = "Count";
706                         break;
707
708                 case O_DIVERT:
709                         ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
710                         break;
711
712                 case O_TEE:
713                         ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
714                         break;
715
716                 case O_SKIPTO:
717                         ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
718                         break;
719
720                 case O_PIPE:
721                         ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
722                         break;
723
724                 case O_QUEUE:
725                         ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
726                         break;
727
728                 case O_FORWARD_IP:
729                         {
730                                 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
731                                 int len;
732
733                                 len = ksnprintf(SNPARGS(action2, 0),
734                                                 "Forward to %s",
735                                                 inet_ntoa(sa->sa.sin_addr));
736                                 if (sa->sa.sin_port) {
737                                         ksnprintf(SNPARGS(action2, len), ":%d",
738                                                   sa->sa.sin_port);
739                                 }
740                         }
741                         break;
742
743                 default:
744                         action = "UNKNOWN";
745                         break;
746                 }
747         }
748
749         if (hlen == 0) {        /* non-ip */
750                 ksnprintf(SNPARGS(proto, 0), "MAC");
751         } else {
752                 struct ip *ip = mtod(m, struct ip *);
753                 /* these three are all aliases to the same thing */
754                 struct icmp *const icmp = L3HDR(struct icmp, ip);
755                 struct tcphdr *const tcp = (struct tcphdr *)icmp;
756                 struct udphdr *const udp = (struct udphdr *)icmp;
757
758                 int ip_off, offset, ip_len;
759                 int len;
760
761                 if (eh != NULL) { /* layer 2 packets are as on the wire */
762                         ip_off = ntohs(ip->ip_off);
763                         ip_len = ntohs(ip->ip_len);
764                 } else {
765                         ip_off = ip->ip_off;
766                         ip_len = ip->ip_len;
767                 }
768                 offset = ip_off & IP_OFFMASK;
769                 switch (ip->ip_p) {
770                 case IPPROTO_TCP:
771                         len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
772                                         inet_ntoa(ip->ip_src));
773                         if (offset == 0) {
774                                 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
775                                           ntohs(tcp->th_sport),
776                                           inet_ntoa(ip->ip_dst),
777                                           ntohs(tcp->th_dport));
778                         } else {
779                                 ksnprintf(SNPARGS(proto, len), " %s",
780                                           inet_ntoa(ip->ip_dst));
781                         }
782                         break;
783
784                 case IPPROTO_UDP:
785                         len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
786                                         inet_ntoa(ip->ip_src));
787                         if (offset == 0) {
788                                 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
789                                           ntohs(udp->uh_sport),
790                                           inet_ntoa(ip->ip_dst),
791                                           ntohs(udp->uh_dport));
792                         } else {
793                                 ksnprintf(SNPARGS(proto, len), " %s",
794                                           inet_ntoa(ip->ip_dst));
795                         }
796                         break;
797
798                 case IPPROTO_ICMP:
799                         if (offset == 0) {
800                                 len = ksnprintf(SNPARGS(proto, 0),
801                                                 "ICMP:%u.%u ",
802                                                 icmp->icmp_type,
803                                                 icmp->icmp_code);
804                         } else {
805                                 len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
806                         }
807                         len += ksnprintf(SNPARGS(proto, len), "%s",
808                                          inet_ntoa(ip->ip_src));
809                         ksnprintf(SNPARGS(proto, len), " %s",
810                                   inet_ntoa(ip->ip_dst));
811                         break;
812
813                 default:
814                         len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
815                                         inet_ntoa(ip->ip_src));
816                         ksnprintf(SNPARGS(proto, len), " %s",
817                                   inet_ntoa(ip->ip_dst));
818                         break;
819                 }
820
821                 if (ip_off & (IP_MF | IP_OFFMASK)) {
822                         ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
823                                   ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
824                                   offset << 3, (ip_off & IP_MF) ? "+" : "");
825                 }
826         }
827
828         if (oif || m->m_pkthdr.rcvif) {
829                 log(LOG_SECURITY | LOG_INFO,
830                     "ipfw: %d %s %s %s via %s%s\n",
831                     f ? f->rulenum : -1,
832                     action, proto, oif ? "out" : "in",
833                     oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
834                     fragment);
835         } else {
836                 log(LOG_SECURITY | LOG_INFO,
837                     "ipfw: %d %s %s [no if info]%s\n",
838                     f ? f->rulenum : -1,
839                     action, proto, fragment);
840         }
841
842         if (limit_reached) {
843                 log(LOG_SECURITY | LOG_NOTICE,
844                     "ipfw: limit %d reached on entry %d\n",
845                     limit_reached, f ? f->rulenum : -1);
846         }
847 }
848
849 #undef SNPARGS
850
851 /*
852  * IMPORTANT: the hash function for dynamic rules must be commutative
853  * in source and destination (ip,port), because rules are bidirectional
854  * and we want to find both in the same bucket.
855  */
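/*
 * The XOR of addresses and ports below is symmetric in (src, dst), so a
 * packet and its reply hash to the same bucket; masking with
 * (curr_dyn_buckets - 1) works because curr_dyn_buckets is kept a
 * power of 2.
 */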
856 static __inline int
857 hash_packet(struct ipfw_flow_id *id)
858 {
859         uint32_t i;
860
861         i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
862         i &= (curr_dyn_buckets - 1);
863         return i;
864 }
865
866 /**
867  * unlink a dynamic rule from a chain. prev is a pointer to
868  * the previous one, q is a pointer to the rule to delete,
869  * head is a pointer to the head of the queue.
870  * Modifies q and potentially also head.
871  */
872 #define UNLINK_DYN_RULE(prev, head, q)                                  \
873 do {                                                                    \
874         ipfw_dyn_rule *old_q = q;                                       \
875                                                                         \
876         /* remove a refcount to the parent */                           \
877         if (q->dyn_type == O_LIMIT)                                     \
878                 q->parent->count--;                                     \
879         DEB(kprintf("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n", \
880                 (q->id.src_ip), (q->id.src_port),                       \
881                 (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); )      \
882         if (prev != NULL)                                               \
883                 prev->next = q = q->next;                               \
884         else                                                            \
885                 head = q = q->next;                                     \
886         KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count));  \
887         dyn_count--;                                                    \
888         kfree(old_q, M_IPFW);                                           \
889 } while (0)
890
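/*
 * TIME_LEQ(a, b) is true when time 'a' is at or before time 'b'; the
 * signed difference keeps the comparison valid even if the time counter
 * wraps around.
 */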
891 #define TIME_LEQ(a, b)  ((int)((a) - (b)) <= 0)
892
893 /**
894  * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
895  *
896  * If keep_me == NULL, rules are deleted even if not expired,
897  * otherwise only expired rules are removed.
898  *
899  * The value of the second parameter is also used to identify
900  * a rule we absolutely do not want to remove (e.g. because we are
901  * holding a reference to it -- this is the case with O_LIMIT_PARENT
902  * rules). The pointer is only used for comparison, so any non-null
903  * value will do.
904  */
905 static void
906 remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
907 {
908         static uint32_t last_remove = 0; /* XXX */
909
910 #define FORCE   (keep_me == NULL)
911
912         ipfw_dyn_rule *prev, *q;
913         int i, pass = 0, max_pass = 0, unlinked = 0;
914
915         if (ipfw_dyn_v == NULL || dyn_count == 0)
916                 return;
917         /* do not expire more than once per second, it is useless */
918         if (!FORCE && last_remove == time_second)
919                 return;
920         last_remove = time_second;
921
922         /*
923  * Because O_LIMIT rules refer to their parent rules, during the first
924  * pass we only remove child rules and mark any pending O_LIMIT_PARENT
925  * rules, which are then removed in a second pass.
926          */
927 next_pass:
928         for (i = 0; i < curr_dyn_buckets; i++) {
929                 for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
930                         /*
931                          * Logic can become complex here, so we split tests.
932                          */
933                         if (q == keep_me)
934                                 goto next;
935                         if (rule != NULL && rule->stub != q->stub)
936                                 goto next; /* not the one we are looking for */
937                         if (q->dyn_type == O_LIMIT_PARENT) {
938                                 /*
939                                  * handle parent in the second pass,
940                                  * record we need one.
941                                  */
942                                 max_pass = 1;
943                                 if (pass == 0)
944                                         goto next;
945                                 if (FORCE && q->count != 0) {
946                                         /* XXX should not happen! */
947                                         kprintf("OUCH! cannot remove rule, "
948                                                 "count %d\n", q->count);
949                                 }
950                         } else {
951                                 if (!FORCE && !TIME_LEQ(q->expire, time_second))
952                                         goto next;
953                         }
954                         unlinked = 1;
955                         UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
956                         continue;
957 next:
958                         prev = q;
959                         q = q->next;
960                 }
961         }
962         if (pass++ < max_pass)
963                 goto next_pass;
964
965         if (unlinked)
966                 ++dyn_buckets_gen;
967
968 #undef FORCE
969 }
970
971 /**
972  * lookup a dynamic rule.
973  */
974 static ipfw_dyn_rule *
975 lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
976                 struct tcphdr *tcp)
977 {
978         /*
979          * stateful ipfw extensions.
980          * Lookup into dynamic session queue
981          */
982 #define MATCH_REVERSE   0
983 #define MATCH_FORWARD   1
984 #define MATCH_NONE      2
985 #define MATCH_UNKNOWN   3
986         int i, dir = MATCH_NONE;
987         ipfw_dyn_rule *prev, *q=NULL;
988
989         if (ipfw_dyn_v == NULL)
990                 goto done;      /* not found */
991
992         i = hash_packet(pkt);
993         for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
994                 if (q->dyn_type == O_LIMIT_PARENT)
995                         goto next;
996
997                 if (TIME_LEQ(q->expire, time_second)) {
998                         /*
999                          * Entry expired; skip.
1000                          * Let ipfw_tick() take care of it
1001                          */
1002                         goto next;
1003                 }
1004
1005                 if (pkt->proto == q->id.proto) {
1006                         if (pkt->src_ip == q->id.src_ip &&
1007                             pkt->dst_ip == q->id.dst_ip &&
1008                             pkt->src_port == q->id.src_port &&
1009                             pkt->dst_port == q->id.dst_port) {
1010                                 dir = MATCH_FORWARD;
1011                                 break;
1012                         }
1013                         if (pkt->src_ip == q->id.dst_ip &&
1014                             pkt->dst_ip == q->id.src_ip &&
1015                             pkt->src_port == q->id.dst_port &&
1016                             pkt->dst_port == q->id.src_port) {
1017                                 dir = MATCH_REVERSE;
1018                                 break;
1019                         }
1020                 }
1021 next:
1022                 prev = q;
1023                 q = q->next;
1024         }
1025         if (q == NULL)
1026                 goto done; /* q = NULL, not found */
1027
1028         if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
1029                 u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
1030
1031 #define BOTH_SYN        (TH_SYN | (TH_SYN << 8))
1032 #define BOTH_FIN        (TH_FIN | (TH_FIN << 8))
1033
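                /*
                 * q->state accumulates the TCP flags seen so far: flags
                 * seen in the forward direction go into the low byte and
                 * flags seen in the reverse direction into the high byte,
                 * so e.g. BOTH_SYN means a SYN was seen in each direction.
                 */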
1034                 q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
1035                 switch (q->state) {
1036                 case TH_SYN:                            /* opening */
1037                         q->expire = time_second + dyn_syn_lifetime;
1038                         break;
1039
1040                 case BOTH_SYN:                  /* move to established */
1041                 case BOTH_SYN | TH_FIN :        /* one side tries to close */
1042                 case BOTH_SYN | (TH_FIN << 8) :
1043                         if (tcp) {
1044                                 uint32_t ack = ntohl(tcp->th_ack);
1045
1046 #define _SEQ_GE(a, b)   ((int)(a) - (int)(b) >= 0)
1047
1048                                 if (dir == MATCH_FORWARD) {
1049                                         if (q->ack_fwd == 0 ||
1050                                             _SEQ_GE(ack, q->ack_fwd))
1051                                                 q->ack_fwd = ack;
1052                                         else /* ignore out-of-sequence */
1053                                                 break;
1054                                 } else {
1055                                         if (q->ack_rev == 0 ||
1056                                             _SEQ_GE(ack, q->ack_rev))
1057                                                 q->ack_rev = ack;
1058                                         else /* ignore out-of-sequence */
1059                                                 break;
1060                                 }
1061 #undef _SEQ_GE
1062                         }
1063                         q->expire = time_second + dyn_ack_lifetime;
1064                         break;
1065
1066                 case BOTH_SYN | BOTH_FIN:       /* both sides closed */
1067                         KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
1068                         q->expire = time_second + dyn_fin_lifetime;
1069                         break;
1070
1071                 default:
1072 #if 0
1073                         /*
1074                          * reset or some invalid combination, but can also
1075                          * occur if we use keep-state the wrong way.
1076                          */
1077                         if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
1078                                 kprintf("invalid state: 0x%x\n", q->state);
1079 #endif
1080                         KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
1081                         q->expire = time_second + dyn_rst_lifetime;
1082                         break;
1083                 }
1084         } else if (pkt->proto == IPPROTO_UDP) {
1085                 q->expire = time_second + dyn_udp_lifetime;
1086         } else {
1087                 /* other protocols */
1088                 q->expire = time_second + dyn_short_lifetime;
1089         }
1090 done:
1091         if (match_direction)
1092                 *match_direction = dir;
1093         return q;
1094 }
1095
1096 static struct ip_fw *
1097 lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
1098             uint16_t len, int *deny)
1099 {
1100         struct ip_fw *rule = NULL;
1101         ipfw_dyn_rule *q;
1102         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1103         uint32_t gen;
1104
1105         *deny = 0;
1106         gen = ctx->ipfw_gen;
1107
1108         lockmgr(&dyn_lock, LK_SHARED);
1109
1110         if (ctx->ipfw_gen != gen) {
1111                 /*
1112  * Static rules have changed while we were waiting
1113                  * for the dynamic hash table lock; deny this packet,
1114                  * since it is _not_ known whether it is safe to keep
1115                  * iterating the static rules.
1116                  */
1117                 *deny = 1;
1118                 goto back;
1119         }
1120
1121         q = lookup_dyn_rule(pkt, match_direction, tcp);
1122         if (q == NULL) {
1123                 rule = NULL;
1124         } else {
1125                 rule = q->stub->rule[mycpuid];
1126                 KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);
1127
1128                 /* XXX */
1129                 q->pcnt++;
1130                 q->bcnt += len;
1131         }
1132 back:
1133         lockmgr(&dyn_lock, LK_RELEASE);
1134         return rule;
1135 }
1136
1137 static void
1138 realloc_dynamic_table(void)
1139 {
1140         ipfw_dyn_rule **old_dyn_v;
1141         uint32_t old_curr_dyn_buckets;
1142
1143         KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
1144                 ("invalid dyn_buckets %d\n", dyn_buckets));
1145
1146         /* Save the current buckets array for later error recovery */
1147         old_dyn_v = ipfw_dyn_v;
1148         old_curr_dyn_buckets = curr_dyn_buckets;
1149
1150         curr_dyn_buckets = dyn_buckets;
1151         for (;;) {
1152                 ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
1153                                      M_IPFW, M_NOWAIT | M_ZERO);
1154                 if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
1155                         break;
1156
1157                 curr_dyn_buckets /= 2;
1158                 if (curr_dyn_buckets <= old_curr_dyn_buckets &&
1159                     old_dyn_v != NULL) {
1160                         /*
1161  * Don't try allocating a smaller buckets array; reuse
1162  * the old one, which already contains enough buckets
1163                          */
1164                         break;
1165                 }
1166         }
1167
1168         if (ipfw_dyn_v != NULL) {
1169                 if (old_dyn_v != NULL)
1170                         kfree(old_dyn_v, M_IPFW);
1171         } else {
1172                 /* Allocation failed, restore old buckets array */
1173                 ipfw_dyn_v = old_dyn_v;
1174                 curr_dyn_buckets = old_curr_dyn_buckets;
1175         }
1176
1177         if (ipfw_dyn_v != NULL)
1178                 ++dyn_buckets_gen;
1179 }
1180
1181 /**
1182  * Install state of type 'type' for a dynamic session.
1183  * The hash table contains three types of rules:
1184  * - regular rules (O_KEEP_STATE)
1185  * - rules for sessions with a limited number of sessions per user
1186  *   (O_LIMIT).  When they are created, the parent's count is
1187  *   increased by 1, and decreased on delete.  In this case,
1188  *   the third parameter is the parent rule and not the chain.
1189  * - "parent" rules for the above (O_LIMIT_PARENT).
1190  */
1191 static ipfw_dyn_rule *
1192 add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
1193 {
1194         ipfw_dyn_rule *r;
1195         int i;
1196
1197         if (ipfw_dyn_v == NULL ||
1198             (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
1199                 realloc_dynamic_table();
1200                 if (ipfw_dyn_v == NULL)
1201                         return NULL; /* failed ! */
1202         }
1203         i = hash_packet(id);
1204
1205         r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
1206         if (r == NULL) {
1207                 kprintf ("sorry cannot allocate state\n");
1208                 return NULL;
1209         }
1210
1211         /* increase refcount on parent, and set pointer */
1212         if (dyn_type == O_LIMIT) {
1213                 ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
1214
1215                 if (parent->dyn_type != O_LIMIT_PARENT)
1216                         panic("invalid parent");
1217                 parent->count++;
1218                 r->parent = parent;
1219                 rule = parent->stub->rule[mycpuid];
1220                 KKASSERT(rule->stub == parent->stub);
1221         }
1222         KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);
1223
1224         r->id = *id;
1225         r->expire = time_second + dyn_syn_lifetime;
1226         r->stub = rule->stub;
1227         r->dyn_type = dyn_type;
1228         r->pcnt = r->bcnt = 0;
1229         r->count = 0;
1230
1231         r->bucket = i;
1232         r->next = ipfw_dyn_v[i];
1233         ipfw_dyn_v[i] = r;
1234         dyn_count++;
1235         dyn_buckets_gen++;
1236         DEB(kprintf("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
1237            dyn_type,
1238            (r->id.src_ip), (r->id.src_port),
1239            (r->id.dst_ip), (r->id.dst_port),
1240            dyn_count );)
1241         return r;
1242 }
1243
1244 /**
1245  * lookup dynamic parent rule using pkt and rule as search keys.
1246  * If the lookup fails, then install one.
1247  */
1248 static ipfw_dyn_rule *
1249 lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
1250 {
1251         ipfw_dyn_rule *q;
1252         int i;
1253
1254         if (ipfw_dyn_v) {
1255                 i = hash_packet(pkt);
1256                 for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
1257                         if (q->dyn_type == O_LIMIT_PARENT &&
1258                             rule->stub == q->stub &&
1259                             pkt->proto == q->id.proto &&
1260                             pkt->src_ip == q->id.src_ip &&
1261                             pkt->dst_ip == q->id.dst_ip &&
1262                             pkt->src_port == q->id.src_port &&
1263                             pkt->dst_port == q->id.dst_port) {
1264                                 q->expire = time_second + dyn_short_lifetime;
1265                                 DEB(kprintf("lookup_dyn_parent found 0x%p\n",q);)
1266                                 return q;
1267                         }
1268                 }
1269         }
1270         return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
1271 }
1272
1273 /**
1274  * Install dynamic state for rule type cmd->o.opcode
1275  *
1276  * Returns 1 (failure) if state is not installed because of errors or because
1277  * session limitations are enforced.
1278  */
1279 static int
1280 install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
1281                      struct ip_fw_args *args)
1282 {
1283         static int last_log; /* XXX */
1284
1285         ipfw_dyn_rule *q;
1286
1287         DEB(kprintf("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
1288             cmd->o.opcode,
1289             (args->f_id.src_ip), (args->f_id.src_port),
1290             (args->f_id.dst_ip), (args->f_id.dst_port) );)
1291
1292         q = lookup_dyn_rule(&args->f_id, NULL, NULL);
1293         if (q != NULL) { /* should never occur */
1294                 if (last_log != time_second) {
1295                         last_log = time_second;
1296                         kprintf(" install_state: entry already present, done\n");
1297                 }
1298                 return 0;
1299         }
1300
1301         if (dyn_count >= dyn_max) {
1302                 /*
1303                  * Run out of slots, try to remove any expired rule.
1304                  */
1305                 remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
1306                 if (dyn_count >= dyn_max) {
1307                         if (last_log != time_second) {
1308                                 last_log = time_second;
1309                                 kprintf("install_state: "
1310                                         "Too many dynamic rules\n");
1311                         }
1312                         return 1; /* cannot install, notify caller */
1313                 }
1314         }
1315
1316         switch (cmd->o.opcode) {
1317         case O_KEEP_STATE: /* bidir rule */
1318                 if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
1319                         return 1;
1320                 break;
1321
1322         case O_LIMIT: /* limit number of sessions */
1323                 {
1324                         uint16_t limit_mask = cmd->limit_mask;
1325                         struct ipfw_flow_id id;
1326                         ipfw_dyn_rule *parent;
1327
1328                         DEB(kprintf("installing dyn-limit rule %d\n",
1329                             cmd->conn_limit);)
1330
1331                         id.dst_ip = id.src_ip = 0;
1332                         id.dst_port = id.src_port = 0;
1333                         id.proto = args->f_id.proto;
1334
1335                         if (limit_mask & DYN_SRC_ADDR)
1336                                 id.src_ip = args->f_id.src_ip;
1337                         if (limit_mask & DYN_DST_ADDR)
1338                                 id.dst_ip = args->f_id.dst_ip;
1339                         if (limit_mask & DYN_SRC_PORT)
1340                                 id.src_port = args->f_id.src_port;
1341                         if (limit_mask & DYN_DST_PORT)
1342                                 id.dst_port = args->f_id.dst_port;
1343
1344                         parent = lookup_dyn_parent(&id, rule);
1345                         if (parent == NULL) {
1346                                 kprintf("add parent failed\n");
1347                                 return 1;
1348                         }
1349
1350                         if (parent->count >= cmd->conn_limit) {
1351                                 /*
1352                                  * See if we can remove some expired rule.
1353                                  */
1354                                 remove_dyn_rule_locked(rule, parent);
1355                                 if (parent->count >= cmd->conn_limit) {
1356                                         if (fw_verbose &&
1357                                             last_log != time_second) {
1358                                                 last_log = time_second;
1359                                                 log(LOG_SECURITY | LOG_DEBUG,
1360                                                     "drop session, "
1361                                                     "too many entries\n");
1362                                         }
1363                                         return 1;
1364                                 }
1365                         }
1366                         if (add_dyn_rule(&args->f_id, O_LIMIT,
1367                                          (struct ip_fw *)parent) == NULL)
1368                                 return 1;
1369                 }
1370                 break;
1371         default:
1372                 kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
1373                 return 1;
1374         }
1375         lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
1376         return 0;
1377 }
1378
1379 static int
1380 install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
1381               struct ip_fw_args *args, int *deny)
1382 {
1383         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1384         uint32_t gen;
1385         int ret = 0;
1386
1387         *deny = 0;
1388         gen = ctx->ipfw_gen;
1389
1390         lockmgr(&dyn_lock, LK_EXCLUSIVE);
1391         if (ctx->ipfw_gen != gen) {
1392                 /* See the comment in lookup_rule() */
1393                 *deny = 1;
1394         } else {
1395                 ret = install_state_locked(rule, cmd, args);
1396         }
1397         lockmgr(&dyn_lock, LK_RELEASE);
1398
1399         return ret;
1400 }
1401
1402 /*
1403  * Transmit a TCP packet, containing either a RST or a keepalive.
1404  * When flags & TH_RST, we are sending a RST packet because a
1405  * "reset" action matched the packet.  Otherwise we are sending a
1406  * keepalive, and flags & TH_SYN determines its direction.
1407  */
1408 static void
1409 send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
1410 {
1411         struct mbuf *m;
1412         struct ip *ip;
1413         struct tcphdr *tcp;
1414         struct route sro;       /* fake route */
1415
1416         MGETHDR(m, MB_DONTWAIT, MT_HEADER);
1417         if (m == NULL)
1418                 return;
1419         m->m_pkthdr.rcvif = NULL;
1420         m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
1421         m->m_data += max_linkhdr;
1422
1423         ip = mtod(m, struct ip *);
1424         bzero(ip, m->m_len);
1425         tcp = (struct tcphdr *)(ip + 1); /* no IP options */
1426         ip->ip_p = IPPROTO_TCP;
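	/* th_off counts 32-bit words: 5 means a bare 20-byte header, no options */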
1427         tcp->th_off = 5;
1428
1429         /*
1430          * Assume we are sending a RST (or a keepalive in the reverse
1431          * direction), so swap the source and destination addresses and ports.
1432          */
1433         ip->ip_src.s_addr = htonl(id->dst_ip);
1434         ip->ip_dst.s_addr = htonl(id->src_ip);
1435         tcp->th_sport = htons(id->dst_port);
1436         tcp->th_dport = htons(id->src_port);
1437         if (flags & TH_RST) {   /* we are sending a RST */
1438                 if (flags & TH_ACK) {
1439                         tcp->th_seq = htonl(ack);
1440                         tcp->th_ack = htonl(0);
1441                         tcp->th_flags = TH_RST;
1442                 } else {
1443                         if (flags & TH_SYN)
1444                                 seq++;
1445                         tcp->th_seq = htonl(0);
1446                         tcp->th_ack = htonl(seq);
1447                         tcp->th_flags = TH_RST | TH_ACK;
1448                 }
1449         } else {
1450                 /*
1451                  * We are sending a keepalive. flags & TH_SYN determines
1452                  * the direction, forward if set, reverse if clear.
1453                  * NOTE: seq and ack are always assumed to be correct
1454                  * as set by the caller. This may be confusing...
1455                  */
1456                 if (flags & TH_SYN) {
1457                         /*
1458                          * we have to rewrite the correct addresses!
1459                          */
1460                         ip->ip_dst.s_addr = htonl(id->dst_ip);
1461                         ip->ip_src.s_addr = htonl(id->src_ip);
1462                         tcp->th_dport = htons(id->dst_port);
1463                         tcp->th_sport = htons(id->src_port);
1464                 }
1465                 tcp->th_seq = htonl(seq);
1466                 tcp->th_ack = htonl(ack);
1467                 tcp->th_flags = TH_ACK;
1468         }
1469
1470         /*
1471          * set ip_len to the payload size so we can compute
1472          * the tcp checksum on the pseudoheader
1473          * XXX check this, could save a couple of words ?
1474          */
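	/*
	 * The IP header is still mostly zero here (ttl is filled in below),
	 * so summing the whole mbuf covers exactly the pseudo-header fields
	 * -- addresses, protocol and the temporary TCP length in ip_len --
	 * plus the TCP header itself.
	 */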
1475         ip->ip_len = htons(sizeof(struct tcphdr));
1476         tcp->th_sum = in_cksum(m, m->m_pkthdr.len);
1477
1478         /*
1479          * now fill fields left out earlier
1480          */
1481         ip->ip_ttl = ip_defttl;
1482         ip->ip_len = m->m_pkthdr.len;
1483
1484         bzero(&sro, sizeof(sro));
1485         ip_rtaddr(ip->ip_dst, &sro);
1486
1487         m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
1488         ip_output(m, NULL, &sro, 0, NULL, NULL);
1489         if (sro.ro_rt)
1490                 RTFREE(sro.ro_rt);
1491 }
1492
1493 /*
1494  * sends a reject message, consuming the mbuf passed as an argument.
1495  */
1496 static void
1497 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
1498 {
1499         if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
1500                 /* We need the IP header in host order for icmp_error(). */
1501                 if (args->eh != NULL) {
1502                         struct ip *ip = mtod(args->m, struct ip *);
1503
1504                         ip->ip_len = ntohs(ip->ip_len);
1505                         ip->ip_off = ntohs(ip->ip_off);
1506                 }
1507                 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
1508         } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
1509                 struct tcphdr *const tcp =
1510                     L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1511
1512                 if ((tcp->th_flags & TH_RST) == 0) {
1513                         send_pkt(&args->f_id, ntohl(tcp->th_seq),
1514                                  ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
1515                 }
1516                 m_freem(args->m);
1517         } else {
1518                 m_freem(args->m);
1519         }
1520         args->m = NULL;
1521 }
1522
1523 /**
1524  *
1525  * Given an ip_fw *, lookup_next_rule will return a pointer
1526  * to the next rule, which can be either the jump
1527  * target (for skipto instructions) or the next one in the list (in
1528  * all other cases including a missing jump target).
1529  * The result is also written in the "next_rule" field of the rule.
1530  * Backward jumps are not allowed, so start looking from the next
1531  * rule...
1532  *
1533  * This never returns NULL -- in case we do not have an exact match,
1534  * the next rule is returned. When the ruleset is changed,
1535  * pointers are flushed so we are always correct.
1536  */
1537
1538 static struct ip_fw *
1539 lookup_next_rule(struct ip_fw *me)
1540 {
1541         struct ip_fw *rule = NULL;
1542         ipfw_insn *cmd;
1543
1544         /* look for action, in case it is a skipto */
1545         cmd = ACTION_PTR(me);
1546         if (cmd->opcode == O_LOG)
1547                 cmd += F_LEN(cmd);
1548         if (cmd->opcode == O_SKIPTO) {
1549                 for (rule = me->next; rule; rule = rule->next) {
1550                         if (rule->rulenum >= cmd->arg1)
1551                                 break;
1552                 }
1553         }
1554         if (rule == NULL)                       /* failure or not a skipto */
1555                 rule = me->next;
1556         me->next_rule = rule;
1557         return rule;
1558 }
1559
1560 /*
1561  * The main check routine for the firewall.
1562  *
1563  * All arguments are in args so we can modify them and return them
1564  * back to the caller.
1565  *
1566  * Parameters:
1567  *
1568  *      args->m (in/out) The packet; we set to NULL when/if we nuke it.
1569  *              Starts with the IP header.
1570  *      args->eh (in)   Mac header if present, or NULL for layer3 packet.
1571  *      args->oif       Outgoing interface, or NULL if packet is incoming.
1572  *              The incoming interface is in the mbuf. (in)
1573  *
1574  *      args->rule      Pointer to the last matching rule (in/out)
1575  *      args->f_id      Addresses grabbed from the packet (out)
1576  *
1577  * Return value:
1578  *
1579  *      IP_FW_PORT_DENY_FLAG    the packet must be dropped.
1580  *      0       The packet is to be accepted and routed normally OR
1581  *              the packet was denied/rejected and has been dropped;
1582  *              in the latter case, *m is equal to NULL upon return.
1583  *      port    Divert the packet to port, with these caveats:
1584  *
1585  *              - If IP_FW_PORT_TEE_FLAG is set, tee the packet instead
1586  *                of diverting it (i.e., 'ipfw tee').
1587  *
1588  *              - If IP_FW_PORT_DYNT_FLAG is set, interpret the lower
1589  *                16 bits as a dummynet pipe number instead of diverting it.
1590  */
1591
1592 static int
1593 ipfw_chk(struct ip_fw_args *args)
1594 {
1595         /*
1596          * Local variables hold state during the processing of a packet.
1597          *
1598          * IMPORTANT NOTE: to speed up the processing of rules, there
1599          * are some assumptions about the values of the variables, which
1600          * are documented here. Should you change them, please check
1601          * the implementation of the various instructions to make sure
1602          * that they still work.
1603          *
1604          * args->eh     The MAC header. It is non-NULL for a layer-2
1605          *      packet and NULL for a layer-3 packet.
1606          *
1607          * m | args->m  Pointer to the mbuf, as received from the caller.
1608          *      It may change if ipfw_chk() does an m_pullup, or if it
1609          *      consumes the packet because it calls send_reject().
1610          *      XXX This has to change, so that ipfw_chk() never modifies
1611          *      or consumes the buffer.
1612          * ip   is simply an alias for the packet data in m, and is kept
1613          *      in sync with it (the packet is supposed to start with
1614          *      the IP header).
1615          */
1616         struct mbuf *m = args->m;
1617         struct ip *ip = mtod(m, struct ip *);
1618
1619         /*
1620          * oif | args->oif      If NULL, ipfw_chk has been called on the
1621          *      inbound path (ether_input, ip_input).
1622          *      If non-NULL, ipfw_chk has been called on the outbound path
1623          *      (ether_output, ip_output).
1624          */
1625         struct ifnet *oif = args->oif;
1626
1627         struct ip_fw *f = NULL;         /* matching rule */
1628         int retval = 0;
1629         struct m_tag *mtag;
1630
1631         /*
1632          * hlen The length of the IPv4 header.
1633          *      hlen >0 means we have an IPv4 packet.
1634          */
1635         u_int hlen = 0;         /* hlen >0 means we have an IP pkt */
1636
1637         /*
1638          * offset       The offset of a fragment. offset != 0 means that
1639          *      we have a fragment at this offset of an IPv4 packet.
1640          *      offset == 0 means that (if this is an IPv4 packet)
1641          *      this is the first or only fragment.
1642          */
1643         u_short offset = 0;
1644
1645         /*
1646          * Local copies of addresses. They are only valid if we have
1647          * an IP packet.
1648          *
1649          * proto        The protocol. Set to 0 for non-ip packets,
1650          *      or to the protocol read from the packet otherwise.
1651          *      proto != 0 means that we have an IPv4 packet.
1652          *
1653          * src_port, dst_port   port numbers, in HOST format. Only
1654          *      valid for TCP and UDP packets.
1655          *
1656          * src_ip, dst_ip       ip addresses, in NETWORK format.
1657          *      Only valid for IPv4 packets.
1658          */
1659         uint8_t proto;
1660         uint16_t src_port = 0, dst_port = 0;    /* NOTE: host format    */
1661         struct in_addr src_ip, dst_ip;          /* NOTE: network format */
1662         uint16_t ip_len = 0;
1663
1664         /*
1665          * dyn_dir = MATCH_UNKNOWN when rules unchecked,
1666          *      MATCH_NONE when checked and not matched (dyn_f = NULL),
1667          *      MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
1668          */
1669         int dyn_dir = MATCH_UNKNOWN;
1670         struct ip_fw *dyn_f = NULL;
1671         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1672
1673         if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
1674                 return 0;       /* accept */
1675
1676         if (args->eh == NULL ||         /* layer 3 packet */
1677             (m->m_pkthdr.len >= sizeof(struct ip) &&
1678              ntohs(args->eh->ether_type) == ETHERTYPE_IP))
1679                 hlen = ip->ip_hl << 2;
1680
1681         /*
1682          * Collect parameters into local variables for faster matching.
1683          */
1684         if (hlen == 0) {        /* do not grab addresses for non-ip pkts */
1685                 proto = args->f_id.proto = 0;   /* mark f_id invalid */
1686                 goto after_ip_checks;
1687         }
1688
1689         proto = args->f_id.proto = ip->ip_p;
1690         src_ip = ip->ip_src;
1691         dst_ip = ip->ip_dst;
1692         if (args->eh != NULL) { /* layer 2 packets are as on the wire */
1693                 offset = ntohs(ip->ip_off) & IP_OFFMASK;
1694                 ip_len = ntohs(ip->ip_len);
1695         } else {
1696                 offset = ip->ip_off & IP_OFFMASK;
1697                 ip_len = ip->ip_len;
1698         }
1699
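/*
 * PULLUP_TO(len): make sure the first 'len' bytes of the packet sit in
 * the first mbuf.  m_pullup() may return a different mbuf, so args->m,
 * the local 'm' and the cached 'ip' pointer are all refreshed.
 */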
1700 #define PULLUP_TO(len)                          \
1701 do {                                            \
1702         if (m->m_len < (len)) {                 \
1703                 args->m = m = m_pullup(m, (len));\
1704                 if (m == NULL)                  \
1705                         goto pullup_failed;     \
1706                 ip = mtod(m, struct ip *);      \
1707         }                                       \
1708 } while (0)
1709
1710         if (offset == 0) {
1711                 switch (proto) {
1712                 case IPPROTO_TCP:
1713                         {
1714                                 struct tcphdr *tcp;
1715
1716                                 PULLUP_TO(hlen + sizeof(struct tcphdr));
1717                                 tcp = L3HDR(struct tcphdr, ip);
1718                                 dst_port = tcp->th_dport;
1719                                 src_port = tcp->th_sport;
1720                                 args->f_id.flags = tcp->th_flags;
1721                         }
1722                         break;
1723
1724                 case IPPROTO_UDP:
1725                         {
1726                                 struct udphdr *udp;
1727
1728                                 PULLUP_TO(hlen + sizeof(struct udphdr));
1729                                 udp = L3HDR(struct udphdr, ip);
1730                                 dst_port = udp->uh_dport;
1731                                 src_port = udp->uh_sport;
1732                         }
1733                         break;
1734
1735                 case IPPROTO_ICMP:
1736                         PULLUP_TO(hlen + 4);    /* type, code and checksum. */
1737                         args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
1738                         break;
1739
1740                 default:
1741                         break;
1742                 }
1743         }
1744
1745 #undef PULLUP_TO
1746
1747         args->f_id.src_ip = ntohl(src_ip.s_addr);
1748         args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1749         args->f_id.src_port = src_port = ntohs(src_port);
1750         args->f_id.dst_port = dst_port = ntohs(dst_port);
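	/*
	 * args->f_id now carries addresses and ports in host order, while
	 * the local src_ip/dst_ip copies stay in network order (see the
	 * NOTEs above).
	 */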
1751
1752 after_ip_checks:
1753         if (args->rule) {
1754                 /*
1755                  * Packet has already been tagged. Look for the next rule
1756                  * to restart processing.
1757                  *
1758                  * If fw_one_pass != 0 then just accept it.
1759                  * XXX should not happen here, but optimized out in
1760                  * the caller.
1761                  */
1762                 if (fw_one_pass)
1763                         return 0;
1764
1765                 /* This rule is being/has been flushed */
1766                 if (ipfw_flushing)
1767                         return IP_FW_PORT_DENY_FLAG;
1768
1769                 KASSERT(args->rule->cpuid == mycpuid,
1770                         ("rule used on cpu%d\n", mycpuid));
1771
1772                 /* This rule was deleted */
1773                 if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
1774                         return IP_FW_PORT_DENY_FLAG;
1775
1776                 f = args->rule->next_rule;
1777                 if (f == NULL)
1778                         f = lookup_next_rule(args->rule);
1779         } else {
1780                 /*
1781                  * Find the starting rule. It can be either the first
1782                  * one, or the one after divert_rule if asked so.
1783                  */
1784                 int skipto;
1785
1786                 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
1787                 if (mtag != NULL)
1788                         skipto = *(uint16_t *)m_tag_data(mtag);
1789                 else
1790                         skipto = 0;
1791
1792                 f = ctx->ipfw_layer3_chain;
1793                 if (args->eh == NULL && skipto != 0) {
1794                         /* No skipto during rule flushing */
1795                         if (ipfw_flushing)
1796                                 return IP_FW_PORT_DENY_FLAG;
1797
1798                         if (skipto >= IPFW_DEFAULT_RULE)
1799                                 return(IP_FW_PORT_DENY_FLAG); /* invalid */
1800
1801                         while (f && f->rulenum <= skipto)
1802                                 f = f->next;
1803                         if (f == NULL)  /* drop packet */
1804                                 return(IP_FW_PORT_DENY_FLAG);
1805                 } else if (ipfw_flushing) {
1806                         /* Rules are being flushed; skip to default rule */
1807                         f = ctx->ipfw_default_rule;
1808                 }
1809         }
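	/*
	 * Any divert tag has done its job above (it selected the starting
	 * rule); delete it so it does not linger on the packet.
	 */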
1810         if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
1811                 m_tag_delete(m, mtag);
1812
1813         /*
1814          * Now scan the rules, and parse microinstructions for each rule.
1815          */
1816         for (; f; f = f->next) {
1817                 int l, cmdlen;
1818                 ipfw_insn *cmd;
1819                 int skip_or; /* skip rest of OR block */
1820
1821 again:
1822                 if (ctx->ipfw_set_disable & (1 << f->set))
1823                         continue;
1824
1825                 skip_or = 0;
1826                 for (l = f->cmd_len, cmd = f->cmd; l > 0;
1827                      l -= cmdlen, cmd += cmdlen) {
1828                         int match, deny;
1829
1830                         /*
1831                          * check_body is a jump target used when we find a
1832                          * CHECK_STATE, and need to jump to the body of
1833                          * the target rule.
1834                          */
1835
1836 check_body:
1837                         cmdlen = F_LEN(cmd);
1838                         /*
1839                          * An OR block (insn_1 || .. || insn_n) has the
1840                          * F_OR bit set in all but the last instruction.
1841                          * The first match will set "skip_or", and cause
1842                          * the following instructions to be skipped until
1843                          * past the one with the F_OR bit clear.
1844                          */
1845                         if (skip_or) {          /* skip this instruction */
1846                                 if ((cmd->len & F_OR) == 0)
1847                                         skip_or = 0;    /* next one is good */
1848                                 continue;
1849                         }
1850                         match = 0; /* set to 1 if we succeed */
1851
1852                         switch (cmd->opcode) {
1853                         /*
1854                          * The first set of opcodes compares the packet's
1855                          * fields with some pattern, setting 'match' if a
1856                          * match is found. At the end of the loop there is
1857                          * logic to deal with F_NOT and F_OR flags associated
1858                          * with the opcode.
1859                          */
1860                         case O_NOP:
1861                                 match = 1;
1862                                 break;
1863
1864                         case O_FORWARD_MAC:
1865                                 kprintf("ipfw: opcode %d unimplemented\n",
1866                                         cmd->opcode);
1867                                 break;
1868
1869                         case O_GID:
1870                         case O_UID:
1871                                 /*
1872                                  * We only check offset == 0 && proto != 0,
1873                                  * as this ensures that we have an IPv4
1874                                  * packet with the ports info.
1875                                  */
1876                                 if (offset!=0)
1877                                         break;
1878                             {
1879                                 struct inpcbinfo *pi;
1880                                 int wildcard;
1881                                 struct inpcb *pcb;
1882
1883                                 if (proto == IPPROTO_TCP) {
1884                                         wildcard = 0;
1885                                         pi = &tcbinfo[mycpu->gd_cpuid];
1886                                 } else if (proto == IPPROTO_UDP) {
1887                                         wildcard = 1;
1888                                         pi = &udbinfo;
1889                                 } else
1890                                         break;
1891
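				/*
				 * Outbound (oif != NULL): the remote end is
				 * dst, the local end is src.  Inbound: the
				 * remote end is src, the local end is dst.
				 * Look up the owning PCB/socket accordingly.
				 */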
1892                                 pcb =  (oif) ?
1893                                         in_pcblookup_hash(pi,
1894                                             dst_ip, htons(dst_port),
1895                                             src_ip, htons(src_port),
1896                                             wildcard, oif) :
1897                                         in_pcblookup_hash(pi,
1898                                             src_ip, htons(src_port),
1899                                             dst_ip, htons(dst_port),
1900                                             wildcard, NULL);
1901
1902                                 if (pcb == NULL || pcb->inp_socket == NULL)
1903                                         break;
1904
1905                                 if (cmd->opcode == O_UID) {
1906 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b))
1907                                         match =
1908                                           !socheckuid(pcb->inp_socket,
1909                                            (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
1910 #undef socheckuid
1911                                 } else  {
1912                                         match = groupmember(
1913                                             (uid_t)((ipfw_insn_u32 *)cmd)->d[0],
1914                                             pcb->inp_socket->so_cred);
1915                                 }
1916                             }
1917                                 break;
1918
1919                         case O_RECV:
1920                                 match = iface_match(m->m_pkthdr.rcvif,
1921                                     (ipfw_insn_if *)cmd);
1922                                 break;
1923
1924                         case O_XMIT:
1925                                 match = iface_match(oif, (ipfw_insn_if *)cmd);
1926                                 break;
1927
1928                         case O_VIA:
1929                                 match = iface_match(oif ? oif :
1930                                     m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
1931                                 break;
1932
1933                         case O_MACADDR2:
1934                                 if (args->eh != NULL) { /* have MAC header */
1935                                         uint32_t *want = (uint32_t *)
1936                                                 ((ipfw_insn_mac *)cmd)->addr;
1937                                         uint32_t *mask = (uint32_t *)
1938                                                 ((ipfw_insn_mac *)cmd)->mask;
1939                                         uint32_t *hdr = (uint32_t *)args->eh;
1940
1941                                         match =
1942                                         (want[0] == (hdr[0] & mask[0]) &&
1943                                          want[1] == (hdr[1] & mask[1]) &&
1944                                          want[2] == (hdr[2] & mask[2]));
1945                                 }
1946                                 break;
1947
1948                         case O_MAC_TYPE:
1949                                 if (args->eh != NULL) {
1950                                         uint16_t t =
1951                                             ntohs(args->eh->ether_type);
1952                                         uint16_t *p =
1953                                             ((ipfw_insn_u16 *)cmd)->ports;
1954                                         int i;
1955
1956                                         /* Special vlan handling */
1957                                         if (m->m_flags & M_VLANTAG)
1958                                                 t = ETHERTYPE_VLAN;
1959
1960                                         for (i = cmdlen - 1; !match && i > 0;
1961                                              i--, p += 2) {
1962                                                 match =
1963                                                 (t >= p[0] && t <= p[1]);
1964                                         }
1965                                 }
1966                                 break;
1967
1968                         case O_FRAG:
1969                                 match = (hlen > 0 && offset != 0);
1970                                 break;
1971
1972                         case O_IN:      /* "out" is "not in" */
1973                                 match = (oif == NULL);
1974                                 break;
1975
1976                         case O_LAYER2:
1977                                 match = (args->eh != NULL);
1978                                 break;
1979
1980                         case O_PROTO:
1981                                 /*
1982                                  * We do not allow an arg of 0 so the
1983                                  * check of "proto" only suffices.
1984                                  */
1985                                 match = (proto == cmd->arg1);
1986                                 break;
1987
1988                         case O_IP_SRC:
1989                                 match = (hlen > 0 &&
1990                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1991                                     src_ip.s_addr);
1992                                 break;
1993
1994                         case O_IP_SRC_MASK:
1995                                 match = (hlen > 0 &&
1996                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1997                                      (src_ip.s_addr &
1998                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
1999                                 break;
2000
2001                         case O_IP_SRC_ME:
2002                                 if (hlen > 0) {
2003                                         struct ifnet *tif;
2004
2005                                         tif = INADDR_TO_IFP(&src_ip);
2006                                         match = (tif != NULL);
2007                                 }
2008                                 break;
2009
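			/*
			 * O_IP_DST_SET/O_IP_SRC_SET: d[0] is the base address
			 * (host order), arg1 the number of addresses covered
			 * and d[1..] a bitmap with one bit per address; match
			 * if the offset from the base is in range and its bit
			 * is set.
			 */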
2010                         case O_IP_DST_SET:
2011                         case O_IP_SRC_SET:
2012                                 if (hlen > 0) {
2013                                         uint32_t *d = (uint32_t *)(cmd + 1);
2014                                         uint32_t addr =
2015                                             cmd->opcode == O_IP_DST_SET ?
2016                                                 args->f_id.dst_ip :
2017                                                 args->f_id.src_ip;
2018
2019                                         if (addr < d[0])
2020                                                 break;
2021                                         addr -= d[0]; /* subtract base */
2022                                         match =
2023                                         (addr < cmd->arg1) &&
2024                                          (d[1 + (addr >> 5)] &
2025                                           (1 << (addr & 0x1f)));
2026                                 }
2027                                 break;
2028
2029                         case O_IP_DST:
2030                                 match = (hlen > 0 &&
2031                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2032                                     dst_ip.s_addr);
2033                                 break;
2034
2035                         case O_IP_DST_MASK:
2036                                 match = (hlen > 0) &&
2037                                     (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2038                                      (dst_ip.s_addr &
2039                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
2040                                 break;
2041
2042                         case O_IP_DST_ME:
2043                                 if (hlen > 0) {
2044                                         struct ifnet *tif;
2045
2046                                         tif = INADDR_TO_IFP(&dst_ip);
2047                                         match = (tif != NULL);
2048                                 }
2049                                 break;
2050
2051                         case O_IP_SRCPORT:
2052                         case O_IP_DSTPORT:
2053                                 /*
2054                                  * offset == 0 && proto != 0 is enough
2055                                  * to guarantee that we have an IPv4
2056                                  * packet with port info.
2057                                  */
2058                                 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
2059                                     && offset == 0) {
2060                                         uint16_t x =
2061                                             (cmd->opcode == O_IP_SRCPORT) ?
2062                                                 src_port : dst_port ;
2063                                         uint16_t *p =
2064                                             ((ipfw_insn_u16 *)cmd)->ports;
2065                                         int i;
2066
2067                                         for (i = cmdlen - 1; !match && i > 0;
2068                                              i--, p += 2) {
2069                                                 match =
2070                                                 (x >= p[0] && x <= p[1]);
2071                                         }
2072                                 }
2073                                 break;
2074
2075                         case O_ICMPTYPE:
2076                                 match = (offset == 0 && proto==IPPROTO_ICMP &&
2077                                     icmptype_match(ip, (ipfw_insn_u32 *)cmd));
2078                                 break;
2079
2080                         case O_IPOPT:
2081                                 match = (hlen > 0 && ipopts_match(ip, cmd));
2082                                 break;
2083
2084                         case O_IPVER:
2085                                 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
2086                                 break;
2087
2088                         case O_IPTTL:
2089                                 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
2090                                 break;
2091
2092                         case O_IPID:
2093                                 match = (hlen > 0 &&
2094                                     cmd->arg1 == ntohs(ip->ip_id));
2095                                 break;
2096
2097                         case O_IPLEN:
2098                                 match = (hlen > 0 && cmd->arg1 == ip_len);
2099                                 break;
2100
2101                         case O_IPPRECEDENCE:
2102                                 match = (hlen > 0 &&
2103                                     (cmd->arg1 == (ip->ip_tos & 0xe0)));
2104                                 break;
2105
2106                         case O_IPTOS:
2107                                 match = (hlen > 0 &&
2108                                     flags_match(cmd, ip->ip_tos));
2109                                 break;
2110
2111                         case O_TCPFLAGS:
2112                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2113                                     flags_match(cmd,
2114                                         L3HDR(struct tcphdr,ip)->th_flags));
2115                                 break;
2116
2117                         case O_TCPOPTS:
2118                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2119                                     tcpopts_match(ip, cmd));
2120                                 break;
2121
2122                         case O_TCPSEQ:
2123                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2124                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
2125                                         L3HDR(struct tcphdr,ip)->th_seq);
2126                                 break;
2127
2128                         case O_TCPACK:
2129                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2130                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
2131                                         L3HDR(struct tcphdr,ip)->th_ack);
2132                                 break;
2133
2134                         case O_TCPWIN:
2135                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2136                                     cmd->arg1 ==
2137                                         L3HDR(struct tcphdr,ip)->th_win);
2138                                 break;
2139
2140                         case O_ESTAB:
2141                                 /* reject packets which have SYN only */
2142                                 /* XXX should i also check for TH_ACK ? */
2143                                 match = (proto == IPPROTO_TCP && offset == 0 &&
2144                                     (L3HDR(struct tcphdr,ip)->th_flags &
2145                                      (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2146                                 break;
2147
2148                         case O_LOG:
2149                                 if (fw_verbose)
2150                                         ipfw_log(f, hlen, args->eh, m, oif);
2151                                 match = 1;
2152                                 break;
2153
2154                         case O_PROB:
2155                                 match = (krandom() <
2156                                         ((ipfw_insn_u32 *)cmd)->d[0]);
2157                                 break;
2158
2159                         /*
2160                          * The second set of opcodes represents 'actions',
2161                          * i.e. the terminal part of a rule once the packet
2162                          * matches all previous patterns.
2163                          * Typically there is only one action for each rule,
2164                          * and the opcode is stored at the end of the rule
2165                          * (but there are exceptions -- see below).
2166                          *
2167                          * In general, here we set retval and terminate the
2168          * outer loop (would be a 'break 3' in some languages,
2169                          * but we need to do a 'goto done').
2170                          *
2171                          * Exceptions:
2172                          * O_COUNT and O_SKIPTO actions:
2173                          *   instead of terminating, we jump to the next rule
2174                          *   ('goto next_rule', equivalent to a 'break 2'),
2175                          *   or to the SKIPTO target ('goto again' after
2176                          *   having set f, cmd and l), respectively.
2177                          *
2178                          * O_LIMIT and O_KEEP_STATE: these opcodes are
2179                          *   not real 'actions', and are stored right
2180                          *   before the 'action' part of the rule.
2181                          *   These opcodes try to install an entry in the
2182                          *   state tables; if successful, we continue with
2183                          *   the next opcode (match=1; break;), otherwise
2184                          *   the packet must be dropped ('goto done' after
2185                          *   setting retval).  If static rules are changed
2186                          *   during the state installation, the packet will
2187                          *   be dropped ('return IP_FW_PORT_DENY_FLAG').
2188                          *
2189                          * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2190                          *   cause a lookup of the state table, and a jump
2191                          *   to the 'action' part of the parent rule
2192                          *   ('goto check_body') if an entry is found, or
2193                          *   (CHECK_STATE only) a jump to the next rule if
2194                          *   the entry is not found ('goto next_rule').
2195          *   The result of the lookup is cached so that
2196          *   further instances of these opcodes become
2197          *   effectively NOPs.  If static rules are changed
2198          *   during the state lookup, the packet will
2199                          *   be dropped ('return IP_FW_PORT_DENY_FLAG').
2200                          */
2201                         case O_LIMIT:
2202                         case O_KEEP_STATE:
2203                                 if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
2204                                         kprintf("%s rule (%d) is not ready "
2205                                                 "on cpu%d\n",
2206                                                 cmd->opcode == O_LIMIT ?
2207                                                 "limit" : "keep state",
2208                                                 f->rulenum, f->cpuid);
2209                                         goto next_rule;
2210                                 }
2211                                 if (install_state(f,
2212                                     (ipfw_insn_limit *)cmd, args, &deny)) {
2213                                         if (deny)
2214                                                 return IP_FW_PORT_DENY_FLAG;
2215
2216                                         retval = IP_FW_PORT_DENY_FLAG;
2217                                         goto done; /* error/limit violation */
2218                                 }
2219                                 if (deny)
2220                                         return IP_FW_PORT_DENY_FLAG;
2221                                 match = 1;
2222                                 break;
2223
2224                         case O_PROBE_STATE:
2225                         case O_CHECK_STATE:
2226                                 /*
2227                                  * dynamic rules are checked at the first
2228                                  * keep-state or check-state occurrence,
2229                                  * with the result being stored in dyn_dir.
2230                                  * The compiler introduces a PROBE_STATE
2231                                  * instruction for us when we have a
2232                                  * KEEP_STATE (because PROBE_STATE needs
2233                                  * to be run first).
2234                                  */
2235                                 if (dyn_dir == MATCH_UNKNOWN) {
2236                                         dyn_f = lookup_rule(&args->f_id,
2237                                                 &dyn_dir,
2238                                                 proto == IPPROTO_TCP ?
2239                                                 L3HDR(struct tcphdr, ip) : NULL,
2240                                                 ip_len, &deny);
2241                                         if (deny)
2242                                                 return IP_FW_PORT_DENY_FLAG;
2243                                         if (dyn_f != NULL) {
2244                                                 /*
2245                                                  * Found a rule from a dynamic
2246                                                  * entry; jump to the 'action'
2247                                                  * part of the rule.
2248                                                  */
2249                                                 f = dyn_f;
2250                                                 cmd = ACTION_PTR(f);
2251                                                 l = f->cmd_len - f->act_ofs;
2252                                                 goto check_body;
2253                                         }
2254                                 }
2255                                 /*
2256                                  * Dynamic entry not found. If CHECK_STATE,
2257                                  * skip to next rule, if PROBE_STATE just
2258                                  * ignore and continue with next opcode.
2259                                  */
2260                                 if (cmd->opcode == O_CHECK_STATE)
2261                                         goto next_rule;
2262                                 else if (!(f->rule_flags & IPFW_RULE_F_STATE))
2263                                         goto next_rule; /* not ready yet */
2264                                 match = 1;
2265                                 break;
2266
2267                         case O_ACCEPT:
2268                                 retval = 0;     /* accept */
2269                                 goto done;
2270
2271                         case O_PIPE:
2272                         case O_QUEUE:
2273                                 args->rule = f; /* report matching rule */
2274                                 retval = cmd->arg1 | IP_FW_PORT_DYNT_FLAG;
2275                                 goto done;
2276
2277                         case O_DIVERT:
2278                         case O_TEE:
2279                                 if (args->eh) /* not on layer 2 */
2280                                         break;
2281
2282                                 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
2283                                                  sizeof(uint16_t), MB_DONTWAIT);
2284                                 if (mtag == NULL) {
2285                                         retval = IP_FW_PORT_DENY_FLAG;
2286                                         goto done;
2287                                 }
2288                                 *(uint16_t *)m_tag_data(mtag) = f->rulenum;
2289                                 m_tag_prepend(m, mtag);
2290                                 retval = (cmd->opcode == O_DIVERT) ?
2291                                     cmd->arg1 :
2292                                     cmd->arg1 | IP_FW_PORT_TEE_FLAG;
2293                                 goto done;
2294
2295                         case O_COUNT:
2296                         case O_SKIPTO:
2297                                 f->pcnt++;      /* update stats */
2298                                 f->bcnt += ip_len;
2299                                 f->timestamp = time_second;
2300                                 if (cmd->opcode == O_COUNT)
2301                                         goto next_rule;
2302                                 /* handle skipto */
2303                                 if (f->next_rule == NULL)
2304                                         lookup_next_rule(f);
2305                                 f = f->next_rule;
2306                                 goto again;
2307
2308                         case O_REJECT:
2309                                 /*
2310                                  * Drop the packet and send a reject notice
2311                                  * if the packet is not ICMP (or is an ICMP
2312                                  * query), and it is not multicast/broadcast.
2313                                  */
2314                                 if (hlen > 0 &&
2315                                     (proto != IPPROTO_ICMP ||
2316                                      is_icmp_query(ip)) &&
2317                                     !(m->m_flags & (M_BCAST|M_MCAST)) &&
2318                                     !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
2319                                         /*
2320                                          * Update statistics before the possible
2321                                          * blocking 'send_reject'
2322                                          */
2323                                         f->pcnt++;
2324                                         f->bcnt += ip_len;
2325                                         f->timestamp = time_second;
2326
2327                                         send_reject(args, cmd->arg1,
2328                                             offset,ip_len);
2329                                         m = args->m;
2330
2331                                         /*
2332                                          * Return directly here, rule stats
2333                                          * have been updated above.
2334                                          */
2335                                         return IP_FW_PORT_DENY_FLAG;
2336                                 }
2337                                 /* FALLTHROUGH */
2338                         case O_DENY:
2339                                 retval = IP_FW_PORT_DENY_FLAG;
2340                                 goto done;
2341
2342                         case O_FORWARD_IP:
2343                                 if (args->eh)   /* not valid on layer2 pkts */
2344                                         break;
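				/*
				 * For stateful sessions only the forward
				 * direction (or a purely static match,
				 * dyn_f == NULL) gets the forwarding tag;
				 * reply packets follow their normal path.
				 */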
2345                                 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
2346                                         struct sockaddr_in *sin;
2347
2348                                         mtag = m_tag_get(PACKET_TAG_IPFORWARD,
2349                                                sizeof(*sin), MB_DONTWAIT);
2350                                         if (mtag == NULL) {
2351                                                 retval = IP_FW_PORT_DENY_FLAG;
2352                                                 goto done;
2353                                         }
2354                                         sin = m_tag_data(mtag);
2355
2356                                         /* Structure copy */
2357                                         *sin = ((ipfw_insn_sa *)cmd)->sa;
2358
2359                                         m_tag_prepend(m, mtag);
2360                                         m->m_pkthdr.fw_flags |=
2361                                                 IPFORWARD_MBUF_TAGGED;
2362                                 }
2363                                 retval = 0;
2364                                 goto done;
2365
2366                         default:
2367                                 panic("-- unknown opcode %d\n", cmd->opcode);
2368                         } /* end of switch() on opcodes */
2369
2370                         if (cmd->len & F_NOT)
2371                                 match = !match;
2372
2373                         if (match) {
2374                                 if (cmd->len & F_OR)
2375                                         skip_or = 1;
2376                         } else {
2377                                 if (!(cmd->len & F_OR)) /* not an OR block, */
2378                                         break;          /* try next rule    */
2379                         }
2380
2381                 }       /* end of inner for, scan opcodes */
2382
2383 next_rule:;             /* try next rule                */
2384
2385         }               /* end of outer for, scan rules */
2386         kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
2387         return(IP_FW_PORT_DENY_FLAG);
2388
2389 done:
2390         /* Update statistics */
2391         f->pcnt++;
2392         f->bcnt += ip_len;
2393         f->timestamp = time_second;
2394         return retval;
2395
2396 pullup_failed:
2397         if (fw_verbose)
2398                 kprintf("pullup failed\n");
2399         return(IP_FW_PORT_DENY_FLAG);
2400 }
2401
2402 static void
2403 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
2404 {
2405         struct m_tag *mtag;
2406         struct dn_pkt *pkt;
2407         ipfw_insn *cmd;
2408         const struct ipfw_flow_id *id;
2409         struct dn_flow_id *fid;
2410
2411         M_ASSERTPKTHDR(m);
2412
2413         mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
2414         if (mtag == NULL) {
2415                 m_freem(m);
2416                 return;
2417         }
2418         m_tag_prepend(m, mtag);
2419
2420         pkt = m_tag_data(mtag);
2421         bzero(pkt, sizeof(*pkt));
2422
2423         cmd = fwa->rule->cmd + fwa->rule->act_ofs;
2424         if (cmd->opcode == O_LOG)
2425                 cmd += F_LEN(cmd);
2426         KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
2427                 ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));
2428
2429         pkt->dn_m = m;
2430         pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
2431         pkt->ifp = fwa->oif;
2432         pkt->cpuid = mycpu->gd_cpuid;
2433         pkt->pipe_nr = pipe_nr;
2434
2435         id = &fwa->f_id;
2436         fid = &pkt->id;
2437         fid->fid_dst_ip = id->dst_ip;
2438         fid->fid_src_ip = id->src_ip;
2439         fid->fid_dst_port = id->dst_port;
2440         fid->fid_src_port = id->src_port;
2441         fid->fid_proto = id->proto;
2442         fid->fid_flags = id->flags;
2443
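	/*
	 * Reference the matching rule so it stays valid while the packet
	 * is queued in dummynet; dummynet drops the reference through
	 * dn_unref_priv (ipfw_unref_rule) when it is done with the packet.
	 */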
2444         ipfw_ref_rule(fwa->rule);
2445         pkt->dn_priv = fwa->rule;
2446         pkt->dn_unref_priv = ipfw_unref_rule;
2447
2448         if (cmd->opcode == O_PIPE)
2449                 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
2450
2451         if (dir == DN_TO_IP_OUT) {
2452                 /*
2453                  * We need to copy *ro because for ICMP pkts (and maybe
2454                  * others) the caller passed a pointer into the stack;
2455                  * dst might also be a pointer into *ro so it needs to
2456                  * be updated.
2457                  */
2458                 pkt->ro = *(fwa->ro);
2459                 if (fwa->ro->ro_rt)
2460                         fwa->ro->ro_rt->rt_refcnt++;
2461                 if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) {
2462                         /* 'dst' points into 'ro' */
2463                         fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst);
2464                 }
2465                 pkt->dn_dst = fwa->dst;
2466                 pkt->flags = fwa->flags;
2467         }
2468
2469         m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
2470         ip_dn_queue(m);
2471 }
2472
2473 /*
2474  * When a rule is added/deleted, clear the next_rule pointers in all rules.
2475  * These will be reconstructed on the fly as packets are matched.
2476  * Must be called at splimp().
2477  */
2478 static void
2479 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
2480 {
2481         struct ip_fw *rule;
2482
2483         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
2484                 rule->next_rule = NULL;
2485 }
2486
2487 static __inline void
2488 ipfw_inc_static_count(struct ip_fw *rule)
2489 {
2490         KKASSERT(mycpuid == 0);
2491
2492         static_count++;
2493         static_ioc_len += IOC_RULESIZE(rule);
2494 }
2495
2496 static __inline void
2497 ipfw_dec_static_count(struct ip_fw *rule)
2498 {
2499         int l = IOC_RULESIZE(rule);
2500
2501         KKASSERT(mycpuid == 0);
2502
2503         KASSERT(static_count > 0, ("invalid static count %u\n", static_count));
2504         static_count--;
2505
2506         KASSERT(static_ioc_len >= l,
2507                 ("invalid static len %u\n", static_ioc_len));
2508         static_ioc_len -= l;
2509 }
2510
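/*
 * Chain the per-CPU copies of a rule through their 'sibling' pointers as
 * the add message hops from CPU to CPU; fwmsg->sibling always points at
 * the copy created on the previous CPU.
 */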
2511 static void
2512 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
2513 {
2514         if (fwmsg->sibling != NULL) {
2515                 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
2516                 fwmsg->sibling->sibling = rule;
2517         }
2518         fwmsg->sibling = rule;
2519 }
2520
2521 static struct ip_fw *
2522 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
2523 {
2524         struct ip_fw *rule;
2525
2526         rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
2527
2528         rule->act_ofs = ioc_rule->act_ofs;
2529         rule->cmd_len = ioc_rule->cmd_len;
2530         rule->rulenum = ioc_rule->rulenum;
2531         rule->set = ioc_rule->set;
2532         rule->usr_flags = ioc_rule->usr_flags;
2533
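	/* cmd_len is in 32-bit instruction words, hence the "* 4" to get bytes */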
2534         bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
2535
2536         rule->refcnt = 1;
2537         rule->cpuid = mycpuid;
2538
2539         rule->stub = stub;
2540         if (stub != NULL)
2541                 stub->rule[mycpuid] = rule;
2542
2543         return rule;
2544 }
2545
2546 static void
2547 ipfw_add_rule_dispatch(struct netmsg *nmsg)
2548 {
2549         struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
2550         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2551         struct ip_fw *rule;
2552
2553         rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);
2554
2555         /*
2556          * Bump generation after ipfw_create_rule(),
2557          * since that function may block (M_WAITOK allocation)
2558          */
2559         ctx->ipfw_gen++;
2560
2561         /*
2562          * Insert rule into the pre-determined position
2563          */
2564         if (fwmsg->prev_rule != NULL) {
2565                 struct ip_fw *prev, *next;
2566
2567                 prev = fwmsg->prev_rule;
2568                 KKASSERT(prev->cpuid == mycpuid);
2569
2570                 next = fwmsg->next_rule;
2571                 KKASSERT(next->cpuid == mycpuid);
2572
2573                 rule->next = next;
2574                 prev->next = rule;
2575
2576                 /*
2577                  * Move to the position on the next CPU
2578                  * before the msg is forwarded.
2579                  */
2580                 fwmsg->prev_rule = prev->sibling;
2581                 fwmsg->next_rule = next->sibling;
2582         } else {
2583                 KKASSERT(fwmsg->next_rule == NULL);
2584                 rule->next = ctx->ipfw_layer3_chain;
2585                 ctx->ipfw_layer3_chain = rule;
2586         }
2587
2588         /* Link rule CPU sibling */
2589         ipfw_link_sibling(fwmsg, rule);
2590
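	/*
	 * The cached next_rule pointers are now stale; clear them so that
	 * they are rebuilt lazily as packets are matched.
	 */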
2591         ipfw_flush_rule_ptrs(ctx);
2592
2593         if (mycpuid == 0) {
2594                 /* Statistics only need to be updated once */
2595                 ipfw_inc_static_count(rule);
2596
2597                 /* Return the rule on CPU0 */
2598                 nmsg->nm_lmsg.u.ms_resultp = rule;
2599         }
2600
2601         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2602 }
2603
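/*
 * Walk the sibling chain of a freshly added stateful rule and mark each
 * per-CPU copy as ready to create states (IPFW_RULE_F_STATE).
 */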
2604 static void
2605 ipfw_enable_state_dispatch(struct netmsg *nmsg)
2606 {
2607         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
2608         struct ip_fw *rule = lmsg->u.ms_resultp;
2609
2610         KKASSERT(rule->cpuid == mycpuid);
2611         KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
2612         KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
2613         rule->rule_flags |= IPFW_RULE_F_STATE;
2614         lmsg->u.ms_resultp = rule->sibling;
2615
2616         ifnet_forwardmsg(lmsg, mycpuid + 1);
2617 }
2618
2619 /*
2620  * Add a new rule to the list.  Copy the rule into a malloc'ed area,
2621  * possibly assign it a rule number, and insert it into the sorted list.
2622  * Update the rulenum in the input struct so the caller knows the
2623  * final rule number as well.
2624  */
2625 static void
2626 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
2627 {
2628         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2629         struct netmsg_ipfw fwmsg;
2630         struct netmsg *nmsg;
2631         struct ip_fw *f, *prev, *rule;
2632         struct ip_fw_stub *stub;
2633
2634         IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2635
2636         crit_enter();
2637
2638         /*
2639          * If rulenum is 0, find the highest numbered rule before the
2640          * default rule and add the auto-increment step to it.
2641          */
2642         if (ioc_rule->rulenum == 0) {
2643                 int step = autoinc_step;
2644
2645                 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
2646                          step <= IPFW_AUTOINC_STEP_MAX);
2647
2648                 /*
2649                  * Locate the highest numbered rule before default
2650                  */
2651                 for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
2652                         if (f->rulenum == IPFW_DEFAULT_RULE)
2653                                 break;
2654                         ioc_rule->rulenum = f->rulenum;
2655                 }
2656                 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
2657                         ioc_rule->rulenum += step;
2658         }
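        /*
         * Worked example (illustrative, values assumed): with autoinc_step
         * set to 100 and the highest non-default rule numbered 500, a
         * request carrying rulenum 0 is rewritten above to 500 + 100 = 600.
         */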
2659         KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
2660                 ioc_rule->rulenum != 0,
2661                 ("invalid rule num %d\n", ioc_rule->rulenum));
2662
2663         /*
2664          * Now find the right place for the new rule in the sorted list.
2665          */
2666         for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
2667              prev = f, f = f->next) {
2668                 if (f->rulenum > ioc_rule->rulenum) {
2669                         /* Found the location */
2670                         break;
2671                 }
2672         }
2673         KASSERT(f != NULL, ("no default rule?!\n"));
2674
2675         if (rule_flags & IPFW_RULE_F_STATE) {
2676                 int size;
2677
2678                 /*
2679                  * If the new rule will create states, then allocate
2680                  * a rule stub, which will be referenced by states
2681                  * (dyn rules)
2682                  */
2683                 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
2684                 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
2685         } else {
2686                 stub = NULL;
2687         }
2688
2689         /*
2690          * Duplicate the rule onto each CPU.
2691          * The rule duplicated on CPU0 will be returned.
2692          */
2693         bzero(&fwmsg, sizeof(fwmsg));
2694         nmsg = &fwmsg.nmsg;
2695         netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_add_rule_dispatch);
2696         fwmsg.ioc_rule = ioc_rule;
2697         fwmsg.prev_rule = prev;
2698         fwmsg.next_rule = prev == NULL ? NULL : f;
2699         fwmsg.stub = stub;
2700
2701         ifnet_domsg(&nmsg->nm_lmsg, 0);
2702         KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);
2703
2704         rule = nmsg->nm_lmsg.u.ms_resultp;
2705         KKASSERT(rule != NULL && rule->cpuid == mycpuid);
2706
2707         if (rule_flags & IPFW_RULE_F_STATE) {
2708                 /*
2709                  * Turn on the state flag _after_ everything on all
2710                  * CPUs has been set up.
2711                  */
2712                 bzero(nmsg, sizeof(*nmsg));
2713                 netmsg_init(nmsg, &curthread->td_msgport, 0,
2714                             ipfw_enable_state_dispatch);
2715                 nmsg->nm_lmsg.u.ms_resultp = rule;
2716
2717                 ifnet_domsg(&nmsg->nm_lmsg, 0);
2718                 KKASSERT(nmsg->nm_lmsg.u.ms_resultp == NULL);
2719         }
2720
2721         crit_exit();
2722
2723         DEB(kprintf("++ installed rule %d, static count now %d\n",
2724                 rule->rulenum, static_count);)
2725 }
2726
2727 /**
2728  * Free storage associated with a static rule (including derived
2729  * dynamic rules).
2730  * The caller is in charge of clearing rule pointers to avoid
2731  * dangling pointers.
2732  * @return a pointer to the next entry.
2733  * Arguments are not checked, so they had better be correct.
2734  * Must be called at splimp().
2735  */
2736 static struct ip_fw *
2737 ipfw_delete_rule(struct ipfw_context *ctx,
2738                  struct ip_fw *prev, struct ip_fw *rule)
2739 {
2740         struct ip_fw *n;
2741         struct ip_fw_stub *stub;
2742
2743         ctx->ipfw_gen++;
2744
2745         /* STATE flag should have been cleared before we reach here */
2746         KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);
2747
2748         stub = rule->stub;
2749         n = rule->next;
2750         if (prev == NULL)
2751                 ctx->ipfw_layer3_chain = n;
2752         else
2753                 prev->next = n;
2754
2755         /* Mark the rule as invalid */
2756         rule->rule_flags |= IPFW_RULE_F_INVALID;
2757         rule->next_rule = NULL;
2758         rule->sibling = NULL;
2759         rule->stub = NULL;
2760 #ifdef foo
2761         /* Don't reset cpuid here; keep various assertions working */
2762         rule->cpuid = -1;
2763 #endif
2764
2765         /* Statistics only need to be updated once */
2766         if (mycpuid == 0)
2767                 ipfw_dec_static_count(rule);
2768
2769         /* Free 'stub' on the last CPU */
2770         if (stub != NULL && mycpuid == ncpus - 1)
2771                 kfree(stub, M_IPFW);
2772
2773         /* Try to free this rule */
2774         ipfw_free_rule(rule);
2775
2776         /* Return the next rule */
2777         return n;
2778 }
2779
2780 static void
2781 ipfw_flush_dispatch(struct netmsg *nmsg)
2782 {
2783         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
2784         int kill_default = lmsg->u.ms_result;
2785         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2786         struct ip_fw *rule;
2787
2788         ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */
2789
2790         while ((rule = ctx->ipfw_layer3_chain) != NULL &&
2791                (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
2792                 ipfw_delete_rule(ctx, NULL, rule);
2793
2794         ifnet_forwardmsg(lmsg, mycpuid + 1);
2795 }
2796
2797 static void
2798 ipfw_disable_rule_state_dispatch(struct netmsg *nmsg)
2799 {
2800         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2801         struct ip_fw *rule;
2802
2803         rule = dmsg->start_rule;
2804         if (rule != NULL) {
2805                 KKASSERT(rule->cpuid == mycpuid);
2806
2807                 /*
2808                  * Move to the position on the next CPU
2809                  * before the msg is forwarded.
2810                  */
2811                 dmsg->start_rule = rule->sibling;
2812         } else {
2813                 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2814
2815                 KKASSERT(dmsg->rulenum == 0);
2816                 rule = ctx->ipfw_layer3_chain;
2817         }
2818
2819         while (rule != NULL) {
2820                 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
2821                         break;
2822                 rule->rule_flags &= ~IPFW_RULE_F_STATE;
2823                 rule = rule->next;
2824         }
2825
2826         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2827 }
2828
2829 /*
2830  * Deletes all rules from each per-CPU chain (including the default
2831  * rule if 'kill_default' is set).
2832  * Must be called at splimp().
2833  */
2834 static void
2835 ipfw_flush(int kill_default)
2836 {
2837         struct netmsg_del dmsg;
2838         struct netmsg nmsg;
2839         struct lwkt_msg *lmsg;
2840         struct ip_fw *rule;
2841         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2842
2843         IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2844
2845         /*
2846          * If 'kill_default' is set, the caller has already done the
2847          * necessary msgport syncing; no need to do it again.
2848          */
2849         if (!kill_default) {
2850                 /*
2851                  * Let ipfw_chk() know the rules are going to
2852                  * be flushed, so it could jump directly to
2853                  * the default rule.
2854                  */
2855                 ipfw_flushing = 1;
2856                 netmsg_service_sync();
2857         }
2858
2859         /*
2860          * Clear STATE flag on rules, so no more states (dyn rules)
2861          * will be created.
2862          */
2863         bzero(&dmsg, sizeof(dmsg));
2864         netmsg_init(&dmsg.nmsg, &curthread->td_msgport, 0,
2865                     ipfw_disable_rule_state_dispatch);
2866         ifnet_domsg(&dmsg.nmsg.nm_lmsg, 0);
2867
2868         /*
2869          * This actually nukes all states (dyn rules)
2870          */
2871         lockmgr(&dyn_lock, LK_EXCLUSIVE);
2872         for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
2873                 /*
2874                  * Can't check IPFW_RULE_F_STATE here,
2875                  * since it has been cleared previously.
2876                  * Check 'stub' instead.
2877                  */
2878                 if (rule->stub != NULL) {
2879                         /* Force removal */
2880                         remove_dyn_rule_locked(rule, NULL);
2881                 }
2882         }
2883         lockmgr(&dyn_lock, LK_RELEASE);
2884
2885         /*
2886          * Press the 'flush' button
2887          */
2888         bzero(&nmsg, sizeof(nmsg));
2889         netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_flush_dispatch);
2890         lmsg = &nmsg.nm_lmsg;
2891         lmsg->u.ms_result = kill_default;
2892         ifnet_domsg(lmsg, 0);
2893
2894         KASSERT(dyn_count == 0, ("%u dyn rules remain\n", dyn_count));
2895
2896         if (kill_default) {
2897                 if (ipfw_dyn_v != NULL) {
2898                         /*
2899                          * Free the dynamic rule (state) hash table
2900                          */
2901                         kfree(ipfw_dyn_v, M_IPFW);
2902                         ipfw_dyn_v = NULL;
2903                 }
2904
2905                 KASSERT(static_count == 0,
2906                         ("%u static rules remain\n", static_count));
2907                 KASSERT(static_ioc_len == 0,
2908                         ("%u bytes of static rules remain\n", static_ioc_len));
2909         } else {
2910                 KASSERT(static_count == 1,
2911                         ("%u static rules remain\n", static_count));
2912                 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
2913                         ("%u bytes of static rules remain, should be %u\n",
2914                          static_ioc_len, IOC_RULESIZE(ctx->ipfw_default_rule)));
2915         }
2916
2917         /* Flush is done */
2918         ipfw_flushing = 0;
2919 }
2920
2921 static void
2922 ipfw_alt_delete_rule_dispatch(struct netmsg *nmsg)
2923 {
2924         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2925         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2926         struct ip_fw *rule, *prev;
2927
2928         rule = dmsg->start_rule;
2929         KKASSERT(rule->cpuid == mycpuid);
2930         dmsg->start_rule = rule->sibling;
2931
2932         prev = dmsg->prev_rule;
2933         if (prev != NULL) {
2934                 KKASSERT(prev->cpuid == mycpuid);
2935
2936                 /*
2937                  * Move to the position on the next CPU
2938                  * before the msg is forwarded.
2939                  */
2940                 dmsg->prev_rule = prev->sibling;
2941         }
2942
2943         /*
2944          * Flush pointers outside the loop, then delete all matching
2945          * rules.  'prev' remains the same throughout the cycle.
2946          */
2947         ipfw_flush_rule_ptrs(ctx);
2948         while (rule && rule->rulenum == dmsg->rulenum)
2949                 rule = ipfw_delete_rule(ctx, prev, rule);
2950
2951         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2952 }
2953
2954 static int
2955 ipfw_alt_delete_rule(uint16_t rulenum)
2956 {
2957         struct ip_fw *prev, *rule, *f;
2958         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2959         struct netmsg_del dmsg;
2960         struct netmsg *nmsg;
2961         int state;
2962
2963         /*
2964          * Locate first rule to delete
2965          */
2966         for (prev = NULL, rule = ctx->ipfw_layer3_chain;
2967              rule && rule->rulenum < rulenum;
2968              prev = rule, rule = rule->next)
2969                 ; /* EMPTY */
2970         if (rule == NULL || rule->rulenum != rulenum)
2971                 return EINVAL;
2972
2973         /*
2974          * Check whether any rules with the given number will
2975          * create states.
2976          */
2977         state = 0;
2978         for (f = rule; f && f->rulenum == rulenum; f = f->next) {
2979                 if (f->rule_flags & IPFW_RULE_F_STATE) {
2980                         state = 1;
2981                         break;
2982                 }
2983         }
2984
2985         if (state) {
2986                 /*
2987                  * Clear the STATE flag, so no more states will be
2988                  * created based on the rules numbered 'rulenum'.
2989                  */
2990                 bzero(&dmsg, sizeof(dmsg));
2991                 nmsg = &dmsg.nmsg;
2992                 netmsg_init(nmsg, &curthread->td_msgport, 0,
2993                             ipfw_disable_rule_state_dispatch);
2994                 dmsg.start_rule = rule;
2995                 dmsg.rulenum = rulenum;
2996
2997                 ifnet_domsg(&nmsg->nm_lmsg, 0);
2998                 KKASSERT(dmsg.start_rule == NULL);
2999
3000                 /*
3001                  * Nuke all related states
3002                  */
3003                 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3004                 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
3005                         /*
3006                          * Can't check IPFW_RULE_F_STATE here,
3007                          * since it has been cleared previously.
3008                          * Check 'stub' instead.
3009                          */
3010                         if (f->stub != NULL) {
3011                                 /* Force removal */
3012                                 remove_dyn_rule_locked(f, NULL);
3013                         }
3014                 }
3015                 lockmgr(&dyn_lock, LK_RELEASE);
3016         }
3017
3018         /*
3019          * Get rid of the rule duplications on all CPUs
3020          */
3021         bzero(&dmsg, sizeof(dmsg));
3022         nmsg = &dmsg.nmsg;
3023         netmsg_init(nmsg, &curthread->td_msgport, 0,
3024                     ipfw_alt_delete_rule_dispatch);
3025         dmsg.prev_rule = prev;
3026         dmsg.start_rule = rule;
3027         dmsg.rulenum = rulenum;
3028
3029         ifnet_domsg(&nmsg->nm_lmsg, 0);
3030         KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
3031         return 0;
3032 }
3033
3034 static void
3035 ipfw_alt_delete_ruleset_dispatch(struct netmsg *nmsg)
3036 {
3037         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3038         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3039         struct ip_fw *prev, *rule;
3040 #ifdef INVARIANTS
3041         int del = 0;
3042 #endif
3043
3044         ipfw_flush_rule_ptrs(ctx);
3045
3046         prev = NULL;
3047         rule = ctx->ipfw_layer3_chain;
3048         while (rule != NULL) {
3049                 if (rule->set == dmsg->from_set) {
3050                         rule = ipfw_delete_rule(ctx, prev, rule);
3051 #ifdef INVARIANTS
3052                         del = 1;
3053 #endif
3054                 } else {
3055                         prev = rule;
3056                         rule = rule->next;
3057                 }
3058         }
3059         KASSERT(del, ("no rule matched the set?!\n"));
3060
3061         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3062 }
3063
3064 static void
3065 ipfw_disable_ruleset_state_dispatch(struct netmsg *nmsg)
3066 {
3067         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3068         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3069         struct ip_fw *rule;
3070 #ifdef INVARIANTS
3071         int cleared = 0;
3072 #endif
3073
3074         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3075                 if (rule->set == dmsg->from_set) {
3076 #ifdef INVARIANTS
3077                         cleared = 1;
3078 #endif
3079                         rule->rule_flags &= ~IPFW_RULE_F_STATE;
3080                 }
3081         }
3082         KASSERT(cleared, ("no rule matched the set?!\n"));
3083
3084         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3085 }
3086
3087 static int
3088 ipfw_alt_delete_ruleset(uint8_t set)
3089 {
3090         struct netmsg_del dmsg;
3091         struct netmsg *nmsg;
3092         int state, del;
3093         struct ip_fw *rule;
3094         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3095
3096         /*
3097          * Check whether the 'set' exists.  If it exists,
3098          * then check whether any rules within the set will
3099          * try to create states.
3100          */
3101         state = 0;
3102         del = 0;
3103         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3104                 if (rule->set == set) {
3105                         del = 1;
3106                         if (rule->rule_flags & IPFW_RULE_F_STATE) {
3107                                 state = 1;
3108                                 break;
3109                         }
3110                 }
3111         }
3112         if (!del)
3113                 return 0; /* XXX EINVAL? */
3114
3115         if (state) {
3116                 /*
3117                  * Clear the STATE flag, so no more states will be
3118                  * created based on the rules in this set.
3119                  */
3120                 bzero(&dmsg, sizeof(dmsg));
3121                 nmsg = &dmsg.nmsg;
3122                 netmsg_init(nmsg, &curthread->td_msgport, 0,
3123                             ipfw_disable_ruleset_state_dispatch);
3124                 dmsg.from_set = set;
3125
3126                 ifnet_domsg(&nmsg->nm_lmsg, 0);
3127
3128                 /*
3129                  * Nuke all related states
3130                  */
3131                 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3132                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3133                         if (rule->set != set)
3134                                 continue;
3135
3136                         /*
3137                          * Can't check IPFW_RULE_F_STATE here,
3138                          * since it has been cleared previously.
3139                          * Check 'stub' instead.
3140                          */
3141                         if (rule->stub != NULL) {
3142                                 /* Force removal */
3143                                 remove_dyn_rule_locked(rule, NULL);
3144                         }
3145                 }
3146                 lockmgr(&dyn_lock, LK_RELEASE);
3147         }
3148
3149         /*
3150          * Delete this set
3151          */
3152         bzero(&dmsg, sizeof(dmsg));
3153         nmsg = &dmsg.nmsg;
3154         netmsg_init(nmsg, &curthread->td_msgport, 0,
3155                     ipfw_alt_delete_ruleset_dispatch);
3156         dmsg.from_set = set;
3157
3158         ifnet_domsg(&nmsg->nm_lmsg, 0);
3159         return 0;
3160 }
3161
3162 static void
3163 ipfw_alt_move_rule_dispatch(struct netmsg *nmsg)
3164 {
3165         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3166         struct ip_fw *rule;
3167
3168         rule = dmsg->start_rule;
3169         KKASSERT(rule->cpuid == mycpuid);
3170
3171         /*
3172          * Move to the position on the next CPU
3173          * before the msg is forwarded.
3174          */
3175         dmsg->start_rule = rule->sibling;
3176
3177         while (rule && rule->rulenum <= dmsg->rulenum) {
3178                 if (rule->rulenum == dmsg->rulenum)
3179                         rule->set = dmsg->to_set;
3180                 rule = rule->next;
3181         }
3182         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3183 }
3184
3185 static int
3186 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
3187 {
3188         struct netmsg_del dmsg;
3189         struct netmsg *nmsg;
3190         struct ip_fw *rule;
3191         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3192
3193         /*
3194          * Locate first rule to move
3195          */
3196         for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
3197              rule = rule->next) {
3198                 if (rule->rulenum == rulenum && rule->set != set)
3199                         break;
3200         }
3201         if (rule == NULL || rule->rulenum > rulenum)
3202                 return 0; /* XXX error? */
3203
3204         bzero(&dmsg, sizeof(dmsg));
3205         nmsg = &dmsg.nmsg;
3206         netmsg_init(nmsg, &curthread->td_msgport, 0,
3207                     ipfw_alt_move_rule_dispatch);
3208         dmsg.start_rule = rule;
3209         dmsg.rulenum = rulenum;
3210         dmsg.to_set = set;
3211
3212         ifnet_domsg(&nmsg->nm_lmsg, 0);
3213         KKASSERT(dmsg.start_rule == NULL);
3214         return 0;
3215 }
3216
3217 static void
3218 ipfw_alt_move_ruleset_dispatch(struct netmsg *nmsg)
3219 {
3220         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3221         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3222         struct ip_fw *rule;
3223
3224         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3225                 if (rule->set == dmsg->from_set)
3226                         rule->set = dmsg->to_set;
3227         }
3228         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3229 }
3230
3231 static int
3232 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
3233 {
3234         struct netmsg_del dmsg;
3235         struct netmsg *nmsg;
3236
3237         bzero(&dmsg, sizeof(dmsg));
3238         nmsg = &dmsg.nmsg;
3239         netmsg_init(nmsg, &curthread->td_msgport, 0,
3240                     ipfw_alt_move_ruleset_dispatch);
3241         dmsg.from_set = from_set;
3242         dmsg.to_set = to_set;
3243
3244         ifnet_domsg(&nmsg->nm_lmsg, 0);
3245         return 0;
3246 }
3247
3248 static void
3249 ipfw_alt_swap_ruleset_dispatch(struct netmsg *nmsg)
3250 {
3251         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3252         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3253         struct ip_fw *rule;
3254
3255         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3256                 if (rule->set == dmsg->from_set)
3257                         rule->set = dmsg->to_set;
3258                 else if (rule->set == dmsg->to_set)
3259                         rule->set = dmsg->from_set;
3260         }
3261         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3262 }
3263
3264 static int
3265 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
3266 {
3267         struct netmsg_del dmsg;
3268         struct netmsg *nmsg;
3269
3270         bzero(&dmsg, sizeof(dmsg));
3271         nmsg = &dmsg.nmsg;
3272         netmsg_init(nmsg, &curthread->td_msgport, 0,
3273                     ipfw_alt_swap_ruleset_dispatch);
3274         dmsg.from_set = set1;
3275         dmsg.to_set = set2;
3276
3277         ifnet_domsg(&nmsg->nm_lmsg, 0);
3278         return 0;
3279 }
3280
3281 /**
3282  * Remove all rules with given number, and also do set manipulation.
3283  *
3284  * The argument is a uint32_t.  The low 16 bits are the rule or set number,
3285  * the next 8 bits are the new set, the top 8 bits are the command:
3286  *
3287  *      0       delete rules with given number
3288  *      1       delete rules with given set number
3289  *      2       move rules with given number to new set
3290  *      3       move rules with given set number to new set
3291  *      4       swap sets with given numbers
3292  */
3293 static int
3294 ipfw_ctl_alter(uint32_t arg)
3295 {
3296         uint16_t rulenum;
3297         uint8_t cmd, new_set;
3298         int error = 0;
3299
3300         rulenum = arg & 0xffff;
3301         cmd = (arg >> 24) & 0xff;
3302         new_set = (arg >> 16) & 0xff;
3303
3304         if (cmd > 4)
3305                 return EINVAL;
3306         if (new_set >= IPFW_DEFAULT_SET)
3307                 return EINVAL;
3308         if (cmd == 0 || cmd == 2) {
3309                 if (rulenum == IPFW_DEFAULT_RULE)
3310                         return EINVAL;
3311         } else {
3312                 if (rulenum >= IPFW_DEFAULT_SET)
3313                         return EINVAL;
3314         }
3315
3316         switch (cmd) {
3317         case 0: /* delete rules with given number */
3318                 error = ipfw_alt_delete_rule(rulenum);
3319                 break;
3320
3321         case 1: /* delete all rules with given set number */
3322                 error = ipfw_alt_delete_ruleset(rulenum);
3323                 break;
3324
3325         case 2: /* move rules with given number to new set */
3326                 error = ipfw_alt_move_rule(rulenum, new_set);
3327                 break;
3328
3329         case 3: /* move rules with given set number to new set */
3330                 error = ipfw_alt_move_ruleset(rulenum, new_set);
3331                 break;
3332
3333         case 4: /* swap two sets */
3334                 error = ipfw_alt_swap_ruleset(rulenum, new_set);
3335                 break;
3336         }
3337         return error;
3338 }
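
#if 0
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * encoding the IP_FW_DEL argument decoded by ipfw_ctl_alter() above.
 * To move rule 100 into set 3, command 2 goes into the top 8 bits, the
 * new set into the next 8 bits and the rule number into the low 16 bits.
 * 's' is assumed to be a raw IP socket; as userland code this would also
 * need <sys/socket.h>, <netinet/in.h> and <netinet/ip_fw.h>.
 */
static int
example_move_rule_to_set(int s, uint16_t rulenum, uint8_t new_set)
{
        uint32_t arg = (2U << 24) | ((uint32_t)new_set << 16) | rulenum;

        return setsockopt(s, IPPROTO_IP, IP_FW_DEL, &arg, sizeof(arg));
}
#endif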
3339
3340 /*
3341  * Clear counters for a specific rule.
3342  */
3343 static void
3344 clear_counters(struct ip_fw *rule, int log_only)
3345 {
3346         ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
3347
3348         if (log_only == 0) {
3349                 rule->bcnt = rule->pcnt = 0;
3350                 rule->timestamp = 0;
3351         }
3352         if (l->o.opcode == O_LOG)
3353                 l->log_left = l->max_log;
3354 }
3355
3356 static void
3357 ipfw_zero_entry_dispatch(struct netmsg *nmsg)
3358 {
3359         struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
3360         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3361         struct ip_fw *rule;
3362
3363         if (zmsg->rulenum == 0) {
3364                 KKASSERT(zmsg->start_rule == NULL);
3365
3366                 ctx->ipfw_norule_counter = 0;
3367                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3368                         clear_counters(rule, zmsg->log_only);
3369         } else {
3370                 struct ip_fw *start = zmsg->start_rule;
3371
3372                 KKASSERT(start->cpuid == mycpuid);
3373                 KKASSERT(start->rulenum == zmsg->rulenum);
3374
3375                 /*
3376                  * We can have multiple rules with the same number, so we
3377                  * need to clear them all.
3378                  */
3379                 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
3380                      rule = rule->next)
3381                         clear_counters(rule, zmsg->log_only);
3382
3383                 /*
3384                  * Move to the position on the next CPU
3385                  * before the msg is forwarded.
3386                  */
3387                 zmsg->start_rule = start->sibling;
3388         }
3389         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3390 }
3391
3392 /**
3393  * Reset some or all counters on firewall rules.
3394  * @arg rulenum is 0 to clear all entries, or contains a specific
3395  * rule number.
3396  * @arg log_only is 1 if we only want to reset logs, zero otherwise.
3397  */
3398 static int
3399 ipfw_ctl_zero_entry(int rulenum, int log_only)
3400 {
3401         struct netmsg_zent zmsg;
3402         struct netmsg *nmsg;
3403         const char *msg;
3404         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3405
3406         bzero(&zmsg, sizeof(zmsg));
3407         nmsg = &zmsg.nmsg;
3408         netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_zero_entry_dispatch);
3409         zmsg.log_only = log_only;
3410
3411         if (rulenum == 0) {
3412                 msg = log_only ? "ipfw: All logging counts reset.\n"
3413                                : "ipfw: Accounting cleared.\n";
3414         } else {
3415                 struct ip_fw *rule;
3416
3417                 /*
3418                  * Locate the first rule with 'rulenum'
3419                  */
3420                 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3421                         if (rule->rulenum == rulenum)
3422                                 break;
3423                 }
3424                 if (rule == NULL) /* we did not find any matching rules */
3425                         return (EINVAL);
3426                 zmsg.start_rule = rule;
3427                 zmsg.rulenum = rulenum;
3428
3429                 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
3430                                : "ipfw: Entry %d cleared.\n";
3431         }
3432         ifnet_domsg(&nmsg->nm_lmsg, 0);
3433         KKASSERT(zmsg.start_rule == NULL);
3434
3435         if (fw_verbose)
3436                 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
3437         return (0);
3438 }
3439
3440 /*
3441  * Check validity of the structure before insert.
3442  * Fortunately rules are simple, so this mostly needs to check rule sizes.
3443  */
3444 static int
3445 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
3446 {
3447         int l, cmdlen = 0;
3448         int have_action = 0;
3449         ipfw_insn *cmd;
3450
3451         *rule_flags = 0;
3452
3453         /* Check for valid size */
3454         if (size < sizeof(*rule)) {
3455                 kprintf("ipfw: rule too short\n");
3456                 return EINVAL;
3457         }
3458         l = IOC_RULESIZE(rule);
3459         if (l != size) {
3460                 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
3461                 return EINVAL;
3462         }
3463
3464         /* Check rule number */
3465         if (rule->rulenum == IPFW_DEFAULT_RULE) {
3466                 kprintf("ipfw: invalid rule number\n");
3467                 return EINVAL;
3468         }
3469
3470         /*
3471          * Now go through the individual checks.  They are very simple,
3472          * basically only instruction sizes.
3473          */
3474         for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
3475              l -= cmdlen, cmd += cmdlen) {
3476                 cmdlen = F_LEN(cmd);
3477                 if (cmdlen > l) {
3478                         kprintf("ipfw: opcode %d size truncated\n",
3479                                 cmd->opcode);
3480                         return EINVAL;
3481                 }
3482
3483                 DEB(kprintf("ipfw: opcode %d\n", cmd->opcode);)
3484
3485                 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
3486                         /* This rule will create states */
3487                         *rule_flags |= IPFW_RULE_F_STATE;
3488                 }
3489
3490                 switch (cmd->opcode) {
3491                 case O_NOP:
3492                 case O_PROBE_STATE:
3493                 case O_KEEP_STATE:
3494                 case O_PROTO:
3495                 case O_IP_SRC_ME:
3496                 case O_IP_DST_ME:
3497                 case O_LAYER2:
3498                 case O_IN:
3499                 case O_FRAG:
3500                 case O_IPOPT:
3501                 case O_IPLEN:
3502                 case O_IPID:
3503                 case O_IPTOS:
3504                 case O_IPPRECEDENCE:
3505                 case O_IPTTL:
3506                 case O_IPVER:
3507                 case O_TCPWIN:
3508                 case O_TCPFLAGS:
3509                 case O_TCPOPTS:
3510                 case O_ESTAB:
3511                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
3512                                 goto bad_size;
3513                         break;
3514
3515                 case O_UID:
3516                 case O_GID:
3517                 case O_IP_SRC:
3518                 case O_IP_DST:
3519                 case O_TCPSEQ:
3520                 case O_TCPACK:
3521                 case O_PROB:
3522                 case O_ICMPTYPE:
3523                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
3524                                 goto bad_size;
3525                         break;
3526
3527                 case O_LIMIT:
3528                         if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
3529                                 goto bad_size;
3530                         break;
3531
3532                 case O_LOG:
3533                         if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
3534                                 goto bad_size;
3535
3536                         ((ipfw_insn_log *)cmd)->log_left =
3537                             ((ipfw_insn_log *)cmd)->max_log;
3538
3539                         break;
3540
3541                 case O_IP_SRC_MASK:
3542                 case O_IP_DST_MASK:
3543                         if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
3544                                 goto bad_size;
3545                         if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
3546                                 kprintf("ipfw: opcode %d, useless rule\n",
3547                                         cmd->opcode);
3548                                 return EINVAL;
3549                         }
3550                         break;
3551
3552                 case O_IP_SRC_SET:
3553                 case O_IP_DST_SET:
3554                         if (cmd->arg1 == 0 || cmd->arg1 > 256) {
3555                                 kprintf("ipfw: invalid set size %d\n",
3556                                         cmd->arg1);
3557                                 return EINVAL;
3558                         }
3559                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
3560                             (cmd->arg1+31)/32 )
3561                                 goto bad_size;
3562                         break;
3563
3564                 case O_MACADDR2:
3565                         if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
3566                                 goto bad_size;
3567                         break;
3568
3569                 case O_MAC_TYPE:
3570                 case O_IP_SRCPORT:
3571                 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
3572                         if (cmdlen < 2 || cmdlen > 31)
3573                                 goto bad_size;
3574                         break;
3575
3576                 case O_RECV:
3577                 case O_XMIT:
3578                 case O_VIA:
3579                         if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
3580                                 goto bad_size;
3581                         break;
3582
3583                 case O_PIPE:
3584                 case O_QUEUE:
3585                         if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
3586                                 goto bad_size;
3587                         goto check_action;
3588
3589                 case O_FORWARD_IP:
3590                         if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
3591                                 goto bad_size;
3592                         } else {
3593                                 in_addr_t fwd_addr;
3594
3595                                 fwd_addr = ((ipfw_insn_sa *)cmd)->
3596                                            sa.sin_addr.s_addr;
3597                                 if (IN_MULTICAST(ntohl(fwd_addr))) {
3598                                         kprintf("ipfw: cannot forward to "
3599                                                 "a multicast address\n");
3600                                         return EINVAL;
3601                                 }
3602                         }
3603                         goto check_action;
3604
3605                 case O_FORWARD_MAC: /* XXX not implemented yet */
3606                 case O_CHECK_STATE:
3607                 case O_COUNT:
3608                 case O_ACCEPT:
3609                 case O_DENY:
3610                 case O_REJECT:
3611                 case O_SKIPTO:
3612                 case O_DIVERT:
3613                 case O_TEE:
3614                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
3615                                 goto bad_size;
3616 check_action:
3617                         if (have_action) {
3618                                 kprintf("ipfw: opcode %d, multiple actions"
3619                                         " not allowed\n",
3620                                         cmd->opcode);
3621                                 return EINVAL;
3622                         }
3623                         have_action = 1;
3624                         if (l != cmdlen) {
3625                                 kprintf("ipfw: opcode %d, action must be"
3626                                         " last opcode\n",
3627                                         cmd->opcode);
3628                                 return EINVAL;
3629                         }
3630                         break;
3631                 default:
3632                         kprintf("ipfw: opcode %d, unknown opcode\n",
3633                                 cmd->opcode);
3634                         return EINVAL;
3635                 }
3636         }
3637         if (have_action == 0) {
3638                 kprintf("ipfw: missing action\n");
3639                 return EINVAL;
3640         }
3641         return 0;
3642
3643 bad_size:
3644         kprintf("ipfw: opcode %d size %d wrong\n",
3645                 cmd->opcode, cmdlen);
3646         return EINVAL;
3647 }
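
#if 0
/*
 * Minimal sketch (illustrative only; the field values are assumptions)
 * of an ioc rule that passes ipfw_check_ioc_rule() above: a single
 * O_ACCEPT instruction, i.e. act_ofs 0 and cmd_len 1, with both lengths
 * counted in 32-bit words.  O_ACCEPT is an action, so have_action is
 * satisfied, and no state flag is set.
 */
static void
example_fill_accept_rule(struct ipfw_ioc_rule *ioc_rule)
{
        ioc_rule->rulenum = 1000;       /* anything but IPFW_DEFAULT_RULE */
        ioc_rule->set = 0;
        ioc_rule->act_ofs = 0;
        ioc_rule->cmd_len = 1;
        ioc_rule->cmd[0].len = 1;
        ioc_rule->cmd[0].opcode = O_ACCEPT;
}
#endif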
3648
3649 static int
3650 ipfw_ctl_add_rule(struct sockopt *sopt)
3651 {
3652         struct ipfw_ioc_rule *ioc_rule;
3653         size_t size;
3654         uint32_t rule_flags;
3655         int error;
3656         
3657         size = sopt->sopt_valsize;
3658         if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
3659             size < sizeof(*ioc_rule)) {
3660                 return EINVAL;
3661         }
3662         if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
3663                 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
3664                                           IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
3665         }
3666         ioc_rule = sopt->sopt_val;
3667
3668         error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
3669         if (error)
3670                 return error;
3671
3672         ipfw_add_rule(ioc_rule, rule_flags);
3673
3674         if (sopt->sopt_dir == SOPT_GET)
3675                 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
3676         return 0;
3677 }
3678
3679 static void *
3680 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
3681 {
3682         const struct ip_fw *sibling;
3683 #ifdef INVARIANTS
3684         int i;
3685 #endif
3686
3687         KKASSERT(rule->cpuid == 0);
3688
3689         ioc_rule->act_ofs = rule->act_ofs;
3690         ioc_rule->cmd_len = rule->cmd_len;
3691         ioc_rule->rulenum = rule->rulenum;
3692         ioc_rule->set = rule->set;
3693         ioc_rule->usr_flags = rule->usr_flags;
3694
3695         ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
3696         ioc_rule->static_count = static_count;
3697         ioc_rule->static_len = static_ioc_len;
3698
3699         /*
3700          * Visit (read-only) all of the rule's duplications to get
3701          * the necessary statistics
3702          */
3703 #ifdef INVARIANTS
3704         i = 0;
3705 #endif
3706         ioc_rule->pcnt = 0;
3707         ioc_rule->bcnt = 0;
3708         ioc_rule->timestamp = 0;
3709         for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
3710                 ioc_rule->pcnt += sibling->pcnt;
3711                 ioc_rule->bcnt += sibling->bcnt;
3712                 if (sibling->timestamp > ioc_rule->timestamp)
3713                         ioc_rule->timestamp = sibling->timestamp;
3714 #ifdef INVARIANTS
3715                 ++i;
3716 #endif
3717         }
3718         KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n"));
3719
3720         bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX: 32-bit words */);
3721
3722         return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
3723 }
3724
3725 static void
3726 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
3727                 struct ipfw_ioc_state *ioc_state)
3728 {
3729         const struct ipfw_flow_id *id;
3730         struct ipfw_ioc_flowid *ioc_id;
3731
3732         ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
3733                             0 : dyn_rule->expire - time_second;
3734         ioc_state->pcnt = dyn_rule->pcnt;
3735         ioc_state->bcnt = dyn_rule->bcnt;
3736
3737         ioc_state->dyn_type = dyn_rule->dyn_type;
3738         ioc_state->count = dyn_rule->count;
3739
3740         ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;
3741
3742         id = &dyn_rule->id;
3743         ioc_id = &ioc_state->id;
3744
3745         ioc_id->type = ETHERTYPE_IP;
3746         ioc_id->u.ip.dst_ip = id->dst_ip;
3747         ioc_id->u.ip.src_ip = id->src_ip;
3748         ioc_id->u.ip.dst_port = id->dst_port;
3749         ioc_id->u.ip.src_port = id->src_port;
3750         ioc_id->u.ip.proto = id->proto;
3751 }
3752
3753 static int
3754 ipfw_ctl_get_rules(struct sockopt *sopt)
3755 {
3756         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3757         struct ip_fw *rule;
3758         void *bp;
3759         size_t size;
3760         uint32_t dcount = 0;
3761
3762         /*
3763          * Pass up a copy of the current rules.  Static rules
3764          * come first (the last of which has number IPFW_DEFAULT_RULE),
3765          * followed by a possibly empty list of dynamic rules.
3766          */
3767         crit_enter();
3768
3769         size = static_ioc_len;  /* size of static rules */
3770         if (ipfw_dyn_v) {       /* add size of dyn.rules */
3771                 dcount = dyn_count;
3772                 size += dcount * sizeof(struct ipfw_ioc_state);
3773         }
3774
3775         if (sopt->sopt_valsize < size) {
3776                 /* short length, no need to return incomplete rules */
3777                 /* XXX: if superuser, no need to zero buffer */
3778                 bzero(sopt->sopt_val, sopt->sopt_valsize); 
3779                 return 0;
3780         }
3781         bp = sopt->sopt_val;
3782
3783         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3784                 bp = ipfw_copy_rule(rule, bp);
3785
3786         if (ipfw_dyn_v && dcount != 0) {
3787                 struct ipfw_ioc_state *ioc_state = bp;
3788                 uint32_t dcount2 = 0;
3789 #ifdef INVARIANTS
3790                 size_t old_size = size;
3791 #endif
3792                 int i;
3793
3794                 lockmgr(&dyn_lock, LK_SHARED);
3795
3796                 /* Check 'ipfw_dyn_v' again with lock held */
3797                 if (ipfw_dyn_v == NULL)
3798                         goto skip;
3799
3800                 for (i = 0; i < curr_dyn_buckets; i++) {
3801                         ipfw_dyn_rule *p;
3802
3803                         /*
3804                          * The # of dynamic rules may have grown after the
3805                          * snapshot of 'dyn_count' was taken, so we will have
3806                          * to check 'dcount' (snapshot of dyn_count) here to
3807                          * make sure that we don't overflow the pre-allocated
3808                          * buffer.
3809                          */
3810                         for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
3811                              p = p->next, ioc_state++, dcount--, dcount2++)
3812                                 ipfw_copy_state(p, ioc_state);
3813                 }
3814 skip:
3815                 lockmgr(&dyn_lock, LK_RELEASE);
3816
3817                 /*
3818                  * The # of dynamic rules may have shrunk after the
3819                  * snapshot of 'dyn_count' was taken.  To give the user a
3820                  * correct dynamic rule count, we use the 'dcount2'
3821                  * calculated above (with shared lockmgr lock held).
3822                  */
3823                 size = static_ioc_len +
3824                        (dcount2 * sizeof(struct ipfw_ioc_state));
3825                 KKASSERT(size <= old_size);
3826         }
3827
3828         crit_exit();
3829
3830         sopt->sopt_valsize = size;
3831         return 0;
3832 }
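
#if 0
/*
 * Illustrative userland sketch (an assumption): fetching the ruleset
 * through the IP_FW_GET socket option served by ipfw_ctl_get_rules()
 * above.  The kernel zeroes the buffer instead of returning truncated
 * data when it is too small, so a generously sized buffer is passed in.
 * 's' is assumed to be a raw IP socket.
 */
static int
example_get_rules(int s, void *buf, socklen_t buflen)
{
        /* On success 'buflen' is updated: static rules followed by states */
        return getsockopt(s, IPPROTO_IP, IP_FW_GET, buf, &buflen);
}
#endif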
3833
3834 static void
3835 ipfw_set_disable_dispatch(struct netmsg *nmsg)
3836 {
3837         struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
3838         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3839
3840         ctx->ipfw_gen++;
3841         ctx->ipfw_set_disable = lmsg->u.ms_result32;
3842
3843         ifnet_forwardmsg(lmsg, mycpuid + 1);
3844 }
3845
3846 static void
3847 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
3848 {
3849         struct netmsg nmsg;
3850         struct lwkt_msg *lmsg;
3851         uint32_t set_disable;
3852
3853         /* IPFW_DEFAULT_SET is always enabled */
3854         enable |= (1 << IPFW_DEFAULT_SET);
3855         set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
3856
3857         bzero(&nmsg, sizeof(nmsg));
3858         netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_set_disable_dispatch);
3859         lmsg = &nmsg.nm_lmsg;
3860         lmsg->u.ms_result32 = set_disable;
3861
3862         ifnet_domsg(lmsg, 0);
3863 }
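
/*
 * Worked example (illustrative): if sets 1 and 2 are currently disabled
 * (ipfw_set_disable == 0x06) and a caller asks to disable set 3 while
 * enabling set 1, the computation in ipfw_ctl_set_disable() above yields
 *
 *      (0x06 | 0x08) & ~(0x02 | (1 << IPFW_DEFAULT_SET)) == 0x0c
 *
 * i.e. sets 2 and 3 end up disabled and the default set stays enabled.
 */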
3864
3865 /**
3866  * {set|get}sockopt parser.
3867  */
3868 static int
3869 ipfw_ctl(struct sockopt *sopt)
3870 {
3871         int error, rulenum;
3872         uint32_t *masks;
3873         size_t size;
3874
3875         error = 0;
3876
3877         switch (sopt->sopt_name) {
3878         case IP_FW_GET:
3879                 error = ipfw_ctl_get_rules(sopt);
3880                 break;
3881
3882         case IP_FW_FLUSH:
3883                 /*
3884                  * Normally we cannot release the lock on each iteration.
3885                  * We could do it here only because we start from the head all
3886                  * the times so there is no risk of missing some entries.
3887                  * On the other hand, the risk is that we end up with
3888                  * a very inconsistent ruleset, so better keep the lock
3889                  * around the whole cycle.
3890                  *
3891                  * XXX this code can be improved by resetting the head of
3892                  * the list to point to the default rule, and then freeing
3893                  * the old list without the need for a lock.
3894                  */
3895
3896                 crit_enter();
3897                 ipfw_flush(0 /* keep default rule */);
3898                 crit_exit();
3899                 break;
3900
3901         case IP_FW_ADD:
3902                 error = ipfw_ctl_add_rule(sopt);
3903                 break;
3904
3905         case IP_FW_DEL:
3906                 /*
3907                  * IP_FW_DEL is used for deleting single rules or sets,
3908                  * and (ab)used to atomically manipulate sets.
3909                  * Argument size is used to distinguish between the two:
3910                  *    sizeof(uint32_t)
3911                  *      delete single rule or set of rules,
3912                  *      or reassign rules (or sets) to a different set.
3913                  *    2 * sizeof(uint32_t)
3914                  *      atomic disable/enable sets.
3915                  *      first uint32_t contains sets to be disabled,
3916                  *      second uint32_t contains sets to be enabled.
3917                  */
3918                 masks = sopt->sopt_val;
3919                 size = sopt->sopt_valsize;
3920                 if (size == sizeof(*masks)) {
3921                         /*
3922                          * Delete or reassign static rule
3923                          */
3924                         error = ipfw_ctl_alter(masks[0]);
3925                 } else if (size == (2 * sizeof(*masks))) {
3926                         /*
3927                          * Set enable/disable
3928                          */
3929                         ipfw_ctl_set_disable(masks[0], masks[1]);
3930                 } else {
3931                         error = EINVAL;
3932                 }
3933                 break;
3934
3935         case IP_FW_ZERO:
3936         case IP_FW_RESETLOG: /* argument is an int, the rule number */
3937                 rulenum = 0;
3938
3939                 if (sopt->sopt_val != 0) {
3940                     error = soopt_to_kbuf(sopt, &rulenum,
3941                             sizeof(int), sizeof(int));
3942                     if (error)
3943                         break;
3944                 }
3945                 error = ipfw_ctl_zero_entry(rulenum,
3946                         sopt->sopt_name == IP_FW_RESETLOG);
3947                 break;
3948
3949         default:
3950                 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
3951                 error = EINVAL;
3952         }
3953         return error;
3954 }
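
#if 0
/*
 * Illustrative userland sketch (an assumption): the 2 * sizeof(uint32_t)
 * form of IP_FW_DEL handled above, which atomically disables one group
 * of sets and enables another.  Here set 5 is disabled and set 6 is
 * enabled; 's' is assumed to be a raw IP socket.
 */
static int
example_toggle_sets(int s)
{
        uint32_t masks[2];

        masks[0] = 1 << 5;      /* sets to be disabled */
        masks[1] = 1 << 6;      /* sets to be enabled */
        return setsockopt(s, IPPROTO_IP, IP_FW_DEL, masks, sizeof(masks));
}
#endif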
3955
3956 /*
3957  * This procedure is only used to handle keepalives. It is invoked
3958  * every dyn_keepalive_period seconds.
3959  */
3960 static void
3961 ipfw_tick(void *dummy __unused)
3962 {
3963         time_t keep_alive;
3964         uint32_t gen;
3965         int i;
3966
3967         if (ipfw_dyn_v == NULL || dyn_count == 0)
3968                 goto done;
3969
3970         keep_alive = time_second;
3971
3972         lockmgr(&dyn_lock, LK_EXCLUSIVE);
3973 again:
3974         if (ipfw_dyn_v == NULL || dyn_count == 0) {
3975                 lockmgr(&dyn_lock, LK_RELEASE);
3976                 goto done;
3977         }
3978         gen = dyn_buckets_gen;
3979
3980         for (i = 0; i < curr_dyn_buckets; i++) {
3981                 ipfw_dyn_rule *q, *prev;
3982
3983                 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
3984                         uint32_t ack_rev, ack_fwd;
3985                         struct ipfw_flow_id id;
3986
3987                         if (q->dyn_type == O_LIMIT_PARENT)
3988                                 goto next;
3989
3990                         if (TIME_LEQ(q->expire, time_second)) {
3991                                 /* State expired */
3992                                 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
3993                                 continue;
3994                         }
3995
3996                         /*
3997                          * Keep alive processing
3998                          */
3999
4000                         if (!dyn_keepalive)
4001                                 goto next;
4002                         if (q->id.proto != IPPROTO_TCP)
4003                                 goto next;
4004                         if ((q->state & BOTH_SYN) != BOTH_SYN)
4005                                 goto next;
4006                         if (TIME_LEQ(time_second + dyn_keepalive_interval,
4007                             q->expire))
4008                                 goto next;      /* too early */
4009                         if (q->keep_alive == keep_alive)
4010                         goto next;      /* already done */
4011
4012                         /*
4013                          * Save the necessary information, so that it
4014                          * survives possible blocking in send_pkt()
4015                          */
4016                         id = q->id;
4017                         ack_rev = q->ack_rev;
4018                         ack_fwd = q->ack_fwd;
4019
4020                         /* Sending has been started */
4021                         q->keep_alive = keep_alive;
4022
4023                         /* Release the lock to avoid a possible deadlock */
4024                         lockmgr(&dyn_lock, LK_RELEASE);
4025                         send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
4026                         send_pkt(&id, ack_fwd - 1, ack_rev, 0);
4027                         lockmgr(&dyn_lock, LK_EXCLUSIVE);
4028
4029                         if (gen != dyn_buckets_gen) {
4030                                 /*
4031                                  * Dyn bucket array has been changed during
4032                                  * the above two sending; reiterate.
4033                                  */
4034                                 goto again;
4035                         }
4036 next:
4037                         prev = q;
4038                         q = q->next;
4039                 }
4040         }
4041         lockmgr(&dyn_lock, LK_RELEASE);
4042 done:
4043         callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
4044                       ipfw_tick, NULL);
4045 }
4046
4047 static int
4048 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
4049 {
4050         return sysctl_int_range(oidp, arg1, arg2, req,
4051                IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
4052 }
4053
4054 static int
4055 ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
4056 {
4057         int error, value;
4058
4059         lockmgr(&dyn_lock, LK_EXCLUSIVE);
4060
4061         value = dyn_buckets;
4062         error = sysctl_handle_int(oidp, &value, 0, req);
4063         if (error || !req->newptr)
4064                 goto back;
4065
4066         /*
4067          * Make sure we have a power of 2 and
4068          * do not allow more than 64k entries.
4069          */
4070         error = EINVAL;
4071         if (value <= 1 || value > 65536)
4072                 goto back;
4073         if ((value & (value - 1)) != 0)
4074                 goto back;
4075
4076         error = 0;
4077         dyn_buckets = value;
4078 back:
4079         lockmgr(&dyn_lock, LK_RELEASE);
4080         return error;
4081 }
4082
4083 static int
4084 ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
4085 {
4086         return sysctl_int_range(oidp, arg1, arg2, req,
4087                                 1, dyn_keepalive_period - 1);
4088 }
4089
4090 static int
4091 ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
4092 {
4093         return sysctl_int_range(oidp, arg1, arg2, req,
4094                                 1, dyn_keepalive_period - 1);
4095 }
4096
4097 static void
4098 ipfw_ctx_init_dispatch(struct netmsg *nmsg)
4099 {
4100         struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
4101         struct ipfw_context *ctx;
4102         struct ip_fw *def_rule;
4103
4104         ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
4105         ipfw_ctx[mycpuid] = ctx;
4106
4107         def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);
4108
4109         def_rule->act_ofs = 0;
4110         def_rule->rulenum = IPFW_DEFAULT_RULE;
4111         def_rule->cmd_len = 1;
4112         def_rule->set = IPFW_DEFAULT_SET;
4113
4114         def_rule->cmd[0].len = 1;
4115 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
4116         def_rule->cmd[0].opcode = O_ACCEPT;
4117 #else
4118         def_rule->cmd[0].opcode = O_DENY;
4119 #endif
4120
4121         def_rule->refcnt = 1;
4122         def_rule->cpuid = mycpuid;
4123
4124         /* Install the default rule */
4125         ctx->ipfw_default_rule = def_rule;
4126         ctx->ipfw_layer3_chain = def_rule;
4127
4128         /* Link rule CPU sibling */
4129         ipfw_link_sibling(fwmsg, def_rule);
4130
4131         /* Statistics only need to be updated once */
4132         if (mycpuid == 0)
4133                 ipfw_inc_static_count(def_rule);
4134
4135         ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
4136 }
4137
4138 static void
4139 ipfw_init_dispatch(struct netmsg *nmsg)
4140 {
4141         struct netmsg_ipfw fwmsg;
4142         int error = 0;
4143
4144         crit_enter();
4145
4146         if (IPFW_LOADED) {
4147                 kprintf("IP firewall already loaded\n");
4148                 error = EEXIST;
4149                 goto reply;
4150         }
4151
4152         bzero(&fwmsg, sizeof(fwmsg));
4153         netmsg_init(&fwmsg.nmsg, &curthread->td_msgport, 0,
4154                     ipfw_ctx_init_dispatch);
4155         ifnet_domsg(&fwmsg.nmsg.nm_lmsg, 0);
4156
4157         ip_fw_chk_ptr = ipfw_chk;
4158         ip_fw_ctl_ptr = ipfw_ctl;
4159         ip_fw_dn_io_ptr = ipfw_dummynet_io;
4160
4161         kprintf("ipfw2 initialized, divert %s, "
4162                 "rule-based forwarding enabled, default to %s, logging ",
4163 #ifdef IPDIVERT
4164                 "enabled",
4165 #else
4166                 "disabled",
4167 #endif
4168                 ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
4169                 O_ACCEPT ? "accept" : "deny");
4170
4171 #ifdef IPFIREWALL_VERBOSE
4172         fw_verbose = 1;
4173 #endif
4174 #ifdef IPFIREWALL_VERBOSE_LIMIT
4175         verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
4176 #endif
4177         if (fw_verbose == 0) {
4178                 kprintf("disabled\n");
4179         } else if (verbose_limit == 0) {
4180                 kprintf("unlimited\n");
4181         } else {
4182                 kprintf("limited to %d packets/entry by default\n",
4183                         verbose_limit);
4184         }
4185
4186         callout_init(&ipfw_timeout_h);
4187         lockinit(&dyn_lock, "ipfw_dyn", 0, 0);
4188
4189         ip_fw_loaded = 1;
4190         callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);
4191 reply:
4192         crit_exit();
4193         lwkt_replymsg(&nmsg->nm_lmsg, error);
4194 }
4195
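/*
 * Send the attach request to IPFW_CFGPORT and wait for it to complete.
 */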
4196 static int
4197 ipfw_init(void)
4198 {
4199         struct netmsg smsg;
4200
4201         netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_init_dispatch);
4202         return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
4203 }
4204
4205 #ifdef KLD_MODULE
4206
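/*
 * Detach the firewall.  Fails with EBUSY while the module is still
 * referenced; otherwise stop the tick callout, unhook the function
 * pointers, flush all rules (including the default rule) and free the
 * per-CPU contexts.
 */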
4207 static void
4208 ipfw_fini_dispatch(struct netmsg *nmsg)
4209 {
4210         int error = 0, cpu;
4211
4212         crit_enter();
4213
4214         if (ipfw_refcnt != 0) {
4215                 error = EBUSY;
4216                 goto reply;
4217         }
4218
4219         callout_stop(&ipfw_timeout_h);
4220
4221         ip_fw_loaded = 0;
4222         netmsg_service_sync();
4223
4224         ip_fw_chk_ptr = NULL;
4225         ip_fw_ctl_ptr = NULL;
4226         ip_fw_dn_io_ptr = NULL;
4227         ipfw_flush(1 /* kill default rule */);
4228
4229         /* Free the per-CPU contexts */
4230         for (cpu = 0; cpu < ncpus; ++cpu)
4231                 kfree(ipfw_ctx[cpu], M_IPFW);
4232
4233         kprintf("IP firewall unloaded\n");
4234 reply:
4235         crit_exit();
4236         lwkt_replymsg(&nmsg->nm_lmsg, error);
4237 }
4238
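/*
 * Send the detach request to IPFW_CFGPORT and wait for it to complete.
 */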
4239 static int
4240 ipfw_fini(void)
4241 {
4242         struct netmsg smsg;
4243
4244         netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_fini_dispatch);
4245         return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
4246 }
4247
4248 #endif  /* KLD_MODULE */
4249
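/*
 * Module event handler.  A statically compiled ipfw cannot be
 * unloaded; unloading is only supported for the KLD build.
 */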
4250 static int
4251 ipfw_modevent(module_t mod, int type, void *unused)
4252 {
4253         int err = 0;
4254
4255         switch (type) {
4256         case MOD_LOAD:
4257                 err = ipfw_init();
4258                 break;
4259
4260         case MOD_UNLOAD:
4261 #ifndef KLD_MODULE
4262                 kprintf("ipfw statically compiled, cannot unload\n");
4263                 err = EBUSY;
4264 #else
4265                 err = ipfw_fini();
4266 #endif
4267                 break;
4268         default:
4269                 break;
4270         }
4271         return err;
4272 }
4273
4274 static moduledata_t ipfwmod = {
4275         "ipfw",
4276         ipfw_modevent,
4277         0
4278 };
4279 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
4280 MODULE_VERSION(ipfw, 1);