383a8c224e3d1321abf0e1387af1cd2919197bf6
[dragonfly.git] / sys / net / ipfw / ip_fw2.c
1 /*
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
26  */
27
28 /*
29  * Implement IP packet firewall (new version)
30  */
31
32 #include "opt_ipfw.h"
33 #include "opt_inet.h"
34 #ifndef INET
35 #error IPFIREWALL requires INET.
36 #endif /* INET */
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/sysctl.h>
47 #include <sys/syslog.h>
48 #include <sys/ucred.h>
49 #include <sys/in_cksum.h>
50 #include <sys/limits.h>
51 #include <sys/lock.h>
52 #include <sys/tree.h>
53
54 #include <net/if.h>
55 #include <net/route.h>
56 #include <net/pfil.h>
57 #include <net/dummynet/ip_dummynet.h>
58
59 #include <sys/thread2.h>
60 #include <sys/mplock2.h>
61 #include <net/netmsg2.h>
62
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_icmp.h>
70 #include <netinet/tcp.h>
71 #include <netinet/tcp_seq.h>
72 #include <netinet/tcp_timer.h>
73 #include <netinet/tcp_var.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet/ip_divert.h>
78 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
79
80 #include <net/ipfw/ip_fw2.h>
81
82 #ifdef IPFIREWALL_DEBUG /* DPRINTF() logs via kprintf() only when fw_debug > 0 */
83 #define DPRINTF(fmt, ...) \
84 do { \
85         if (fw_debug > 0) \
86                 kprintf(fmt, __VA_ARGS__); \
87 } while (0)
88 #else
89 #define DPRINTF(fmt, ...)       ((void)0) /* compiled out; arguments not evaluated */
90 #endif
91
92 /*
93  * Description about per-CPU rule duplication:
94  *
95  * Module loading/unloading and all ioctl operations are serialized
96  * by netisr0, so we don't have any ordering or locking problems.
97  *
98  * Following graph shows how operation on per-CPU rule list is
99  * performed [2 CPU case]:
100  *
101  *   CPU0                 CPU1
102  *
103  * netisr0 <------------------------------------+
104  *  domsg                                       |
105  *    :                                         |
106  *    :(delete/add...)                          |
107  *    :                                         |
108  *    :         netmsg                          | netmsg
109  *  forwardmsg---------->netisr1                |
110  *                          :                   |
111  *                          :(delete/add...)    |
112  *                          :                   |
113  *                          :                   |
114  *                        replymsg--------------+
115  *
116  *
117  *
118  * Rule structure [2 CPU case]
119  *
120  *    CPU0               CPU1
121  *
122  * layer3_chain       layer3_chain
123  *     |                  |
124  *     V                  V
125  * +-------+ sibling  +-------+ sibling
126  * | rule1 |--------->| rule1 |--------->NULL
127  * +-------+          +-------+
128  *     |                  |
129  *     |next              |next
130  *     V                  V
131  * +-------+ sibling  +-------+ sibling
132  * | rule2 |--------->| rule2 |--------->NULL
133  * +-------+          +-------+
134  *
135  * ip_fw.sibling:
136  * 1) Ease statistics calculation during IP_FW_GET.  We only need to
137  *    iterate layer3_chain in netisr0; the current rule's duplication
138  *    to the other CPUs could safely be read-only accessed through
139  *    ip_fw.sibling.
140  * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
141  *    a) In netisr0 rule3 is determined to be inserted between rule1
142  *       and rule2.  To make this decision we need to iterate the
143  *       layer3_chain in netisr0.  The netmsg, which is used to insert
144  *       the rule, will contain rule1 in netisr0 as prev_rule and rule2
145  *       in netisr0 as next_rule.
146  *    b) After the insertion in netisr0 is done, we will move on to
147  *       netisr1.  But instead of relocating the rule3's position in
148  *       netisr1 by iterating the layer3_chain in netisr1, we set the
149  *       netmsg's prev_rule to rule1->sibling and next_rule to
150  *       rule2->sibling before the netmsg is forwarded to netisr1 from
151  *       netisr0.
152  */
153
154 /*
155  * Description of states and tracks.
156  *
157  * Both states and tracks are stored in per-cpu RB trees instead of
158  * per-cpu hash tables to avoid the worst case hash degeneration.
159  *
160  * The lifetimes of states and tracks are regulated by dyn_*_lifetime,
161  * measured in seconds and depending on the flags.
162  *
163  * When a packet is received, its address fields are first masked with
164  * the mask defined for the rule, then matched against the entries in
165  * the per-cpu state RB tree.  States are generated by 'keep-state'
166  * and 'limit' options.
167  *
168  * The max number of states is ipfw_state_max.  When we reach the
169  * maximum number of states we do not create anymore.  This is done to
170  * avoid consuming too much memory, but also too much time when
171  * searching on each packet.
172  *
173  * Each state holds a pointer to the parent ipfw rule of the current
174  * CPU so we know what action to perform.  States are removed when the
175  * parent rule is deleted.  XXX we should make them survive.
176  *
177  * There are some limitations with states -- we do not obey the
178  * 'randomized match', and we do not do multiple passes through the
179  * firewall.  XXX check the latter!!!
180  *
181  * States grow independently on each CPU, e.g. 2 CPU case:
182  *
183  *        CPU0                     CPU1
184  * ...................      ...................
185  * :  state RB tree  :      :  state RB tree  :
186  * :                 :      :                 :
187  * : state1   state2 :      :      state3     :
188  * :     |    |      :      :        |        :
189  * :.....|....|......:      :........|........:
190  *       |    |                      |
191  *       |    |                      |st_rule
192  *       |    |                      |
193  *       V    V                      V
194  *     +-------+                 +-------+
195  *     | rule1 |                 | rule1 |
196  *     +-------+                 +-------+
197  *
198  * Tracks are used to enforce limits on the number of sessions.  Tracks
199  * are generated by 'limit' option.
200  *
201  * The max number of tracks is ipfw_track_max.  When we reach the
202  * maximum number of tracks we do not create anymore.  This is done to
203  * avoid consuming too much memory.
204  *
205  * Tracks are organized into two layers, track counter RB tree is
206  * shared between CPUs, track RB tree is per-cpu.  States generated by
207  * 'limit' option are linked to the track in addition to the per-cpu
208  * state RB tree; mainly to ease expiration.  e.g. 2 CPU case:
209  *
210  *             ..............................
211  *             :    track counter RB tree   :
212  *             :                            :
213  *             :        +-----------+       :
214  *             :        |  trkcnt1  |       :
215  *             :        |           |       :
216  *             :      +--->counter<----+    :
217  *             :      | |           |  |    :
218  *             :      | +-----------+  |    :
219  *             :......|................|....:
220  *                    |                |
221  *        CPU0        |                |         CPU1
222  * .................  |t_count         |  .................
223  * : track RB tree :  |                |  : track RB tree :
224  * :               :  |                |  :               :
225  * : +-->track1-------+                +--------track2    :
226  * : |     A       :                      :               :
227  * : |     |       :                      :               :
228  * :.|.....|.......:                      :...............:
229  *   |     +----------------+
230  *   | .................... |
231  *   | :   state RB tree  : |st_track
232  *   | :                  : |
233  *   +---state1    state2---+
234  *     :     |       |    :
235  *     :.....|.......|....:
236  *           |       |
237  *           |       |st_rule
238  *           V       V
239  *         +----------+
240  *         |   rule1  |
241  *         +----------+
242  */
243
244 #define IPFW_AUTOINC_STEP_MIN   1
245 #define IPFW_AUTOINC_STEP_MAX   1000
246 #define IPFW_AUTOINC_STEP_DEF   100
247
248 #define IPFW_TABLE_MAX_DEF      64
249
250 #define IPFW_DEFAULT_RULE       65535   /* rulenum for the default rule */
251 #define IPFW_DEFAULT_SET        31      /* set number for the default rule */
252
253 #define MATCH_REVERSE           0
254 #define MATCH_FORWARD           1
255 #define MATCH_NONE              2
256 #define MATCH_UNKNOWN           3
257
258 #define IPFW_STATE_TCPFLAGS     (TH_SYN | TH_FIN | TH_RST)
259 #define IPFW_STATE_TCPSTATES    (IPFW_STATE_TCPFLAGS |  \
260                                  (IPFW_STATE_TCPFLAGS << 8))
261
262 #define BOTH_SYN                (TH_SYN | (TH_SYN << 8))
263 #define BOTH_FIN                (TH_FIN | (TH_FIN << 8))
264 #define BOTH_RST                (TH_RST | (TH_RST << 8))
265 /* TH_ACK here means FIN was ACKed. */
266 #define BOTH_FINACK             (TH_ACK | (TH_ACK << 8))
267
268 #define IPFW_STATE_TCPCLOSED(s) ((s)->st_proto == IPPROTO_TCP &&        \
269                                  (((s)->st_state & BOTH_RST) ||         \
270                                   ((s)->st_state & BOTH_FINACK) == BOTH_FINACK))
271
272 #define O_ANCHOR                O_NOP
273
274 struct netmsg_ipfw {    /* netmsg: install one rule on every cpu in turn */
275         struct netmsg_base      base;
276         const struct ipfw_ioc_rule *ioc_rule;   /* rule image from the ioctl */
277         struct ip_fw            *next_rule;     /* insert before this rule (current cpu) */
278         struct ip_fw            *prev_rule;     /* insert after this rule (current cpu) */
279         struct ip_fw            *sibling;       /* copy installed on the previous cpu */
280         uint32_t                rule_flags;     /* flags for the new rule */
281         struct ip_fw            **cross_rules;  /* presumably cross-referenced rule array; confirm */
282 };
283
284 struct netmsg_del {     /* netmsg: delete rule(s) / manipulate rule sets */
285         struct netmsg_base      base;
286         struct ip_fw            *start_rule;    /* first candidate rule on the current cpu */
287         struct ip_fw            *prev_rule;     /* rule preceding start_rule on the current cpu */
288         uint16_t                rulenum;        /* rule number to operate on */
289         uint8_t                 from_set;       /* source set — presumably for set move; confirm */
290         uint8_t                 to_set;         /* target set — presumably for set move; confirm */
291 };
292
293 struct netmsg_zent {    /* netmsg: zero rule counters ("zent" = zero entry) */
294         struct netmsg_base      base;
295         struct ip_fw            *start_rule;    /* first matching rule on the current cpu */
296         uint16_t                rulenum;        /* rule number whose counters to zero */
297         uint16_t                log_only;       /* non-zero: reset only logging state — confirm */
298 };
299
300 struct netmsg_cpstate { /* netmsg: copy states out to a userland buffer */
301         struct netmsg_base      base;
302         struct ipfw_ioc_state   *ioc_state;     /* next slot in the output buffer */
303         int                     state_cntmax;   /* buffer capacity, in states */
304         int                     state_cnt;      /* # of states copied so far */
305 };
306
307 struct netmsg_tblent {  /* netmsg: add one address/mask entry to a table */
308         struct netmsg_base      base;
309         struct sockaddr         *key;           /* address to insert */
310         struct sockaddr         *netmask;       /* netmask of the entry */
311         struct ipfw_tblent      *sibling;       /* entry installed on the previous cpu */
312         int                     tableid;        /* target table */
313 };
314
315 struct netmsg_tblflush {        /* netmsg: flush a table on every cpu */
316         struct netmsg_base      base;
317         int                     tableid;        /* table to flush */
318         int                     destroy;        /* non-zero: also destroy the table */
319 };
320
321 struct netmsg_tblexp {  /* netmsg: expire aged table entries on each cpu */
322         struct netmsg_base      base;
323         time_t                  expire;         /* expiration cutoff time — confirm semantics */
324         int                     tableid;        /* table to scan */
325         int                     cnt;            /* # of entries visited */
326         int                     expcnt;         /* # of entries expired */
327         struct radix_node_head  *rnh;           /* radix tree being walked */
328 };
329
330 struct ipfw_table_cp {  /* cursor for copying table entries to userland */
331         struct ipfw_ioc_tblent  *te;            /* output entry array */
332         int                     te_idx;         /* current output index */
333         int                     te_cnt;         /* total # of output slots — confirm */
334 };
335
336 struct ipfw_addrs {     /* flow address pair (addr1 = src, addr2 = dst; see *_saddr/*_daddr macros below) */
337         uint32_t                addr1;
338         uint32_t                addr2;
339 };
340
341 struct ipfw_ports {     /* flow port pair (port1 = sport, port2 = dport; see *_sport/*_dport macros below) */
342         uint16_t                port1;
343         uint16_t                port2;
344 };
345
346 struct ipfw_key {       /* lookup key shared by states, tracks and track counters */
347         union {
348                 struct ipfw_addrs addrs;        /* individual addresses */
349                 uint64_t        value;          /* both addresses as one word */
350         } addr_u;
351         union {
352                 struct ipfw_ports ports;        /* individual ports */
353                 uint32_t        value;          /* both ports as one word */
354         } port_u;
355         uint8_t                 proto;          /* IPPROTO_ */
356         uint8_t                 swap;   /* IPFW_KEY_SWAP_ */
357         uint16_t                rsvd2;          /* unused padding */
358 };
359
360 #define IPFW_KEY_SWAP_ADDRS     0x1
361 #define IPFW_KEY_SWAP_PORTS     0x2
362 #define IPFW_KEY_SWAP_ALL       (IPFW_KEY_SWAP_ADDRS | IPFW_KEY_SWAP_PORTS)
363
364 struct ipfw_trkcnt {    /* cpu-shared track counter (see track diagram above) */
365         RB_ENTRY(ipfw_trkcnt)   tc_rblink;      /* shared trkcnt tree linkage */
366         struct ipfw_key         tc_key;         /* flow key */
367         uintptr_t               tc_ruleid;      /* id of the owning rule — confirm */
368         int                     tc_refs;        /* # of tracks referencing this counter — confirm */
369         int                     tc_count;       /* session count shared by all cpus */
370         time_t                  tc_expire;      /* userland get-only */
371         uint16_t                tc_rulenum;     /* userland get-only */
372 } __cachealign;
373
374 #define tc_addrs                tc_key.addr_u.value
375 #define tc_ports                tc_key.port_u.value
376 #define tc_proto                tc_key.proto
377 #define tc_saddr                tc_key.addr_u.addrs.addr1
378 #define tc_daddr                tc_key.addr_u.addrs.addr2
379 #define tc_sport                tc_key.port_u.ports.port1
380 #define tc_dport                tc_key.port_u.ports.port2
381
382 RB_HEAD(ipfw_trkcnt_tree, ipfw_trkcnt);
383
384 struct ipfw_state;
385
386 struct ipfw_track {     /* per-cpu 'limit' track (see description above) */
387         RB_ENTRY(ipfw_track)    t_rblink;       /* per-cpu track tree linkage */
388         struct ipfw_key         t_key;          /* flow key */
389         struct ip_fw            *t_rule;        /* rule on this cpu that created the track */
390         time_t                  t_lastexp;      /* time of last expiration attempt — confirm */
391         LIST_HEAD(, ipfw_state) t_state_list;   /* 'limit' states linked to this track */
392         time_t                  t_expire;       /* expire time */
393         volatile int            *t_count;       /* -> shared counter (t_trkcnt's tc_count) */
394         struct ipfw_trkcnt      *t_trkcnt;      /* cpu-shared counter of this flow */
395         TAILQ_ENTRY(ipfw_track) t_link;         /* per-cpu track list linkage */
396 };
397
398 #define t_addrs                 t_key.addr_u.value
399 #define t_ports                 t_key.port_u.value
400 #define t_proto                 t_key.proto
401 #define t_saddr                 t_key.addr_u.addrs.addr1
402 #define t_daddr                 t_key.addr_u.addrs.addr2
403 #define t_sport                 t_key.port_u.ports.port1
404 #define t_dport                 t_key.port_u.ports.port2
405
406 RB_HEAD(ipfw_track_tree, ipfw_track);
407 TAILQ_HEAD(ipfw_track_list, ipfw_track);
408
409 struct ipfw_state {     /* per-cpu dynamic state ('keep-state'/'limit' options) */
410         RB_ENTRY(ipfw_state)    st_rblink;      /* per-cpu state tree linkage */
411         struct ipfw_key         st_key;         /* masked flow key */
412
413         time_t                  st_expire;      /* expire time */
414         struct ip_fw            *st_rule;       /* parent rule on this cpu */
415
416         uint64_t                st_pcnt;        /* packets */
417         uint64_t                st_bcnt;        /* bytes */
418
419         /*
420          * st_state:
421          * State of this rule, typically a combination of TCP flags.
422          *
423          * st_ack_fwd/st_ack_rev:
424          * Most recent ACKs in forward and reverse direction.  They
425          * are used to generate keepalives.
426          */
427         uint32_t                st_state;
428         uint32_t                st_ack_fwd;
429         uint32_t                st_seq_fwd;
430         uint32_t                st_ack_rev;
431         uint32_t                st_seq_rev;
432
433         uint16_t                st_flags;       /* IPFW_STATE_F_ */
434         uint16_t                st_type;        /* O_KEEP_STATE/O_LIMIT */
435         struct ipfw_track       *st_track;      /* owning track, for 'limit' states */
436
437         LIST_ENTRY(ipfw_state)  st_trklink;     /* linkage on st_track's state list */
438         TAILQ_ENTRY(ipfw_state) st_link;        /* per-cpu state list linkage */
439 };
440
441 #define st_addrs                st_key.addr_u.value
442 #define st_ports                st_key.port_u.value
443 #define st_proto                st_key.proto
444 #define st_swap                 st_key.swap
445
446 #define IPFW_STATE_F_ACKFWD     0x0001
447 #define IPFW_STATE_F_SEQFWD     0x0002
448 #define IPFW_STATE_F_ACKREV     0x0004
449 #define IPFW_STATE_F_SEQREV     0x0008
450
451 TAILQ_HEAD(ipfw_state_list, ipfw_state);
452 RB_HEAD(ipfw_state_tree, ipfw_state);
453
454 struct ipfw_tblent {    /* per-cpu lookup table entry */
455         struct radix_node       te_nodes[2];    /* radix tree linkage */
456         struct sockaddr_in      te_key;         /* address key of this entry */
457         u_long                  te_use;         /* use counter */
458         time_t                  te_lastuse;     /* time of last use */
459         struct ipfw_tblent      *te_sibling;    /* same entry on the next cpu — confirm */
460         volatile int            te_expired;     /* non-zero once the entry is expired */
461 };
462
463 struct ipfw_context {   /* per-cpu ipfw state; one instance per cpu (see ipfw_ctx[]) */
464         struct ip_fw            *ipfw_layer3_chain;     /* rules for layer3 */
465         struct ip_fw            *ipfw_default_rule;     /* default rule */
466         uint64_t                ipfw_norule_counter;    /* ipfw_log(NULL) stat*/
467
468         /*
469          * ipfw_set_disable contains one bit per set value (0..31).
470          * If the bit is set, all rules with the corresponding set
471          * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
472          * default rule and CANNOT be disabled.
473          */
474         uint32_t                ipfw_set_disable;
475
476         uint8_t                 ipfw_flags;     /* IPFW_FLAG_ */
477
478         struct ip_fw            *ipfw_cont_rule;        /* rule to continue matching from — confirm */
479
480         struct ipfw_state_tree  ipfw_state_tree;        /* per-cpu state RB tree */
481         struct ipfw_state_list  ipfw_state_list;
482         int                     ipfw_state_loosecnt;    /* loosely-synced state count — confirm */
483         int                     ipfw_state_cnt;         /* # of states on this cpu */
484
485         union {                 /* scratch keys for RB-tree lookups */
486                 struct ipfw_state state;
487                 struct ipfw_track track;
488                 struct ipfw_trkcnt trkcnt;
489         } ipfw_tmpkey;          /* accessed via *_tmpkey macros below */
490
491         struct ipfw_track_tree  ipfw_track_tree;        /* per-cpu track RB tree */
492         struct ipfw_track_list  ipfw_track_list;
493         struct ipfw_trkcnt      *ipfw_trkcnt_spare;     /* preallocated spare trkcnt — confirm */
494
495         struct callout          ipfw_stateto_ch;        /* state expiration callout */
496         time_t                  ipfw_state_lastexp;
497         struct netmsg_base      ipfw_stateexp_nm;
498         struct netmsg_base      ipfw_stateexp_more;
499         struct ipfw_state       ipfw_stateexp_anch;     /* scan anchor (presumed) */
500
501         struct callout          ipfw_trackto_ch;        /* track expiration callout */
502         time_t                  ipfw_track_lastexp;
503         struct netmsg_base      ipfw_trackexp_nm;
504         struct netmsg_base      ipfw_trackexp_more;
505         struct ipfw_track       ipfw_trackexp_anch;     /* scan anchor (presumed) */
506
507         struct callout          ipfw_keepalive_ch;      /* keepalive callout */
508         struct netmsg_base      ipfw_keepalive_nm;
509         struct netmsg_base      ipfw_keepalive_more;
510         struct ipfw_state       ipfw_keepalive_anch;    /* scan anchor (presumed) */
511
512         /*
513          * Statistics
514          */
515         u_long                  ipfw_sts_reap;
516         u_long                  ipfw_sts_reapfailed;
517         u_long                  ipfw_sts_overflow;
518         u_long                  ipfw_sts_nomem;
519         u_long                  ipfw_sts_tcprecycled;
520
521         u_long                  ipfw_tks_nomem;
522         u_long                  ipfw_tks_reap;
523         u_long                  ipfw_tks_reapfailed;
524         u_long                  ipfw_tks_overflow;
525         u_long                  ipfw_tks_cntnomem;
526
527         u_long                  ipfw_frags;
528         u_long                  ipfw_defraged;
529         u_long                  ipfw_defrag_remote;
530
531         /* Last field */
532         struct radix_node_head  *ipfw_tables[];         /* flexible array; one radix tree per table */
533 };
534
535 #define IPFW_FLAG_KEEPALIVE     0x01
536 #define IPFW_FLAG_STATEEXP      0x02
537 #define IPFW_FLAG_TRACKEXP      0x04
538 #define IPFW_FLAG_STATEREAP     0x08
539 #define IPFW_FLAG_TRACKREAP     0x10
540
541 #define ipfw_state_tmpkey       ipfw_tmpkey.state
542 #define ipfw_track_tmpkey       ipfw_tmpkey.track
543 #define ipfw_trkcnt_tmpkey      ipfw_tmpkey.trkcnt
544
545 struct ipfw_global {    /* state shared by all cpus */
546         int                     ipfw_state_loosecnt;    /* cache aligned */
547         time_t                  ipfw_state_globexp __cachealign;        /* last global state expire — confirm */
548
549         struct lwkt_token       ipfw_trkcnt_token __cachealign; /* serializes trkcnt tree access — confirm */
550         struct ipfw_trkcnt_tree ipfw_trkcnt_tree;       /* cpu-shared track counter tree */
551         int                     ipfw_trkcnt_cnt;        /* # of tracks (exported via track_cnt sysctl) */
552         time_t                  ipfw_track_globexp;     /* last global track expire — confirm */
553
554         /* Accessed in netisr0. */
555         struct ip_fw            *ipfw_crossref_free __cachealign;
556         struct callout          ipfw_crossref_ch;
557         struct netmsg_base      ipfw_crossref_nm;
558
559 #ifdef KLD_MODULE
560         /*
561          * The module cannot be unloaded while there are references
562          * to certain rules of ipfw(4), e.g. from dummynet(4).
563          */
564         int                     ipfw_refcnt __cachealign;
565 #endif
566 } __cachealign;
567
568 static struct ipfw_context      *ipfw_ctx[MAXCPU];
569
570 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
571
572 /*
573  * Following two global variables are accessed and updated only
574  * in netisr0.
575  */
576 static uint32_t static_count;   /* # of static rules */
577 static uint32_t static_ioc_len; /* bytes of static rules */
578
579 /*
580  * If 1, then ipfw static rules are being flushed,
581  * ipfw_chk() will skip to the default rule.
582  */
583 static int ipfw_flushing;
584
585 static int fw_verbose;
586 static int verbose_limit;
587
588 static int fw_debug;
589 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
590
591 static int      ipfw_table_max = IPFW_TABLE_MAX_DEF;
592
593 static int      ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
594 static int      ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
595
596 TUNABLE_INT("net.inet.ip.fw.table_max", &ipfw_table_max);
597
598 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
599 SYSCTL_NODE(_net_inet_ip_fw, OID_AUTO, stats, CTLFLAG_RW, 0,
600     "Firewall statistics");
601
602 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
603     &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
604 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
605     &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
606     "Rule number autoincrement step"); /* fixed typo: was "autincrement" */
607 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO,one_pass,CTLFLAG_RW,
608     &fw_one_pass, 0,
609     "Only do a single pass through ipfw when using dummynet(4)");
610 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
611     &fw_debug, 0, "Enable printing of debug ip_fw statements");
612 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
613     &fw_verbose, 0, "Log matches to ipfw rules");
614 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
615     &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
616 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, table_max, CTLFLAG_RD,
617     &ipfw_table_max, 0, "Max # of tables");
618
619 static int      ipfw_sysctl_dyncnt(SYSCTL_HANDLER_ARGS);
620 static int      ipfw_sysctl_dynmax(SYSCTL_HANDLER_ARGS);
621 static int      ipfw_sysctl_statecnt(SYSCTL_HANDLER_ARGS);
622 static int      ipfw_sysctl_statemax(SYSCTL_HANDLER_ARGS);
623 static int      ipfw_sysctl_scancnt(SYSCTL_HANDLER_ARGS);
624 static int      ipfw_sysctl_stat(SYSCTL_HANDLER_ARGS);
625
626 /*
627  * Timeouts for various events in handing states.
628  *
629  * NOTE:
630  * 1 == 0~1 second.
631  * 2 == 1~2 second(s).
632  *
633  * We use 2 seconds for FIN lifetime, so that the states will not be
634  * ripped prematurely.
635  */
636 static uint32_t dyn_ack_lifetime = 300;
637 static uint32_t dyn_syn_lifetime = 20;
638 static uint32_t dyn_finwait_lifetime = 20;
639 static uint32_t dyn_fin_lifetime = 2;
640 static uint32_t dyn_rst_lifetime = 2;
641 static uint32_t dyn_udp_lifetime = 10;
642 static uint32_t dyn_short_lifetime = 5; /* used by tracks too */
643
644 /*
645  * Keepalives are sent if dyn_keepalive is set. They are sent every
646  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
647  * seconds of lifetime of a rule.
648  */
649 static uint32_t dyn_keepalive_interval = 20;
650 static uint32_t dyn_keepalive_period = 5;
651 static uint32_t dyn_keepalive = 1;      /* do send keepalives */
652
653 static struct ipfw_global       ipfw_gd;
654 static int      ipfw_state_loosecnt_updthr;
655 static int      ipfw_state_max = 4096;  /* max # of states */
656 static int      ipfw_track_max = 4096;  /* max # of tracks */
657
658 static int      ipfw_state_headroom;    /* setup at module load time */
659 static int      ipfw_state_reap_min = 8;
660 static int      ipfw_state_expire_max = 32;
661 static int      ipfw_state_scan_max = 256;
662 static int      ipfw_keepalive_max = 8;
663 static int      ipfw_track_reap_max = 4;
664 static int      ipfw_track_expire_max = 16;
665 static int      ipfw_track_scan_max = 128;
666
667 /* Compat */
668 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
669     CTLTYPE_INT | CTLFLAG_RD, NULL, 0, ipfw_sysctl_dyncnt, "I",
670     "Number of states and tracks");
671 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
672     CTLTYPE_INT | CTLFLAG_RW, NULL, 0, ipfw_sysctl_dynmax, "I",
673     "Max number of states and tracks");
674
675 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_cnt,
676     CTLTYPE_INT | CTLFLAG_RD, NULL, 0, ipfw_sysctl_statecnt, "I",
677     "Number of states");
678 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_max,
679     CTLTYPE_INT | CTLFLAG_RW, NULL, 0, ipfw_sysctl_statemax, "I",
680     "Max number of states");
681 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, state_headroom, CTLFLAG_RW,
682     &ipfw_state_headroom, 0, "headroom for state reap");
683 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, track_cnt, CTLFLAG_RD,
684     &ipfw_gd.ipfw_trkcnt_cnt, 0, "Number of tracks");
685 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, track_max, CTLFLAG_RW,
686     &ipfw_track_max, 0, "Max number of tracks");
687 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
688     &static_count, 0, "Number of static rules");
689 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
690     &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
691 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
692     &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
693 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_RW,
694     &dyn_fin_lifetime, 0, "Lifetime of dyn. rules for fin");
695 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_finwait_lifetime, CTLFLAG_RW,
696     &dyn_finwait_lifetime, 0, "Lifetime of dyn. rules for fin wait");
697 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_RW,
698     &dyn_rst_lifetime, 0, "Lifetime of dyn. rules for rst");
699 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
700     &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
701 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
702     &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
703 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
704     &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
705 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_scan_max,
706     CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_scan_max, 0, ipfw_sysctl_scancnt,
707     "I", "# of states to scan for each expire iteration");
708 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_expire_max,
709     CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_expire_max, 0, ipfw_sysctl_scancnt,
710     "I", "# of states to expire for each expire iteration");
711 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, keepalive_max,
712     CTLTYPE_INT | CTLFLAG_RW, &ipfw_keepalive_max, 0, ipfw_sysctl_scancnt,
713     "I", "# of keepalives to send for each expire iteration");
714     /* Description was a copy-paste of state_expire_max's; this knob
715      * bounds keepalive transmissions, not state expirations. */
714 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_reap_min,
715     CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_reap_min, 0, ipfw_sysctl_scancnt,
716     "I", "# of states to reap for state shortage");
717 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_scan_max,
718     CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_scan_max, 0, ipfw_sysctl_scancnt,
719     "I", "# of tracks to scan for each expire iteration");
720 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_expire_max,
721     CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_expire_max, 0, ipfw_sysctl_scancnt,
722     "I", "# of tracks to expire for each expire iteration");
723 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_reap_max,
724     CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_reap_max, 0, ipfw_sysctl_scancnt,
725     "I", "# of tracks to reap for track shortage");
726
727 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_reap,
728     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
729     __offsetof(struct ipfw_context, ipfw_sts_reap), ipfw_sysctl_stat,
730     "LU", "# of state reaps due to states shortage");
731 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_reapfailed,
732     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
733     __offsetof(struct ipfw_context, ipfw_sts_reapfailed), ipfw_sysctl_stat,
734     "LU", "# of state reap failure");
735 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_overflow,
736     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
737     __offsetof(struct ipfw_context, ipfw_sts_overflow), ipfw_sysctl_stat,
738     "LU", "# of state overflow");
739 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_nomem,
740     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
741     __offsetof(struct ipfw_context, ipfw_sts_nomem), ipfw_sysctl_stat,
742     "LU", "# of state allocation failure");
743 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_tcprecycled,
744     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
745     __offsetof(struct ipfw_context, ipfw_sts_tcprecycled), ipfw_sysctl_stat,
746     "LU", "# of state deleted due to fast TCP port recycling");
747
748 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_nomem,
749     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
750     __offsetof(struct ipfw_context, ipfw_tks_nomem), ipfw_sysctl_stat,
751     "LU", "# of track allocation failure");
752 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_reap,
753     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
754     __offsetof(struct ipfw_context, ipfw_tks_reap), ipfw_sysctl_stat,
755     "LU", "# of track reap due to tracks shortage");
756 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_reapfailed,
757     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
758     __offsetof(struct ipfw_context, ipfw_tks_reapfailed), ipfw_sysctl_stat,
759     "LU", "# of track reap failure");
760 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_overflow,
761     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
762     __offsetof(struct ipfw_context, ipfw_tks_overflow), ipfw_sysctl_stat,
763     "LU", "# of track overflow");
764 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_cntnomem,
765     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
766     __offsetof(struct ipfw_context, ipfw_tks_cntnomem), ipfw_sysctl_stat,
767     "LU", "# of track counter allocation failure");
768 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, frags,
769     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
770     __offsetof(struct ipfw_context, ipfw_frags), ipfw_sysctl_stat,
771     "LU", "# of IP fragements defraged");
772 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, defraged,
773     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
774     __offsetof(struct ipfw_context, ipfw_defraged), ipfw_sysctl_stat,
775     "LU", "# of IP packets after defrag");
776 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, defrag_remote,
777     CTLTYPE_ULONG | CTLFLAG_RW, NULL,
778     __offsetof(struct ipfw_context, ipfw_defrag_remote), ipfw_sysctl_stat,
779     "LU", "# of IP packets after defrag dispatched to remote cpus");
780
/* Comparators for the red-black trees generated below. */
static int		ipfw_state_cmp(struct ipfw_state *,
			    struct ipfw_state *);
static int		ipfw_trkcnt_cmp(struct ipfw_trkcnt *,
			    struct ipfw_trkcnt *);
static int		ipfw_track_cmp(struct ipfw_track *,
			    struct ipfw_track *);

/* Per-cpu state tree, keyed by ipfw_state_cmp(). */
RB_PROTOTYPE(ipfw_state_tree, ipfw_state, st_rblink, ipfw_state_cmp);
RB_GENERATE(ipfw_state_tree, ipfw_state, st_rblink, ipfw_state_cmp);

/* Track-counter tree, keyed by ipfw_trkcnt_cmp(). */
RB_PROTOTYPE(ipfw_trkcnt_tree, ipfw_trkcnt, tc_rblink, ipfw_trkcnt_cmp);
RB_GENERATE(ipfw_trkcnt_tree, ipfw_trkcnt, tc_rblink, ipfw_trkcnt_cmp);

/* Track tree, keyed by ipfw_track_cmp(). */
RB_PROTOTYPE(ipfw_track_tree, ipfw_track, t_rblink, ipfw_track_cmp);
RB_GENERATE(ipfw_track_tree, ipfw_track, t_rblink, ipfw_track_cmp);

static ip_fw_chk_t	ipfw_chk;
static void		ipfw_track_expire_ipifunc(void *);
static void		ipfw_state_expire_ipifunc(void *);
static void		ipfw_keepalive(void *);
static int		ipfw_state_expire_start(struct ipfw_context *,
			    int, int);
static void		ipfw_crossref_timeo(void *);

/*
 * lwkt token serializing access to ipfw_gd.ipfw_trkcnt_token-protected
 * data.  NOTE: the TOKINIT expansion already ends with ';'.
 */
#define IPFW_TRKCNT_TOKGET	lwkt_gettoken(&ipfw_gd.ipfw_trkcnt_token)
#define IPFW_TRKCNT_TOKREL	lwkt_reltoken(&ipfw_gd.ipfw_trkcnt_token)
#define IPFW_TRKCNT_TOKINIT	\
	lwkt_token_init(&ipfw_gd.ipfw_trkcnt_token, "ipfw_trkcnt");
809
810 static void
811 sa_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
812     const struct sockaddr *netmask)
813 {
814         const u_char *cp1 = (const u_char *)src;
815         u_char *cp2 = (u_char *)dst;
816         const u_char *cp3 = (const u_char *)netmask;
817         u_char *cplim = cp2 + *cp3;
818         u_char *cplim2 = cp2 + *cp1;
819
820         *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
821         cp3 += 2;
822         if (cplim > cplim2)
823                 cplim = cplim2;
824         while (cp2 < cplim)
825                 *cp2++ = *cp1++ & *cp3++;
826         if (cp2 < cplim2)
827                 bzero(cp2, cplim2 - cp2);
828 }
829
830 static __inline void
831 ipfw_key_build(struct ipfw_key *key, in_addr_t saddr, uint16_t sport,
832     in_addr_t daddr, uint16_t dport, uint8_t proto)
833 {
834
835         key->proto = proto;
836         key->swap = 0;
837
838         if (saddr < daddr) {
839                 key->addr_u.addrs.addr1 = daddr;
840                 key->addr_u.addrs.addr2 = saddr;
841                 key->swap |= IPFW_KEY_SWAP_ADDRS;
842         } else {
843                 key->addr_u.addrs.addr1 = saddr;
844                 key->addr_u.addrs.addr2 = daddr;
845         }
846
847         if (sport < dport) {
848                 key->port_u.ports.port1 = dport;
849                 key->port_u.ports.port2 = sport;
850                 key->swap |= IPFW_KEY_SWAP_PORTS;
851         } else {
852                 key->port_u.ports.port1 = sport;
853                 key->port_u.ports.port2 = dport;
854         }
855
856         if (sport == dport && (key->swap & IPFW_KEY_SWAP_ADDRS))
857                 key->swap |= IPFW_KEY_SWAP_PORTS;
858         if (saddr == daddr && (key->swap & IPFW_KEY_SWAP_PORTS))
859                 key->swap |= IPFW_KEY_SWAP_ADDRS;
860 }
861
862 static __inline void
863 ipfw_key_4tuple(const struct ipfw_key *key, in_addr_t *saddr, uint16_t *sport,
864     in_addr_t *daddr, uint16_t *dport)
865 {
866
867         if (key->swap & IPFW_KEY_SWAP_ADDRS) {
868                 *saddr = key->addr_u.addrs.addr2;
869                 *daddr = key->addr_u.addrs.addr1;
870         } else {
871                 *saddr = key->addr_u.addrs.addr1;
872                 *daddr = key->addr_u.addrs.addr2;
873         }
874
875         if (key->swap & IPFW_KEY_SWAP_PORTS) {
876                 *sport = key->port_u.ports.port2;
877                 *dport = key->port_u.ports.port1;
878         } else {
879                 *sport = key->port_u.ports.port1;
880                 *dport = key->port_u.ports.port2;
881         }
882 }
883
884 static int
885 ipfw_state_cmp(struct ipfw_state *s1, struct ipfw_state *s2)
886 {
887
888         if (s1->st_proto > s2->st_proto)
889                 return (1);
890         if (s1->st_proto < s2->st_proto)
891                 return (-1);
892
893         if (s1->st_addrs > s2->st_addrs)
894                 return (1);
895         if (s1->st_addrs < s2->st_addrs)
896                 return (-1);
897
898         if (s1->st_ports > s2->st_ports)
899                 return (1);
900         if (s1->st_ports < s2->st_ports)
901                 return (-1);
902
903         if (s1->st_swap == s2->st_swap ||
904             (s1->st_swap ^ s2->st_swap) == IPFW_KEY_SWAP_ALL)
905                 return (0);
906
907         if (s1->st_swap > s2->st_swap)
908                 return (1);
909         else
910                 return (-1);
911 }
912
913 static int
914 ipfw_trkcnt_cmp(struct ipfw_trkcnt *t1, struct ipfw_trkcnt *t2)
915 {
916
917         if (t1->tc_proto > t2->tc_proto)
918                 return (1);
919         if (t1->tc_proto < t2->tc_proto)
920                 return (-1);
921
922         if (t1->tc_addrs > t2->tc_addrs)
923                 return (1);
924         if (t1->tc_addrs < t2->tc_addrs)
925                 return (-1);
926
927         if (t1->tc_ports > t2->tc_ports)
928                 return (1);
929         if (t1->tc_ports < t2->tc_ports)
930                 return (-1);
931
932         if (t1->tc_ruleid > t2->tc_ruleid)
933                 return (1);
934         if (t1->tc_ruleid < t2->tc_ruleid)
935                 return (-1);
936
937         return (0);
938 }
939
940 static int
941 ipfw_track_cmp(struct ipfw_track *t1, struct ipfw_track *t2)
942 {
943
944         if (t1->t_proto > t2->t_proto)
945                 return (1);
946         if (t1->t_proto < t2->t_proto)
947                 return (-1);
948
949         if (t1->t_addrs > t2->t_addrs)
950                 return (1);
951         if (t1->t_addrs < t2->t_addrs)
952                 return (-1);
953
954         if (t1->t_ports > t2->t_ports)
955                 return (1);
956         if (t1->t_ports < t2->t_ports)
957                 return (-1);
958
959         if ((uintptr_t)t1->t_rule > (uintptr_t)t2->t_rule)
960                 return (1);
961         if ((uintptr_t)t1->t_rule < (uintptr_t)t2->t_rule)
962                 return (-1);
963
964         return (0);
965 }
966
967 static void
968 ipfw_state_max_set(int state_max)
969 {
970
971         ipfw_state_max = state_max;
972         /* Allow 5% states over-allocation. */
973         ipfw_state_loosecnt_updthr = (state_max / 20) / netisr_ncpus;
974 }
975
976 static __inline int
977 ipfw_state_cntcoll(void)
978 {
979         int cpu, state_cnt = 0;
980
981         for (cpu = 0; cpu < netisr_ncpus; ++cpu)
982                 state_cnt += ipfw_ctx[cpu]->ipfw_state_cnt;
983         return (state_cnt);
984 }
985
986 static __inline int
987 ipfw_state_cntsync(void)
988 {
989         int state_cnt;
990
991         state_cnt = ipfw_state_cntcoll();
992         ipfw_gd.ipfw_state_loosecnt = state_cnt;
993         return (state_cnt);
994 }
995
996 static __inline int
997 ipfw_free_rule(struct ip_fw *rule)
998 {
999         KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d", mycpuid));
1000         KASSERT(rule->refcnt > 0, ("invalid refcnt %u", rule->refcnt));
1001         rule->refcnt--;
1002         if (rule->refcnt == 0) {
1003                 if (rule->cross_rules != NULL)
1004                         kfree(rule->cross_rules, M_IPFW);
1005                 kfree(rule, M_IPFW);
1006                 return 1;
1007         }
1008         return 0;
1009 }
1010
/*
 * Callback-style wrapper around ipfw_free_rule(); additionally drops
 * the module-wide reference count when built as a KLD module.
 */
static void
ipfw_unref_rule(void *priv)
{
	ipfw_free_rule(priv);
#ifdef KLD_MODULE
	KASSERT(ipfw_gd.ipfw_refcnt > 0,
	    ("invalid ipfw_refcnt %d", ipfw_gd.ipfw_refcnt));
	atomic_subtract_int(&ipfw_gd.ipfw_refcnt, 1);
#endif
}
1021
/*
 * Take one reference on `rule` (owning cpu only); also bumps the
 * module-wide reference count when built as a KLD module.
 */
static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d", mycpuid));
#ifdef KLD_MODULE
	atomic_add_int(&ipfw_gd.ipfw_refcnt, 1);
#endif
	rule->refcnt++;
}
1031
1032 /*
1033  * This macro maps an ip pointer into a layer3 header pointer of type T
1034  */
1035 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))
1036
1037 static __inline int
1038 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
1039 {
1040         int type = L3HDR(struct icmp,ip)->icmp_type;
1041
1042         return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
1043 }
1044
1045 #define TT      ((1 << ICMP_ECHO) | \
1046                  (1 << ICMP_ROUTERSOLICIT) | \
1047                  (1 << ICMP_TSTAMP) | \
1048                  (1 << ICMP_IREQ) | \
1049                  (1 << ICMP_MASKREQ))
1050
1051 static int
1052 is_icmp_query(struct ip *ip)
1053 {
1054         int type = L3HDR(struct icmp, ip)->icmp_type;
1055
1056         return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
1057 }
1058
1059 #undef TT
1060
1061 /*
1062  * The following checks use two arrays of 8 or 16 bits to store the
1063  * bits that we want set or clear, respectively. They are in the
1064  * low and high half of cmd->arg1 or cmd->d[0].
1065  *
1066  * We scan options and store the bits we find set. We succeed if
1067  *
1068  *      (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
1069  *
1070  * The code is sometimes optimized not to store additional variables.
1071  */
1072 static int
1073 flags_match(ipfw_insn *cmd, uint8_t bits)
1074 {
1075         u_char want_clear;
1076         bits = ~bits;
1077
1078         if (((cmd->arg1 & 0xff) & bits) != 0)
1079                 return 0; /* some bits we want set were clear */
1080
1081         want_clear = (cmd->arg1 >> 8) & 0xff;
1082         if ((want_clear & bits) != want_clear)
1083                 return 0; /* some bits we want clear were set */
1084         return 1;
1085 }
1086
1087 static int
1088 ipopts_match(struct ip *ip, ipfw_insn *cmd)
1089 {
1090         int optlen, bits = 0;
1091         u_char *cp = (u_char *)(ip + 1);
1092         int x = (ip->ip_hl << 2) - sizeof(struct ip);
1093
1094         for (; x > 0; x -= optlen, cp += optlen) {
1095                 int opt = cp[IPOPT_OPTVAL];
1096
1097                 if (opt == IPOPT_EOL)
1098                         break;
1099
1100                 if (opt == IPOPT_NOP) {
1101                         optlen = 1;
1102                 } else {
1103                         optlen = cp[IPOPT_OLEN];
1104                         if (optlen <= 0 || optlen > x)
1105                                 return 0; /* invalid or truncated */
1106                 }
1107
1108                 switch (opt) {
1109                 case IPOPT_LSRR:
1110                         bits |= IP_FW_IPOPT_LSRR;
1111                         break;
1112
1113                 case IPOPT_SSRR:
1114                         bits |= IP_FW_IPOPT_SSRR;
1115                         break;
1116
1117                 case IPOPT_RR:
1118                         bits |= IP_FW_IPOPT_RR;
1119                         break;
1120
1121                 case IPOPT_TS:
1122                         bits |= IP_FW_IPOPT_TS;
1123                         break;
1124
1125                 default:
1126                         break;
1127                 }
1128         }
1129         return (flags_match(cmd, bits));
1130 }
1131
1132 static int
1133 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
1134 {
1135         int optlen, bits = 0;
1136         struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
1137         u_char *cp = (u_char *)(tcp + 1);
1138         int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
1139
1140         for (; x > 0; x -= optlen, cp += optlen) {
1141                 int opt = cp[0];
1142
1143                 if (opt == TCPOPT_EOL)
1144                         break;
1145
1146                 if (opt == TCPOPT_NOP) {
1147                         optlen = 1;
1148                 } else {
1149                         optlen = cp[1];
1150                         if (optlen <= 0)
1151                                 break;
1152                 }
1153
1154                 switch (opt) {
1155                 case TCPOPT_MAXSEG:
1156                         bits |= IP_FW_TCPOPT_MSS;
1157                         break;
1158
1159                 case TCPOPT_WINDOW:
1160                         bits |= IP_FW_TCPOPT_WINDOW;
1161                         break;
1162
1163                 case TCPOPT_SACK_PERMITTED:
1164                 case TCPOPT_SACK:
1165                         bits |= IP_FW_TCPOPT_SACK;
1166                         break;
1167
1168                 case TCPOPT_TIMESTAMP:
1169                         bits |= IP_FW_TCPOPT_TS;
1170                         break;
1171
1172                 case TCPOPT_CC:
1173                 case TCPOPT_CCNEW:
1174                 case TCPOPT_CCECHO:
1175                         bits |= IP_FW_TCPOPT_CC;
1176                         break;
1177
1178                 default:
1179                         break;
1180                 }
1181         }
1182         return (flags_match(cmd, bits));
1183 }
1184
1185 static int
1186 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
1187 {
1188         if (ifp == NULL)        /* no iface with this packet, match fails */
1189                 return 0;
1190
1191         /* Check by name or by IP address */
1192         if (cmd->name[0] != '\0') { /* match by name */
1193                 /* Check name */
1194                 if (cmd->p.glob) {
1195                         if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
1196                                 return(1);
1197                 } else {
1198                         if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
1199                                 return(1);
1200                 }
1201         } else {
1202                 struct ifaddr_container *ifac;
1203
1204                 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1205                         struct ifaddr *ia = ifac->ifa;
1206
1207                         if (ia->ifa_addr == NULL)
1208                                 continue;
1209                         if (ia->ifa_addr->sa_family != AF_INET)
1210                                 continue;
1211                         if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
1212                             (ia->ifa_addr))->sin_addr.s_addr)
1213                                 return(1);      /* match */
1214                 }
1215         }
1216         return(0);      /* no match, fail ... */
1217 }
1218
1219 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
1220
1221 /*
1222  * We enter here when we have a rule with O_LOG.
1223  * XXX this function alone takes about 2Kbytes of code!
1224  */
/*
 * Emit a LOG_SECURITY syslog line describing packet `m` and the action
 * of rule `f`; f == NULL means a bogus packet with no matching rule.
 * `hlen` is the IP header length (0 means non-IP, logged as "MAC"),
 * `eh` is non-NULL for layer-2 packets (whose ip_off/ip_len are still
 * in network order), `oif` is the output interface or NULL for input.
 * Logging is rate limited per rule (ipfw_insn_log max_log) or, for the
 * no-rule case, globally via verbose_limit.
 */
static void
ipfw_log(struct ipfw_context *ctx, struct ip_fw *f, u_int hlen,
    struct ether_header *eh, struct mbuf *m, struct ifnet *oif)
{
	char *action;
	int limit_reached = 0;
	char action2[40], proto[48], fragment[28], abuf[INET_ADDRSTRLEN];

	fragment[0] = '\0';
	proto[0] = '\0';

	if (f == NULL) {	/* bogus pkt */
		/* Globally rate limited via verbose_limit (0 = unlimited). */
		if (verbose_limit != 0 &&
		    ctx->ipfw_norule_counter >= verbose_limit)
			return;
		ctx->ipfw_norule_counter++;
		if (ctx->ipfw_norule_counter == verbose_limit)
			limit_reached = verbose_limit;
		action = "Refuse";
	} else {	/* O_LOG is the first action, find the real one */
		ipfw_insn *cmd = ACTION_PTR(f);
		ipfw_insn_log *l = (ipfw_insn_log *)cmd;

		/* Per-rule rate limit; max_log == 0 means unlimited. */
		if (l->max_log != 0 && l->log_left == 0)
			return;
		/* NOTE: decremented even when max_log == 0 (unlimited). */
		l->log_left--;
		if (l->log_left == 0)
			limit_reached = l->max_log;
		cmd += F_LEN(cmd);	/* point to first action */
		if (cmd->opcode == O_PROB)
			cmd += F_LEN(cmd);

		/* Render the action name; default to the action2 scratch. */
		action = action2;
		switch (cmd->opcode) {
		case O_DENY:
			action = "Deny";
			break;

		case O_REJECT:
			if (cmd->arg1==ICMP_REJECT_RST) {
				action = "Reset";
			} else if (cmd->arg1==ICMP_UNREACH_HOST) {
				action = "Reject";
			} else {
				ksnprintf(SNPARGS(action2, 0), "Unreach %d",
					  cmd->arg1);
			}
			break;

		case O_ACCEPT:
			action = "Accept";
			break;

		case O_COUNT:
			action = "Count";
			break;

		case O_DIVERT:
			ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
			break;

		case O_TEE:
			ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
			break;

		case O_SKIPTO:
			ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
			break;

		case O_PIPE:
			ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
			break;

		case O_QUEUE:
			ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
			break;

		case O_FORWARD_IP:
			{
				ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
				int len;

				len = ksnprintf(SNPARGS(action2, 0),
				    "Forward to %s",
				    kinet_ntoa(sa->sa.sin_addr, abuf));
				if (sa->sa.sin_port) {
					ksnprintf(SNPARGS(action2, len), ":%d",
						  sa->sa.sin_port);
				}
			}
			break;

		default:
			action = "UNKNOWN";
			break;
		}
	}

	/* Render the protocol/address part of the log line. */
	if (hlen == 0) {	/* non-ip */
		ksnprintf(SNPARGS(proto, 0), "MAC");
	} else {
		struct ip *ip = mtod(m, struct ip *);
		/* these three are all aliases to the same thing */
		struct icmp *const icmp = L3HDR(struct icmp, ip);
		struct tcphdr *const tcp = (struct tcphdr *)icmp;
		struct udphdr *const udp = (struct udphdr *)icmp;

		int ip_off, offset, ip_len;
		int len;

		if (eh != NULL) { /* layer 2 packets are as on the wire */
			ip_off = ntohs(ip->ip_off);
			ip_len = ntohs(ip->ip_len);
		} else {
			ip_off = ip->ip_off;
			ip_len = ip->ip_len;
		}
		offset = ip_off & IP_OFFMASK;
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
					kinet_ntoa(ip->ip_src, abuf));
			/* Ports are only valid in the first fragment. */
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(tcp->th_sport),
					  kinet_ntoa(ip->ip_dst, abuf),
					  ntohs(tcp->th_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  kinet_ntoa(ip->ip_dst, abuf));
			}
			break;

		case IPPROTO_UDP:
			len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
					kinet_ntoa(ip->ip_src, abuf));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(udp->uh_sport),
					  kinet_ntoa(ip->ip_dst, abuf),
					  ntohs(udp->uh_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  kinet_ntoa(ip->ip_dst, abuf));
			}
			break;

		case IPPROTO_ICMP:
			/* type/code live in the first fragment only. */
			if (offset == 0) {
				len = ksnprintf(SNPARGS(proto, 0),
						"ICMP:%u.%u ",
						icmp->icmp_type,
						icmp->icmp_code);
			} else {
				len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
			}
			len += ksnprintf(SNPARGS(proto, len), "%s",
					 kinet_ntoa(ip->ip_src, abuf));
			ksnprintf(SNPARGS(proto, len), " %s",
				  kinet_ntoa(ip->ip_dst, abuf));
			break;

		default:
			len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
					kinet_ntoa(ip->ip_src, abuf));
			ksnprintf(SNPARGS(proto, len), " %s",
				  kinet_ntoa(ip->ip_dst, abuf));
			break;
		}

		/* Describe the fragment, if this packet is one. */
		if (ip_off & (IP_MF | IP_OFFMASK)) {
			ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
				  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
				  offset << 3, (ip_off & IP_MF) ? "+" : "");
		}
	}

	if (oif || m->m_pkthdr.rcvif) {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s %s via %s%s\n",
		    f ? f->rulenum : -1,
		    action, proto, oif ? "out" : "in",
		    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
		    fragment);
	} else {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s [no if info]%s\n",
		    f ? f->rulenum : -1,
		    action, proto, fragment);
	}

	/* Announce once when a rate limit has just been hit. */
	if (limit_reached) {
		log(LOG_SECURITY | LOG_NOTICE,
		    "ipfw: limit %d reached on entry %d\n",
		    limit_reached, f ? f->rulenum : -1);
	}
}
1422
#undef SNPARGS

/*
 * Wraparound-safe "a <= b": the subtraction is evaluated in signed
 * arithmetic, so the comparison stays correct across counter wrap.
 */
#define TIME_LEQ(a, b)	((a) - (b) <= 0)
1426
/*
 * Delete state `s` from context `ctx`: detach it from its owning track
 * (if any), unlink it from both the expiry list and the lookup tree,
 * free it and adjust the per-context counters.
 */
static void
ipfw_state_del(struct ipfw_context *ctx, struct ipfw_state *s)
{

	KASSERT(s->st_type == O_KEEP_STATE || s->st_type == O_LIMIT,
	    ("invalid state type %u", s->st_type));
	KASSERT(ctx->ipfw_state_cnt > 0,
	    ("invalid state count %d", ctx->ipfw_state_cnt));

	/* Detach from the owning track and drop its shared count first. */
	if (s->st_track != NULL) {
		struct ipfw_track *t = s->st_track;

		KASSERT(!LIST_EMPTY(&t->t_state_list),
		    ("track state list is empty"));
		LIST_REMOVE(s, st_trklink);

		KASSERT(*t->t_count > 0,
		    ("invalid track count %d", *t->t_count));
		atomic_subtract_int(t->t_count, 1);
	}

	TAILQ_REMOVE(&ctx->ipfw_state_list, s, st_link);
	RB_REMOVE(ipfw_state_tree, &ctx->ipfw_state_tree, s);
	kfree(s, M_IPFW);

	ctx->ipfw_state_cnt--;
	if (ctx->ipfw_state_loosecnt > 0)
		ctx->ipfw_state_loosecnt--;
}
1456
/*
 * Reclaim up to `reap_max` states because we are short of them.  If no
 * expiry run is in progress one is kick-started in reap mode; if one is
 * already running, continue it from its anchor, additionally reaping
 * closed TCP states.  Returns the number of states deleted.
 */
static int
ipfw_state_reap(struct ipfw_context *ctx, int reap_max)
{
	struct ipfw_state *s, *anchor;
	int expired;

	if (reap_max < ipfw_state_reap_min)
		reap_max = ipfw_state_reap_min;

	if ((ctx->ipfw_flags & IPFW_FLAG_STATEEXP) == 0) {
		/*
		 * Kick start state expiring.  Ignore scan limit,
		 * we are short of states.
		 */
		ctx->ipfw_flags |= IPFW_FLAG_STATEREAP;
		expired = ipfw_state_expire_start(ctx, INT_MAX, reap_max);
		ctx->ipfw_flags &= ~IPFW_FLAG_STATEREAP;
		return (expired);
	}

	/*
	 * States are being expired.
	 */

	if (ctx->ipfw_state_cnt == 0)
		return (0);

	expired = 0;
	anchor = &ctx->ipfw_stateexp_anch;
	while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
		/*
		 * Ignore scan limit; we are short of states.
		 */

		/* Step the anchor past s so progress is preserved. */
		TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
		TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);

		if (s->st_type == O_ANCHOR)
			continue;

		if (IPFW_STATE_TCPCLOSED(s) ||
		    TIME_LEQ(s->st_expire, time_uptime)) {
			ipfw_state_del(ctx, s);
			if (++expired >= reap_max)
				break;
			/* Every 256 deletions, stop early if the global
			 * count is comfortably below the limit again. */
			if ((expired & 0xff) == 0 &&
			    ipfw_state_cntcoll() + ipfw_state_headroom <=
			    ipfw_state_max)
				break;
		}
	}
	/*
	 * NOTE:
	 * Leave the anchor on the list, even if the end of the list has
	 * been reached.  ipfw_state_expire_more_dispatch() will handle
	 * the removal.
	 */
	return (expired);
}
1516
1517 static void
1518 ipfw_state_flush(struct ipfw_context *ctx, const struct ip_fw *rule)
1519 {
1520         struct ipfw_state *s, *sn;
1521
1522         TAILQ_FOREACH_MUTABLE(s, &ctx->ipfw_state_list, st_link, sn) {
1523                 if (s->st_type == O_ANCHOR)
1524                         continue;
1525                 if (rule != NULL && s->st_rule != rule)
1526                         continue;
1527                 ipfw_state_del(ctx, s);
1528         }
1529 }
1530
/*
 * Finish the current expiry run: clear the in-progress flag and re-arm
 * the periodic expiry callout for one second later.
 */
static void
ipfw_state_expire_done(struct ipfw_context *ctx)
{

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
	    ("stateexp is not in progress"));
	ctx->ipfw_flags &= ~IPFW_FLAG_STATEEXP;
	callout_reset(&ctx->ipfw_stateto_ch, hz,
	    ipfw_state_expire_ipifunc, NULL);
}
1541
/*
 * Schedule a continuation of the current expiry run by sending the
 * per-context "more" netmsg to this cpu's netisr.
 */
static void
ipfw_state_expire_more(struct ipfw_context *ctx)
{
	struct netmsg_base *nm = &ctx->ipfw_stateexp_more;

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
	    ("stateexp is not in progress"));
	KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
	    ("stateexp more did not finish"));
	netisr_sendmsg_oncpu(nm);
}
1553
/*
 * Scan states after `anchor`, advancing the anchor as we go, and delete
 * those whose expiry has passed (plus closed TCP states when in reap
 * mode).  When either the scan or the expire budget is exhausted, a
 * continuation is scheduled and the anchor stays on the list; when the
 * list end is reached, the anchor is removed and the run is finished.
 * Returns the number of states deleted.
 */
static int
ipfw_state_expire_loop(struct ipfw_context *ctx, struct ipfw_state *anchor,
    int scan_max, int expire_max)
{
	struct ipfw_state *s;
	int scanned = 0, expired = 0;

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
	    ("stateexp is not in progress"));

	while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
		if (scanned++ >= scan_max) {
			/* Scan budget exhausted; continue later. */
			ipfw_state_expire_more(ctx);
			return (expired);
		}

		/* Step the anchor past s so progress is preserved. */
		TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
		TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);

		if (s->st_type == O_ANCHOR)
			continue;

		if (TIME_LEQ(s->st_expire, time_uptime) ||
		    ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) &&
		     IPFW_STATE_TCPCLOSED(s))) {
			ipfw_state_del(ctx, s);
			if (++expired >= expire_max) {
				/* Expire budget exhausted; continue later. */
				ipfw_state_expire_more(ctx);
				return (expired);
			}
			/* In reap mode, check every 256 deletions whether
			 * the global count has dropped below the limit. */
			if ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) &&
			    (expired & 0xff) == 0 &&
			    ipfw_state_cntcoll() + ipfw_state_headroom <=
			    ipfw_state_max) {
				ipfw_state_expire_more(ctx);
				return (expired);
			}
		}
	}
	TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
	ipfw_state_expire_done(ctx);
	return (expired);
}
1597
/*
 * Netmsg handler continuing an in-progress expiry run on this cpu,
 * picking up from the anchor left on the state list.
 */
static void
ipfw_state_expire_more_dispatch(netmsg_t nm)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ipfw_state *anchor;

	ASSERT_NETISR_NCPUS(mycpuid);
	KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
	    ("statexp is not in progress"));

	/* Reply ASAP */
	netisr_replymsg(&nm->base, 0);

	anchor = &ctx->ipfw_stateexp_anch;
	if (ctx->ipfw_state_cnt == 0) {
		/* Nothing left; pull the anchor and finish the run. */
		TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
		ipfw_state_expire_done(ctx);
		return;
	}
	ipfw_state_expire_loop(ctx, anchor,
	    ipfw_state_scan_max, ipfw_state_expire_max);
}
1620
/*
 * Start expiring states on the current cpu: mark the expiry as in
 * progress, insert the scan anchor at the head of the state list and
 * walk from it.  Returns the number of states expired by this run.
 */
static int
ipfw_state_expire_start(struct ipfw_context *ctx, int scan_max, int expire_max)
{
	struct ipfw_state *anchor;

	KASSERT((ctx->ipfw_flags & IPFW_FLAG_STATEEXP) == 0,
	    ("stateexp is in progress"));
	ctx->ipfw_flags |= IPFW_FLAG_STATEEXP;

	/* Nothing to expire. */
	if (ctx->ipfw_state_cnt == 0) {
		ipfw_state_expire_done(ctx);
		return (0);
	}

	/*
	 * Do not expire more than once per second, it is useless.
	 * (The rate limit is bypassed while reaping, i.e. when we
	 * are short of states.)
	 */
	if ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) == 0 &&
	    ctx->ipfw_state_lastexp == time_uptime) {
		ipfw_state_expire_done(ctx);
		return (0);
	}
	ctx->ipfw_state_lastexp = time_uptime;

	/* The anchor marks the scan position across chunked runs. */
	anchor = &ctx->ipfw_stateexp_anch;
	TAILQ_INSERT_HEAD(&ctx->ipfw_state_list, anchor, st_link);
	return (ipfw_state_expire_loop(ctx, anchor, scan_max, expire_max));
}
1649
1650 static void
1651 ipfw_state_expire_dispatch(netmsg_t nm)
1652 {
1653         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1654
1655         ASSERT_NETISR_NCPUS(mycpuid);
1656
1657         /* Reply ASAP */
1658         crit_enter();
1659         netisr_replymsg(&nm->base, 0);
1660         crit_exit();
1661
1662         if (ctx->ipfw_flags & IPFW_FLAG_STATEEXP) {
1663                 /* Running; done. */
1664                 return;
1665         }
1666         ipfw_state_expire_start(ctx,
1667             ipfw_state_scan_max, ipfw_state_expire_max);
1668 }
1669
1670 static void
1671 ipfw_state_expire_ipifunc(void *dummy __unused)
1672 {
1673         struct netmsg_base *msg;
1674
1675         KKASSERT(mycpuid < netisr_ncpus);
1676         msg = &ipfw_ctx[mycpuid]->ipfw_stateexp_nm;
1677
1678         crit_enter();
1679         if (msg->lmsg.ms_flags & MSGF_DONE)
1680                 netisr_sendmsg_oncpu(msg);
1681         crit_exit();
1682 }
1683
/*
 * Track TCP sequence/ack numbers for the state in the given direction
 * and maintain the FIN-ACKed bits in st_state.  Returns FALSE when the
 * segment is out-of-sequence and the caller should not update the
 * state further; RSTs always pass.
 */
static boolean_t
ipfw_state_update_tcp(struct ipfw_state *s, int dir, const struct tcphdr *tcp)
{
	uint32_t seq = ntohl(tcp->th_seq);
	uint32_t ack = ntohl(tcp->th_ack);

	/* RST tears the connection down; accept it unconditionally. */
	if (tcp->th_flags & TH_RST)
		return (TRUE);

	if (dir == MATCH_FORWARD) {
		/* Track the highest sequence seen in the forward dir. */
		if ((s->st_flags & IPFW_STATE_F_SEQFWD) == 0) {
			s->st_flags |= IPFW_STATE_F_SEQFWD;
			s->st_seq_fwd = seq;
		} else if (SEQ_GEQ(seq, s->st_seq_fwd)) {
			s->st_seq_fwd = seq;
		} else {
			/* Out-of-sequence; done. */
			return (FALSE);
		}
		if (tcp->th_flags & TH_ACK) {
			/* Track the highest ack seen in the forward dir. */
			if ((s->st_flags & IPFW_STATE_F_ACKFWD) == 0) {
				s->st_flags |= IPFW_STATE_F_ACKFWD;
				s->st_ack_fwd = ack;
			} else if (SEQ_GEQ(ack, s->st_ack_fwd)) {
				s->st_ack_fwd = ack;
			} else {
				/* Out-of-sequence; done. */
				return (FALSE);
			}

			/*
			 * Reverse direction sent a FIN (high byte of
			 * st_state) that is not yet ACKed, and this ack
			 * covers it: mark the reverse FIN as ACKed.
			 */
			if ((s->st_state & ((TH_FIN | TH_ACK) << 8)) ==
			    (TH_FIN << 8) && s->st_ack_fwd == s->st_seq_rev + 1)
				s->st_state |= (TH_ACK << 8);
		}
	} else {
		/* Track the highest sequence seen in the reverse dir. */
		if ((s->st_flags & IPFW_STATE_F_SEQREV) == 0) {
			s->st_flags |= IPFW_STATE_F_SEQREV;
			s->st_seq_rev = seq;
		} else if (SEQ_GEQ(seq, s->st_seq_rev)) {
			s->st_seq_rev = seq;
		} else {
			/* Out-of-sequence; done. */
			return (FALSE);
		}
		if (tcp->th_flags & TH_ACK) {
			/* Track the highest ack seen in the reverse dir. */
			if ((s->st_flags & IPFW_STATE_F_ACKREV) == 0) {
				s->st_flags |= IPFW_STATE_F_ACKREV;
				s->st_ack_rev= ack;
			} else if (SEQ_GEQ(ack, s->st_ack_rev)) {
				s->st_ack_rev = ack;
			} else {
				/* Out-of-sequence; done. */
				return (FALSE);
			}

			/*
			 * Forward direction sent a FIN (low byte of
			 * st_state) that is not yet ACKed, and this ack
			 * covers it: mark the forward FIN as ACKed.
			 */
			if ((s->st_state & (TH_FIN | TH_ACK)) == TH_FIN &&
			    s->st_ack_rev == s->st_seq_fwd + 1)
				s->st_state |= TH_ACK;
		}
	}
	return (TRUE);
}
1746
/*
 * Refresh the state's lifetime (st_expire) and, for TCP, advance the
 * tracked connection state (st_state) according to the packet's TCP
 * flags.  Forward-direction flags live in the low byte of st_state,
 * reverse-direction flags in the next byte (hence "flags << 8").
 */
static void
ipfw_state_update(const struct ipfw_flow_id *pkt, int dir,
    const struct tcphdr *tcp, struct ipfw_state *s)
{

	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		u_char flags = pkt->flags & IPFW_STATE_TCPFLAGS;

		/* Ignore out-of-sequence segments entirely. */
		if (tcp != NULL && !ipfw_state_update_tcp(s, dir, tcp))
			return;

		s->st_state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
		switch (s->st_state & IPFW_STATE_TCPSTATES) {
		case TH_SYN:				/* opening */
			s->st_expire = time_uptime + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			s->st_expire = time_uptime + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			if ((s->st_state & BOTH_FINACK) == BOTH_FINACK) {
				/* And both FINs were ACKed. */
				s->st_expire = time_uptime + dyn_fin_lifetime;
			} else {
				s->st_expire = time_uptime +
				    dyn_finwait_lifetime;
			}
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((s->st_state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", s->st_state);
#endif
			s->st_expire = time_uptime + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		s->st_expire = time_uptime + dyn_udp_lifetime;
	} else {
		/* other protocols */
		s->st_expire = time_uptime + dyn_short_lifetime;
	}
}
1799
1800 /*
1801  * Lookup a state.
1802  */
1803 static struct ipfw_state *
1804 ipfw_state_lookup(struct ipfw_context *ctx, const struct ipfw_flow_id *pkt,
1805     int *match_direction, const struct tcphdr *tcp)
1806 {
1807         struct ipfw_state *key, *s;
1808         int dir = MATCH_NONE;
1809
1810         key = &ctx->ipfw_state_tmpkey;
1811         ipfw_key_build(&key->st_key, pkt->src_ip, pkt->src_port,
1812             pkt->dst_ip, pkt->dst_port, pkt->proto);
1813         s = RB_FIND(ipfw_state_tree, &ctx->ipfw_state_tree, key);
1814         if (s == NULL)
1815                 goto done; /* not found. */
1816         if (TIME_LEQ(s->st_expire, time_uptime)) {
1817                 /* Expired. */
1818                 ipfw_state_del(ctx, s);
1819                 s = NULL;
1820                 goto done;
1821         }
1822         if ((pkt->flags & TH_SYN) && IPFW_STATE_TCPCLOSED(s)) {
1823                 /* TCP ports recycling is too fast. */
1824                 ctx->ipfw_sts_tcprecycled++;
1825                 ipfw_state_del(ctx, s);
1826                 s = NULL;
1827                 goto done;
1828         }
1829
1830         if (s->st_swap == key->st_swap) {
1831                 dir = MATCH_FORWARD;
1832         } else {
1833                 KASSERT((s->st_swap & key->st_swap) == 0,
1834                     ("found mismatch state"));
1835                 dir = MATCH_REVERSE;
1836         }
1837
1838         /* Update this state. */
1839         ipfw_state_update(pkt, dir, tcp, s);
1840
1841         if (s->st_track != NULL) {
1842                 /* This track has been used. */
1843                 s->st_track->t_expire = time_uptime + dyn_short_lifetime;
1844         }
1845 done:
1846         if (match_direction)
1847                 *match_direction = dir;
1848         return (s);
1849 }
1850
1851 static __inline struct ip_fw *
1852 ipfw_state_lookup_rule(struct ipfw_context *ctx, const struct ipfw_flow_id *pkt,
1853     int *match_direction, const struct tcphdr *tcp, uint16_t len)
1854 {
1855         struct ipfw_state *s;
1856
1857         s = ipfw_state_lookup(ctx, pkt, match_direction, tcp);
1858         if (s == NULL)
1859                 return (NULL);
1860
1861         KASSERT(s->st_rule->cpuid == mycpuid,
1862             ("rule %p (cpu%d) does not belong to the current cpu%d",
1863              s->st_rule, s->st_rule->cpuid, mycpuid));
1864
1865         s->st_pcnt++;
1866         s->st_bcnt += len;
1867
1868         return (s->st_rule);
1869 }
1870
/*
 * Allocate and install a new state for the given flow id.
 *
 * The state is keyed into the per-cpu RB tree (for lookup) and
 * appended to the per-cpu list (for expiry scans); its initial
 * lifetime/state is set as for a forward-direction packet.  When a
 * track 't' is supplied (O_LIMIT), the state is linked to it so the
 * track stays referenced.  Returns NULL if allocation fails.
 */
static struct ipfw_state *
ipfw_state_add(struct ipfw_context *ctx, const struct ipfw_flow_id *id,
    uint16_t type, struct ip_fw *rule, struct ipfw_track *t,
    const struct tcphdr *tcp)
{
	struct ipfw_state *s, *dup;

	KASSERT(type == O_KEEP_STATE || type == O_LIMIT,
	    ("invalid state type %u", type));

	s = kmalloc(sizeof(*s), M_IPFW, M_INTWAIT | M_NULLOK | M_ZERO);
	if (s == NULL) {
		ctx->ipfw_sts_nomem++;
		return (NULL);
	}

	ipfw_key_build(&s->st_key, id->src_ip, id->src_port,
	    id->dst_ip, id->dst_port, id->proto);

	s->st_rule = rule;
	s->st_type = type;

	/*
	 * Account the new state; the per-cpu loose counter is flushed
	 * into the global one once it crosses the update threshold.
	 */
	ctx->ipfw_state_cnt++;
	ctx->ipfw_state_loosecnt++;
	if (ctx->ipfw_state_loosecnt >= ipfw_state_loosecnt_updthr) {
		ipfw_gd.ipfw_state_loosecnt += ctx->ipfw_state_loosecnt;
		ctx->ipfw_state_loosecnt = 0;
	}

	dup = RB_INSERT(ipfw_state_tree, &ctx->ipfw_state_tree, s);
	if (dup != NULL)
		panic("ipfw: state exists");
	TAILQ_INSERT_TAIL(&ctx->ipfw_state_list, s, st_link);

	/*
	 * Update this state:
	 * Set st_expire and st_state.
	 */
	ipfw_state_update(id, MATCH_FORWARD, tcp, s);

	if (t != NULL) {
		/* Keep the track referenced. */
		LIST_INSERT_HEAD(&t->t_state_list, s, st_trklink);
		s->st_track = t;
	}
	return (s);
}
1918
/*
 * Unlink a track from the per-cpu tree/list and free it, then drop
 * its reference on the shared trkcnt.  The trkcnt uses fdrop()-style
 * lock-free reference counting; the final reference removes it from
 * the global tree under the trkcnt token and either caches it as the
 * per-cpu spare or frees it.  Returns TRUE iff the trkcnt itself was
 * released, i.e. the global track count was decremented.
 */
static boolean_t
ipfw_track_free(struct ipfw_context *ctx, struct ipfw_track *t)
{
	struct ipfw_trkcnt *trk;
	boolean_t trk_freed = FALSE;

	KASSERT(t->t_count != NULL, ("track anchor"));
	KASSERT(LIST_EMPTY(&t->t_state_list),
	    ("invalid track is still referenced"));

	trk = t->t_trkcnt;
	KASSERT(trk != NULL, ("track has no trkcnt"));

	RB_REMOVE(ipfw_track_tree, &ctx->ipfw_track_tree, t);
	TAILQ_REMOVE(&ctx->ipfw_track_list, t, t_link);
	kfree(t, M_IPFW);

	/*
	 * fdrop() style reference counting.
	 * See kern/kern_descrip.c fdrop().
	 */
	for (;;) {
		int refs = trk->tc_refs;

		cpu_ccfence();
		KASSERT(refs > 0, ("invalid trkcnt refs %d", refs));
		if (refs == 1) {
			/* Last ref: the 1->0 transition is done under
			 * the token so the tree removal is atomic with
			 * respect to other last-ref droppers. */
			IPFW_TRKCNT_TOKGET;
			if (atomic_cmpset_int(&trk->tc_refs, refs, 0)) {
				KASSERT(trk->tc_count == 0,
				    ("%d states reference this trkcnt",
				     trk->tc_count));
				RB_REMOVE(ipfw_trkcnt_tree,
				    &ipfw_gd.ipfw_trkcnt_tree, trk);

				KASSERT(ipfw_gd.ipfw_trkcnt_cnt > 0,
				    ("invalid trkcnt cnt %d",
				     ipfw_gd.ipfw_trkcnt_cnt));
				ipfw_gd.ipfw_trkcnt_cnt--;
				IPFW_TRKCNT_TOKREL;

				if (ctx->ipfw_trkcnt_spare == NULL)
					ctx->ipfw_trkcnt_spare = trk;
				else
					kfree(trk, M_IPFW);
				trk_freed = TRUE;
				break; /* done! */
			}
			IPFW_TRKCNT_TOKREL;
			/* retry */
		} else if (atomic_cmpset_int(&trk->tc_refs, refs, refs - 1)) {
			break; /* done! */
		}
		/* retry */
	}
	return (trk_freed);
}
1976
1977 static void
1978 ipfw_track_flush(struct ipfw_context *ctx, struct ip_fw *rule)
1979 {
1980         struct ipfw_track *t, *tn;
1981
1982         TAILQ_FOREACH_MUTABLE(t, &ctx->ipfw_track_list, t_link, tn) {
1983                 if (t->t_count == NULL) /* anchor */
1984                         continue;
1985                 if (rule != NULL && t->t_rule != rule)
1986                         continue;
1987                 ipfw_track_free(ctx, t);
1988         }
1989 }
1990
/*
 * Expire the states referenced by the given track.  Stale states are
 * always removed; when 'reap' is TRUE, closed TCP states are removed
 * regardless of their expire time.  Rate-limited to one run per
 * second per track.  Returns TRUE iff at least one state was deleted.
 */
static boolean_t
ipfw_track_state_expire(struct ipfw_context *ctx, struct ipfw_track *t,
    boolean_t reap)
{
	struct ipfw_state *s, *sn;
	boolean_t ret = FALSE;

	KASSERT(t->t_count != NULL, ("track anchor"));

	if (LIST_EMPTY(&t->t_state_list))
		return (FALSE);

	/*
	 * Do not expire more than once per second, it is useless.
	 */
	if (t->t_lastexp == time_uptime)
		return (FALSE);
	t->t_lastexp = time_uptime;

	LIST_FOREACH_MUTABLE(s, &t->t_state_list, st_trklink, sn) {
		if (TIME_LEQ(s->st_expire, time_uptime) ||
		    (reap && IPFW_STATE_TCPCLOSED(s))) {
			KASSERT(s->st_track == t,
			    ("state track %p does not match %p",
			     s->st_track, t));
			ipfw_state_del(ctx, s);
			ret = TRUE;
		}
	}
	return (ret);
}
2022
2023 static __inline struct ipfw_trkcnt *
2024 ipfw_trkcnt_alloc(struct ipfw_context *ctx)
2025 {
2026         struct ipfw_trkcnt *trk;
2027
2028         if (ctx->ipfw_trkcnt_spare != NULL) {
2029                 trk = ctx->ipfw_trkcnt_spare;
2030                 ctx->ipfw_trkcnt_spare = NULL;
2031         } else {
2032                 trk = kmalloc_cachealign(sizeof(*trk), M_IPFW,
2033                     M_INTWAIT | M_NULLOK);
2034         }
2035         return (trk);
2036 }
2037
2038 static void
2039 ipfw_track_expire_done(struct ipfw_context *ctx)
2040 {
2041
2042         KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
2043             ("trackexp is not in progress"));
2044         ctx->ipfw_flags &= ~IPFW_FLAG_TRACKEXP;
2045         callout_reset(&ctx->ipfw_trackto_ch, hz,
2046             ipfw_track_expire_ipifunc, NULL);
2047 }
2048
2049 static void
2050 ipfw_track_expire_more(struct ipfw_context *ctx)
2051 {
2052         struct netmsg_base *nm = &ctx->ipfw_trackexp_more;
2053
2054         KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
2055             ("trackexp is not in progress"));
2056         KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
2057             ("trackexp more did not finish"));
2058         netisr_sendmsg_oncpu(nm);
2059 }
2060
/*
 * Walk the track list from the anchor, expiring tracks that are no
 * longer referenced by any state.  At most scan_max tracks are
 * scanned and expire_max tracks expired per run; when either budget
 * is exhausted the anchor stays in place and the walk is continued
 * later by ipfw_track_expire_more_dispatch().  Returns the number of
 * tracks expired by this run.
 */
static int
ipfw_track_expire_loop(struct ipfw_context *ctx, struct ipfw_track *anchor,
    int scan_max, int expire_max)
{
	struct ipfw_track *t;
	int scanned = 0, expired = 0;
	boolean_t reap = FALSE;

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
	    ("trackexp is not in progress"));

	if (ctx->ipfw_flags & IPFW_FLAG_TRACKREAP)
		reap = TRUE;

	while ((t = TAILQ_NEXT(anchor, t_link)) != NULL) {
		if (scanned++ >= scan_max) {
			/* Scan budget exhausted; resume later. */
			ipfw_track_expire_more(ctx);
			return (expired);
		}

		/* Advance the anchor past the track being examined. */
		TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
		TAILQ_INSERT_AFTER(&ctx->ipfw_track_list, t, anchor, t_link);

		if (t->t_count == NULL) /* anchor */
			continue;

		ipfw_track_state_expire(ctx, t, reap);
		if (!LIST_EMPTY(&t->t_state_list)) {
			/* There are states referencing this track. */
			continue;
		}

		if (TIME_LEQ(t->t_expire, time_uptime) || reap) {
			/* Expired. */
			if (ipfw_track_free(ctx, t)) {
				if (++expired >= expire_max) {
					/* Expire budget exhausted. */
					ipfw_track_expire_more(ctx);
					return (expired);
				}
			}
		}
	}
	/* End of list; detach the anchor and finish this pass. */
	TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
	ipfw_track_expire_done(ctx);
	return (expired);
}
2107
/*
 * Start expiring tracks on the current cpu: mark the expiry as in
 * progress, hook the scan anchor at the head of the track list and
 * walk from it.  Returns the number of tracks expired by this run.
 */
static int
ipfw_track_expire_start(struct ipfw_context *ctx, int scan_max, int expire_max)
{
	struct ipfw_track *anchor;

	KASSERT((ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) == 0,
	    ("trackexp is in progress"));
	ctx->ipfw_flags |= IPFW_FLAG_TRACKEXP;

	/* Nothing to expire. */
	if (RB_EMPTY(&ctx->ipfw_track_tree)) {
		ipfw_track_expire_done(ctx);
		return (0);
	}

	/*
	 * Do not expire more than once per second, it is useless.
	 * (The rate limit is bypassed while reaping, i.e. when we
	 * are short of tracks.)
	 */
	if ((ctx->ipfw_flags & IPFW_FLAG_TRACKREAP) == 0 &&
	    ctx->ipfw_track_lastexp == time_uptime) {
		ipfw_track_expire_done(ctx);
		return (0);
	}
	ctx->ipfw_track_lastexp = time_uptime;

	/* The anchor marks the scan position across chunked runs. */
	anchor = &ctx->ipfw_trackexp_anch;
	TAILQ_INSERT_HEAD(&ctx->ipfw_track_list, anchor, t_link);
	return (ipfw_track_expire_loop(ctx, anchor, scan_max, expire_max));
}
2136
2137 static void
2138 ipfw_track_expire_more_dispatch(netmsg_t nm)
2139 {
2140         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2141         struct ipfw_track *anchor;
2142
2143         ASSERT_NETISR_NCPUS(mycpuid);
2144         KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
2145             ("trackexp is not in progress"));
2146
2147         /* Reply ASAP */
2148         netisr_replymsg(&nm->base, 0);
2149
2150         anchor = &ctx->ipfw_trackexp_anch;
2151         if (RB_EMPTY(&ctx->ipfw_track_tree)) {
2152                 TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
2153                 ipfw_track_expire_done(ctx);
2154                 return;
2155         }
2156         ipfw_track_expire_loop(ctx, anchor,
2157             ipfw_track_scan_max, ipfw_track_expire_max);
2158 }
2159
2160 static void
2161 ipfw_track_expire_dispatch(netmsg_t nm)
2162 {
2163         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2164
2165         ASSERT_NETISR_NCPUS(mycpuid);
2166
2167         /* Reply ASAP */
2168         crit_enter();
2169         netisr_replymsg(&nm->base, 0);
2170         crit_exit();
2171
2172         if (ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) {
2173                 /* Running; done. */
2174                 return;
2175         }
2176         ipfw_track_expire_start(ctx,
2177             ipfw_track_scan_max, ipfw_track_expire_max);
2178 }
2179
2180 static void
2181 ipfw_track_expire_ipifunc(void *dummy __unused)
2182 {
2183         struct netmsg_base *msg;
2184
2185         KKASSERT(mycpuid < netisr_ncpus);
2186         msg = &ipfw_ctx[mycpuid]->ipfw_trackexp_nm;
2187
2188         crit_enter();
2189         if (msg->lmsg.ms_flags & MSGF_DONE)
2190                 netisr_sendmsg_oncpu(msg);
2191         crit_exit();
2192 }
2193
/*
 * Synchronously reap tracks on the current cpu; called when we are
 * short of tracks.  If no expiry walk is in progress, one is started
 * with an unlimited scan budget; otherwise the in-progress walk is
 * continued from its anchor, again ignoring the scan limit.  Returns
 * the number of tracks reaped.
 */
static int
ipfw_track_reap(struct ipfw_context *ctx)
{
	struct ipfw_track *t, *anchor;
	int expired;

	if ((ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) == 0) {
		/*
		 * Kick start track expiring.  Ignore scan limit,
		 * we are short of tracks.
		 */
		ctx->ipfw_flags |= IPFW_FLAG_TRACKREAP;
		expired = ipfw_track_expire_start(ctx, INT_MAX,
		    ipfw_track_reap_max);
		ctx->ipfw_flags &= ~IPFW_FLAG_TRACKREAP;
		return (expired);
	}

	/*
	 * Tracks are being expired.
	 */

	if (RB_EMPTY(&ctx->ipfw_track_tree))
		return (0);

	expired = 0;
	anchor = &ctx->ipfw_trackexp_anch;
	while ((t = TAILQ_NEXT(anchor, t_link)) != NULL) {
		/*
		 * Ignore scan limit; we are short of tracks.
		 */

		/* Advance the anchor past the track being examined. */
		TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
		TAILQ_INSERT_AFTER(&ctx->ipfw_track_list, t, anchor, t_link);

		if (t->t_count == NULL) /* anchor */
			continue;

		/* Reap aggressively: also expire closed TCP states. */
		ipfw_track_state_expire(ctx, t, TRUE);
		if (!LIST_EMPTY(&t->t_state_list)) {
			/* There are states referencing this track. */
			continue;
		}

		if (ipfw_track_free(ctx, t)) {
			if (++expired >= ipfw_track_reap_max) {
				ipfw_track_expire_more(ctx);
				break;
			}
		}
	}
	/*
	 * NOTE:
	 * Leave the anchor on the list, even if the end of the list has
	 * been reached.  ipfw_track_expire_more_dispatch() will handle
	 * the removal.
	 */
	return (expired);
}
2253
/*
 * Find or create the per-cpu track, and its globally shared trkcnt
 * (the limit counter shared by all cpus for the same track key), for
 * an O_LIMIT rule.  Returns NULL on allocation failure or when the
 * global trkcnt limit is hit and reaping frees nothing; on success
 * the track's lifetime is refreshed.
 */
static struct ipfw_track *
ipfw_track_alloc(struct ipfw_context *ctx, const struct ipfw_flow_id *id,
    uint16_t limit_mask, struct ip_fw *rule)
{
	struct ipfw_track *key, *t, *dup;
	struct ipfw_trkcnt *trk, *ret;
	boolean_t do_expire = FALSE;

	KASSERT(rule->track_ruleid != 0,
	    ("rule %u has no track ruleid", rule->rulenum));

	/* Build the lookup key from the fields selected by limit_mask. */
	key = &ctx->ipfw_track_tmpkey;
	key->t_proto = id->proto;
	key->t_addrs = 0;
	key->t_ports = 0;
	key->t_rule = rule;
	if (limit_mask & DYN_SRC_ADDR)
		key->t_saddr = id->src_ip;
	if (limit_mask & DYN_DST_ADDR)
		key->t_daddr = id->dst_ip;
	if (limit_mask & DYN_SRC_PORT)
		key->t_sport = id->src_port;
	if (limit_mask & DYN_DST_PORT)
		key->t_dport = id->dst_port;

	t = RB_FIND(ipfw_track_tree, &ctx->ipfw_track_tree, key);
	if (t != NULL)
		goto done;

	t = kmalloc(sizeof(*t), M_IPFW, M_INTWAIT | M_NULLOK);
	if (t == NULL) {
		ctx->ipfw_tks_nomem++;
		return (NULL);
	}

	t->t_key = key->t_key;
	t->t_rule = rule;
	t->t_lastexp = 0;
	LIST_INIT(&t->t_state_list);

	if (ipfw_gd.ipfw_trkcnt_cnt >= ipfw_track_max) {
		/* Global trkcnt limit reached; do not allocate. */
		time_t globexp, uptime;

		trk = NULL;
		do_expire = TRUE;

		/*
		 * Do not expire globally more than once per second,
		 * it is useless.
		 */
		uptime = time_uptime;
		globexp = ipfw_gd.ipfw_track_globexp;
		if (globexp != uptime &&
		    atomic_cmpset_long(&ipfw_gd.ipfw_track_globexp,
		    globexp, uptime)) {
			int cpu;

			/* Expire tracks on other CPUs. */
			for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
				if (cpu == mycpuid)
					continue;
				lwkt_send_ipiq(globaldata_find(cpu),
				    ipfw_track_expire_ipifunc, NULL);
			}
		}
	} else {
		trk = ipfw_trkcnt_alloc(ctx);
	}
	if (trk == NULL) {
		/*
		 * No trkcnt could be allocated; another cpu may
		 * already have installed a shared trkcnt for this
		 * track key, so look it up in the global tree.
		 */
		struct ipfw_trkcnt *tkey;

		tkey = &ctx->ipfw_trkcnt_tmpkey;
		key = NULL; /* tkey overlaps key */

		tkey->tc_key = t->t_key;
		tkey->tc_ruleid = rule->track_ruleid;

		IPFW_TRKCNT_TOKGET;
		trk = RB_FIND(ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree,
		    tkey);
		if (trk == NULL) {
			/* Not shared either; reap and retry, or fail. */
			IPFW_TRKCNT_TOKREL;
			if (do_expire) {
				ctx->ipfw_tks_reap++;
				if (ipfw_track_reap(ctx) > 0) {
					if (ipfw_gd.ipfw_trkcnt_cnt <
					    ipfw_track_max) {
						trk = ipfw_trkcnt_alloc(ctx);
						if (trk != NULL)
							goto install;
						ctx->ipfw_tks_cntnomem++;
					} else {
						ctx->ipfw_tks_overflow++;
					}
				} else {
					ctx->ipfw_tks_reapfailed++;
					ctx->ipfw_tks_overflow++;
				}
			} else {
				ctx->ipfw_tks_cntnomem++;
			}
			kfree(t, M_IPFW);
			return (NULL);
		}
		KASSERT(trk->tc_refs > 0 && trk->tc_refs < netisr_ncpus,
		    ("invalid trkcnt refs %d", trk->tc_refs));
		atomic_add_int(&trk->tc_refs, 1);
		IPFW_TRKCNT_TOKREL;
	} else {
install:
		/*
		 * Initialize the fresh trkcnt and try to install it
		 * into the global tree; if another cpu raced us and
		 * installed one first, use that one and cache ours
		 * as the per-cpu spare.
		 */
		trk->tc_key = t->t_key;
		trk->tc_ruleid = rule->track_ruleid;
		trk->tc_refs = 0;
		trk->tc_count = 0;
		trk->tc_expire = 0;
		trk->tc_rulenum = rule->rulenum;

		IPFW_TRKCNT_TOKGET;
		ret = RB_INSERT(ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree,
		    trk);
		if (ret != NULL) {
			KASSERT(ret->tc_refs > 0 &&
			    ret->tc_refs < netisr_ncpus,
			    ("invalid trkcnt refs %d", ret->tc_refs));
			KASSERT(ctx->ipfw_trkcnt_spare == NULL,
			    ("trkcnt spare was installed"));
			ctx->ipfw_trkcnt_spare = trk;
			trk = ret;
		} else {
			ipfw_gd.ipfw_trkcnt_cnt++;
		}
		atomic_add_int(&trk->tc_refs, 1);
		IPFW_TRKCNT_TOKREL;
	}
	t->t_count = &trk->tc_count;
	t->t_trkcnt = trk;

	dup = RB_INSERT(ipfw_track_tree, &ctx->ipfw_track_tree, t);
	if (dup != NULL)
		panic("ipfw: track exists");
	TAILQ_INSERT_TAIL(&ctx->ipfw_track_list, t, t_link);
done:
	t->t_expire = time_uptime + dyn_short_lifetime;
	return (t);
}
2399
2400 /*
2401  * Install state for rule type cmd->o.opcode
2402  *
 * Returns 1 (failure) if the state is not installed because of errors or
 * because state limitations are enforced.
2405  */
static int
ipfw_state_install(struct ipfw_context *ctx, struct ip_fw *rule,
    ipfw_insn_limit *cmd, struct ip_fw_args *args, const struct tcphdr *tcp)
{
	struct ipfw_state *s;
	struct ipfw_track *t;
	int count, diff;

	/*
	 * Global state limit enforcement.  The loose (unsynchronized)
	 * counter is checked first as a cheap filter; only when it trips
	 * do we pay for an accurate cross-CPU count via
	 * ipfw_state_cntsync().
	 */
	if (ipfw_gd.ipfw_state_loosecnt >= ipfw_state_max &&
	    (diff = (ipfw_state_cntsync() - ipfw_state_max)) >= 0) {
		boolean_t overflow = TRUE;

		/* First try to reap 'diff' expired states on this CPU. */
		ctx->ipfw_sts_reap++;
		if (ipfw_state_reap(ctx, diff) == 0)
			ctx->ipfw_sts_reapfailed++;
		if (ipfw_state_cntsync() < ipfw_state_max)
			overflow = FALSE;

		if (overflow) {
			time_t globexp, uptime;
			int cpu;

			/*
			 * Do not expire globally more than once per second,
			 * it is useless.
			 */
			uptime = time_uptime;
			globexp = ipfw_gd.ipfw_state_globexp;
			if (globexp == uptime ||
			    !atomic_cmpset_long(&ipfw_gd.ipfw_state_globexp,
			    globexp, uptime)) {
				/*
				 * Either already done this second, or we
				 * lost the cmpset race to another CPU that
				 * is kicking off the global expiration.
				 */
				ctx->ipfw_sts_overflow++;
				return (1);
			}

			/* Expire states on other CPUs. */
			for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
				if (cpu == mycpuid)
					continue;
				lwkt_send_ipiq(globaldata_find(cpu),
				    ipfw_state_expire_ipifunc, NULL);
			}
			/*
			 * The IPI-driven expiration is asynchronous; this
			 * install still fails, so the current packet is
			 * not tracked.
			 */
			ctx->ipfw_sts_overflow++;
			return (1);
		}
	}

	switch (cmd->o.opcode) {
	case O_KEEP_STATE: /* bidir rule */
		s = ipfw_state_add(ctx, &args->f_id, O_KEEP_STATE, rule, NULL,
		    tcp);
		if (s == NULL)
			return (1);
		break;

	case O_LIMIT: /* limit number of sessions */
		/* Find or create the track matching this flow's limit mask. */
		t = ipfw_track_alloc(ctx, &args->f_id, cmd->limit_mask, rule);
		if (t == NULL)
			return (1);

		/*
		 * At or above the limit: try to expire states bound to
		 * this track before giving up.
		 */
		if (*t->t_count >= cmd->conn_limit) {
			if (!ipfw_track_state_expire(ctx, t, TRUE))
				return (1);
		}
		/*
		 * Atomically reserve a slot under the connection limit;
		 * CAS loop because the counter is shared across CPUs.
		 */
		for (;;) {
			count = *t->t_count;
			if (count >= cmd->conn_limit)
				return (1);
			if (atomic_cmpset_int(t->t_count, count, count + 1))
				break;
		}

		s = ipfw_state_add(ctx, &args->f_id, O_LIMIT, rule, t, tcp);
		if (s == NULL) {
			/* Undo damage. */
			atomic_subtract_int(t->t_count, 1);
			return (1);
		}
		break;

	default:
		panic("unknown state type %u\n", cmd->o.opcode);
	}
	return (0);
}
2491
2492 static int
2493 ipfw_table_lookup(struct ipfw_context *ctx, uint16_t tableid,
2494     const struct in_addr *in)
2495 {
2496         struct radix_node_head *rnh;
2497         struct sockaddr_in sin;
2498         struct ipfw_tblent *te;
2499
2500         KASSERT(tableid < ipfw_table_max, ("invalid tableid %u", tableid));
2501         rnh = ctx->ipfw_tables[tableid];
2502         if (rnh == NULL)
2503                 return (0); /* no match */
2504
2505         memset(&sin, 0, sizeof(sin));
2506         sin.sin_family = AF_INET;
2507         sin.sin_len = sizeof(sin);
2508         sin.sin_addr = *in;
2509
2510         te = (struct ipfw_tblent *)rnh->rnh_matchaddr((char *)&sin, rnh);
2511         if (te == NULL)
2512                 return (0); /* no match */
2513
2514         te->te_use++;
2515         te->te_lastuse = time_second;
2516         return (1); /* match */
2517 }
2518
2519 /*
2520  * Transmit a TCP packet, containing either a RST or a keepalive.
2521  * When flags & TH_RST, we are sending a RST packet, because of a
2522  * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * the direction: forward if set, reverse if clear.
2524  *
2525  * Only {src,dst}_{ip,port} of "id" are used.
2526  */
static void
send_pkt(const struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	struct route sro;	/* fake route */

	MGETHDR(m, M_NOWAIT, MT_HEADER);
	if (m == NULL)
		return;		/* cannot allocate an mbuf; drop silently */
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
	m->m_data += max_linkhdr;	/* reserve room for a link header */

	ip = mtod(m, struct ip *);
	bzero(ip, m->m_len);
	tcp = (struct tcphdr *)(ip + 1); /* no IP options */
	ip->ip_p = IPPROTO_TCP;
	tcp->th_off = 5;		/* 20-byte TCP header, no options */

	/*
	 * Assume we are sending a RST (or a keepalive in the reverse
	 * direction), swap src and destination addresses and ports.
	 */
	ip->ip_src.s_addr = htonl(id->dst_ip);
	ip->ip_dst.s_addr = htonl(id->src_ip);
	tcp->th_sport = htons(id->dst_port);
	tcp->th_dport = htons(id->src_port);
	if (flags & TH_RST) {	/* we are sending a RST */
		if (flags & TH_ACK) {
			/* RST for an ACK segment: seq comes from its ack. */
			tcp->th_seq = htonl(ack);
			tcp->th_ack = htonl(0);
			tcp->th_flags = TH_RST;
		} else {
			/* RST|ACK: a SYN consumes one sequence number. */
			if (flags & TH_SYN)
				seq++;
			tcp->th_seq = htonl(0);
			tcp->th_ack = htonl(seq);
			tcp->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * We are sending a keepalive. flags & TH_SYN determines
		 * the direction, forward if set, reverse if clear.
		 * NOTE: seq and ack are always assumed to be correct
		 * as set by the caller. This may be confusing...
		 */
		if (flags & TH_SYN) {
			/*
			 * we have to rewrite the correct addresses!
			 */
			ip->ip_dst.s_addr = htonl(id->dst_ip);
			ip->ip_src.s_addr = htonl(id->src_ip);
			tcp->th_dport = htons(id->dst_port);
			tcp->th_sport = htons(id->src_port);
		}
		tcp->th_seq = htonl(seq);
		tcp->th_ack = htonl(ack);
		tcp->th_flags = TH_ACK;
	}

	/*
	 * set ip_len to the payload size so we can compute
	 * the tcp checksum on the pseudoheader
	 * XXX check this, could save a couple of words ?
	 */
	ip->ip_len = htons(sizeof(struct tcphdr));
	tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

	/*
	 * now fill fields left out earlier.
	 * NOTE(review): ip_len is rewritten in host byte order here,
	 * unlike the network-order value used for the checksum above;
	 * presumably ip_output() expects host order -- confirm.
	 */
	ip->ip_ttl = ip_defttl;
	ip->ip_len = m->m_pkthdr.len;

	bzero(&sro, sizeof(sro));
	ip_rtaddr(ip->ip_dst, &sro);

	/* Mark as generated so ipfw_chk() passes it without re-filtering. */
	m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
	ip_output(m, NULL, &sro, 0, NULL, NULL);
	if (sro.ro_rt)
		RTFREE(sro.ro_rt);
}
2611
2612 /*
2613  * Send a reject message, consuming the mbuf passed as an argument.
2614  */
2615 static void
2616 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
2617 {
2618         if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
2619                 /* We need the IP header in host order for icmp_error(). */
2620                 if (args->eh != NULL) {
2621                         struct ip *ip = mtod(args->m, struct ip *);
2622
2623                         ip->ip_len = ntohs(ip->ip_len);
2624                         ip->ip_off = ntohs(ip->ip_off);
2625                 }
2626                 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
2627         } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
2628                 struct tcphdr *const tcp =
2629                     L3HDR(struct tcphdr, mtod(args->m, struct ip *));
2630
2631                 if ((tcp->th_flags & TH_RST) == 0) {
2632                         send_pkt(&args->f_id, ntohl(tcp->th_seq),
2633                                  ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
2634                 }
2635                 m_freem(args->m);
2636         } else {
2637                 m_freem(args->m);
2638         }
2639         args->m = NULL;
2640 }
2641
2642 /*
2643  * Given an ip_fw *, lookup_next_rule will return a pointer
2644  * to the next rule, which can be either the jump
2645  * target (for skipto instructions) or the next one in the list (in
2646  * all other cases including a missing jump target).
2647  * The result is also written in the "next_rule" field of the rule.
2648  * Backward jumps are not allowed, so start looking from the next
2649  * rule...
2650  *
2651  * This never returns NULL -- in case we do not have an exact match,
2652  * the next rule is returned. When the ruleset is changed,
2653  * pointers are flushed so we are always correct.
2654  */
2655 static struct ip_fw *
2656 lookup_next_rule(struct ip_fw *me)
2657 {
2658         struct ip_fw *rule = NULL;
2659         ipfw_insn *cmd;
2660
2661         /* look for action, in case it is a skipto */
2662         cmd = ACTION_PTR(me);
2663         if (cmd->opcode == O_LOG)
2664                 cmd += F_LEN(cmd);
2665         if (cmd->opcode == O_SKIPTO) {
2666                 for (rule = me->next; rule; rule = rule->next) {
2667                         if (rule->rulenum >= cmd->arg1)
2668                                 break;
2669                 }
2670         }
2671         if (rule == NULL)                       /* failure or not a skipto */
2672                 rule = me->next;
2673         me->next_rule = rule;
2674         return rule;
2675 }
2676
2677 static int
2678 ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
2679                 enum ipfw_opcodes opcode, uid_t uid)
2680 {
2681         struct in_addr src_ip, dst_ip;
2682         struct inpcbinfo *pi;
2683         boolean_t wildcard;
2684         struct inpcb *pcb;
2685
2686         if (fid->proto == IPPROTO_TCP) {
2687                 wildcard = FALSE;
2688                 pi = &tcbinfo[mycpuid];
2689         } else if (fid->proto == IPPROTO_UDP) {
2690                 wildcard = TRUE;
2691                 pi = &udbinfo[mycpuid];
2692         } else {
2693                 return 0;
2694         }
2695
2696         /*
2697          * Values in 'fid' are in host byte order
2698          */
2699         dst_ip.s_addr = htonl(fid->dst_ip);
2700         src_ip.s_addr = htonl(fid->src_ip);
2701         if (oif) {
2702                 pcb = in_pcblookup_hash(pi,
2703                         dst_ip, htons(fid->dst_port),
2704                         src_ip, htons(fid->src_port),
2705                         wildcard, oif);
2706         } else {
2707                 pcb = in_pcblookup_hash(pi,
2708                         src_ip, htons(fid->src_port),
2709                         dst_ip, htons(fid->dst_port),
2710                         wildcard, NULL);
2711         }
2712         if (pcb == NULL || pcb->inp_socket == NULL)
2713                 return 0;
2714
2715         if (opcode == O_UID) {
2716 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b))
2717                 return !socheckuid(pcb->inp_socket, uid);
2718 #undef socheckuid
2719         } else  {
2720                 return groupmember(uid, pcb->inp_socket->so_cred);
2721         }
2722 }
2723
2724 /*
2725  * The main check routine for the firewall.
2726  *
2727  * All arguments are in args so we can modify them and return them
2728  * back to the caller.
2729  *
2730  * Parameters:
2731  *
2732  *      args->m (in/out) The packet; we set to NULL when/if we nuke it.
2733  *              Starts with the IP header.
2734  *      args->eh (in)   Mac header if present, or NULL for layer3 packet.
2735  *      args->oif       Outgoing interface, or NULL if packet is incoming.
2736  *              The incoming interface is in the mbuf. (in)
2737  *
2738  *      args->rule      Pointer to the last matching rule (in/out)
2739  *      args->f_id      Addresses grabbed from the packet (out)
2740  *
2741  * Return value:
2742  *
2743  *      If the packet was denied/rejected and has been dropped, *m is equal
2744  *      to NULL upon return.
2745  *
2746  *      IP_FW_DENY      the packet must be dropped.
2747  *      IP_FW_PASS      The packet is to be accepted and routed normally.
2748  *      IP_FW_DIVERT    Divert the packet to port (args->cookie)
2749  *      IP_FW_TEE       Tee the packet to port (args->cookie)
2750  *      IP_FW_DUMMYNET  Send the packet to pipe/queue (args->cookie)
2751  *      IP_FW_CONTINUE  Continue processing on another cpu.
2752  */
2753 static int
2754 ipfw_chk(struct ip_fw_args *args)
2755 {
2756         /*
2757          * Local variables hold state during the processing of a packet.
2758          *
2759          * IMPORTANT NOTE: to speed up the processing of rules, there
2760          * are some assumption on the values of the variables, which
2761          * are documented here. Should you change them, please check
2762          * the implementation of the various instructions to make sure
2763          * that they still work.
2764          *
2765          * args->eh     The MAC header. It is non-null for a layer2
2766          *      packet, it is NULL for a layer-3 packet.
2767          *
2768          * m | args->m  Pointer to the mbuf, as received from the caller.
2769          *      It may change if ipfw_chk() does an m_pullup, or if it
2770          *      consumes the packet because it calls send_reject().
2771          *      XXX This has to change, so that ipfw_chk() never modifies
2772          *      or consumes the buffer.
2773          * ip   is simply an alias of the value of m, and it is kept
2774          *      in sync with it (the packet is  supposed to start with
2775          *      the ip header).
2776          */
2777         struct mbuf *m = args->m;
2778         struct ip *ip = mtod(m, struct ip *);
2779
2780         /*
2781          * oif | args->oif      If NULL, ipfw_chk has been called on the
2782          *      inbound path (ether_input, ip_input).
2783          *      If non-NULL, ipfw_chk has been called on the outbound path
2784          *      (ether_output, ip_output).
2785          */
2786         struct ifnet *oif = args->oif;
2787
2788         struct ip_fw *f = NULL;         /* matching rule */
2789         int retval = IP_FW_PASS;
2790         struct m_tag *mtag;
2791         struct divert_info *divinfo;
2792
2793         /*
2794          * hlen The length of the IPv4 header.
2795          *      hlen >0 means we have an IPv4 packet.
2796          */
2797         u_int hlen = 0;         /* hlen >0 means we have an IP pkt */
2798
2799         /*
2800          * offset       The offset of a fragment. offset != 0 means that
2801          *      we have a fragment at this offset of an IPv4 packet.
2802          *      offset == 0 means that (if this is an IPv4 packet)
2803          *      this is the first or only fragment.
2804          */
2805         u_short offset = 0;
2806
2807         /*
2808          * Local copies of addresses. They are only valid if we have
2809          * an IP packet.
2810          *
2811          * proto        The protocol. Set to 0 for non-ip packets,
2812          *      or to the protocol read from the packet otherwise.
2813          *      proto != 0 means that we have an IPv4 packet.
2814          *
2815          * src_port, dst_port   port numbers, in HOST format. Only
2816          *      valid for TCP and UDP packets.
2817          *
2818          * src_ip, dst_ip       ip addresses, in NETWORK format.
2819          *      Only valid for IPv4 packets.
2820          */
2821         uint8_t proto;
2822         uint16_t src_port = 0, dst_port = 0;    /* NOTE: host format    */
2823         struct in_addr src_ip, dst_ip;          /* NOTE: network format */
2824         uint16_t ip_len = 0;
2825
2826         /*
2827          * dyn_dir = MATCH_UNKNOWN when rules unchecked,
2828          *      MATCH_NONE when checked and not matched (dyn_f = NULL),
2829          *      MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
2830          */
2831         int dyn_dir = MATCH_UNKNOWN;
2832         struct ip_fw *dyn_f = NULL;
2833         int cpuid = mycpuid;
2834         struct ipfw_context *ctx;
2835
2836         ASSERT_NETISR_NCPUS(cpuid);
2837         ctx = ipfw_ctx[cpuid];
2838
2839         if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
2840                 return IP_FW_PASS;      /* accept */
2841
2842         if (args->eh == NULL ||         /* layer 3 packet */
2843             (m->m_pkthdr.len >= sizeof(struct ip) &&
2844              ntohs(args->eh->ether_type) == ETHERTYPE_IP))
2845                 hlen = ip->ip_hl << 2;
2846
2847         /*
2848          * Collect parameters into local variables for faster matching.
2849          */
2850         if (hlen == 0) {        /* do not grab addresses for non-ip pkts */
2851                 proto = args->f_id.proto = 0;   /* mark f_id invalid */
2852                 goto after_ip_checks;
2853         }
2854
2855         proto = args->f_id.proto = ip->ip_p;
2856         src_ip = ip->ip_src;
2857         dst_ip = ip->ip_dst;
2858         if (args->eh != NULL) { /* layer 2 packets are as on the wire */
2859                 offset = ntohs(ip->ip_off) & IP_OFFMASK;
2860                 ip_len = ntohs(ip->ip_len);
2861         } else {
2862                 offset = ip->ip_off & IP_OFFMASK;
2863                 ip_len = ip->ip_len;
2864         }
2865
2866 #define PULLUP_TO(len)                          \
2867 do {                                            \
2868         if (m->m_len < (len)) {                 \
2869                 args->m = m = m_pullup(m, (len));\
2870                 if (m == NULL)                  \
2871                         goto pullup_failed;     \
2872                 ip = mtod(m, struct ip *);      \
2873         }                                       \
2874 } while (0)
2875
2876         if (offset == 0) {
2877                 switch (proto) {
2878                 case IPPROTO_TCP:
2879                         {
2880                                 struct tcphdr *tcp;
2881
2882                                 PULLUP_TO(hlen + sizeof(struct tcphdr));
2883                                 tcp = L3HDR(struct tcphdr, ip);
2884                                 dst_port = tcp->th_dport;
2885                                 src_port = tcp->th_sport;
2886                                 args->f_id.flags = tcp->th_flags;
2887                         }
2888                         break;
2889
2890                 case IPPROTO_UDP:
2891                         {
2892                                 struct udphdr *udp;
2893
2894                                 PULLUP_TO(hlen + sizeof(struct udphdr));
2895                                 udp = L3HDR(struct udphdr, ip);
2896                                 dst_port = udp->uh_dport;
2897                                 src_port = udp->uh_sport;
2898                         }
2899                         break;
2900
2901                 case IPPROTO_ICMP:
2902                         PULLUP_TO(hlen + 4);    /* type, code and checksum. */
2903                         args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
2904                         break;
2905
2906                 default:
2907                         break;
2908                 }
2909         }
2910
2911         args->f_id.src_ip = ntohl(src_ip.s_addr);
2912         args->f_id.dst_ip = ntohl(dst_ip.s_addr);
2913         args->f_id.src_port = src_port = ntohs(src_port);
2914         args->f_id.dst_port = dst_port = ntohs(dst_port);
2915
2916 after_ip_checks:
2917         if (args->rule) {
2918                 /*
2919                  * Packet has already been tagged. Look for the next rule
2920                  * to restart processing.
2921                  *
2922                  * If fw_one_pass != 0 then just accept it.
2923                  * XXX should not happen here, but optimized out in
2924                  * the caller.
2925                  */
2926                 if (fw_one_pass && !args->cont)
2927                         return IP_FW_PASS;
2928                 args->cont = 0;
2929
2930                 /* This rule is being/has been flushed */
2931                 if (ipfw_flushing)
2932                         return IP_FW_DENY;
2933
2934                 KASSERT(args->rule->cpuid == cpuid,
2935                         ("rule used on cpu%d", cpuid));
2936
2937                 /* This rule was deleted */
2938                 if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
2939                         return IP_FW_DENY;
2940
2941                 f = args->rule->next_rule;
2942                 if (f == NULL)
2943                         f = lookup_next_rule(args->rule);
2944         } else {
2945                 /*
2946                  * Find the starting rule. It can be either the first
2947                  * one, or the one after divert_rule if asked so.
2948                  */
2949                 int skipto;
2950
2951                 KKASSERT(!args->cont);
2952
2953                 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
2954                 if (mtag != NULL) {
2955                         divinfo = m_tag_data(mtag);
2956                         skipto = divinfo->skipto;
2957                 } else {
2958                         skipto = 0;
2959                 }
2960
2961                 f = ctx->ipfw_layer3_chain;
2962                 if (args->eh == NULL && skipto != 0) {
2963                         /* No skipto during rule flushing */
2964                         if (ipfw_flushing)
2965                                 return IP_FW_DENY;
2966
2967                         if (skipto >= IPFW_DEFAULT_RULE)
2968                                 return IP_FW_DENY; /* invalid */
2969
2970                         while (f && f->rulenum <= skipto)
2971                                 f = f->next;
2972                         if (f == NULL)  /* drop packet */
2973                                 return IP_FW_DENY;
2974                 } else if (ipfw_flushing) {
2975                         /* Rules are being flushed; skip to default rule */
2976                         f = ctx->ipfw_default_rule;
2977                 }
2978         }
2979         if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
2980                 m_tag_delete(m, mtag);
2981
2982         /*
2983          * Now scan the rules, and parse microinstructions for each rule.
2984          */
2985         for (; f; f = f->next) {
2986                 int l, cmdlen;
2987                 ipfw_insn *cmd;
2988                 int skip_or; /* skip rest of OR block */
2989
2990 again:
2991                 if (ctx->ipfw_set_disable & (1 << f->set))
2992                         continue;
2993
2994                 skip_or = 0;
2995                 for (l = f->cmd_len, cmd = f->cmd; l > 0;
2996                      l -= cmdlen, cmd += cmdlen) {
2997                         int match;
2998
2999                         /*
3000                          * check_body is a jump target used when we find a
3001                          * CHECK_STATE, and need to jump to the body of
3002                          * the target rule.
3003                          */
3004
3005 check_body:
3006                         cmdlen = F_LEN(cmd);
3007                         /*
3008                          * An OR block (insn_1 || .. || insn_n) has the
3009                          * F_OR bit set in all but the last instruction.
3010                          * The first match will set "skip_or", and cause
3011                          * the following instructions to be skipped until
3012                          * past the one with the F_OR bit clear.
3013                          */
3014                         if (skip_or) {          /* skip this instruction */
3015                                 if ((cmd->len & F_OR) == 0)
3016                                         skip_or = 0;    /* next one is good */
3017                                 continue;
3018                         }
3019                         match = 0; /* set to 1 if we succeed */
3020
3021                         switch (cmd->opcode) {
3022                         /*
3023                          * The first set of opcodes compares the packet's
3024                          * fields with some pattern, setting 'match' if a
3025                          * match is found. At the end of the loop there is
3026                          * logic to deal with F_NOT and F_OR flags associated
3027                          * with the opcode.
3028                          */
3029                         case O_NOP:
3030                                 match = 1;
3031                                 break;
3032
3033                         case O_FORWARD_MAC:
3034                                 kprintf("ipfw: opcode %d unimplemented\n",
3035                                         cmd->opcode);
3036                                 break;
3037
3038                         case O_GID:
3039                         case O_UID:
3040                                 /*
3041                                  * We only check offset == 0 && proto != 0,
3042                                  * as this ensures that we have an IPv4
3043                                  * packet with the ports info.
3044                                  */
3045                                 if (offset!=0)
3046                                         break;
3047
3048                                 match = ipfw_match_uid(&args->f_id, oif,
3049                                         cmd->opcode,
3050                                         (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
3051                                 break;
3052
3053                         case O_RECV:
3054                                 match = iface_match(m->m_pkthdr.rcvif,
3055                                     (ipfw_insn_if *)cmd);
3056                                 break;
3057
3058                         case O_XMIT:
3059                                 match = iface_match(oif, (ipfw_insn_if *)cmd);
3060                                 break;
3061
3062                         case O_VIA:
3063                                 match = iface_match(oif ? oif :
3064                                     m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
3065                                 break;
3066
3067                         case O_MACADDR2:
3068                                 if (args->eh != NULL) { /* have MAC header */
3069                                         uint32_t *want = (uint32_t *)
3070                                                 ((ipfw_insn_mac *)cmd)->addr;
3071                                         uint32_t *mask = (uint32_t *)
3072                                                 ((ipfw_insn_mac *)cmd)->mask;
3073                                         uint32_t *hdr = (uint32_t *)args->eh;
3074
3075                                         match =
3076                                         (want[0] == (hdr[0] & mask[0]) &&
3077                                          want[1] == (hdr[1] & mask[1]) &&
3078                                          want[2] == (hdr[2] & mask[2]));
3079                                 }
3080                                 break;
3081
3082                         case O_MAC_TYPE:
3083                                 if (args->eh != NULL) {
3084                                         uint16_t t =
3085                                             ntohs(args->eh->ether_type);
3086                                         uint16_t *p =
3087                                             ((ipfw_insn_u16 *)cmd)->ports;
3088                                         int i;
3089
3090                                         /* Special vlan handling */
3091                                         if (m->m_flags & M_VLANTAG)
3092                                                 t = ETHERTYPE_VLAN;
3093
3094                                         for (i = cmdlen - 1; !match && i > 0;
3095                                              i--, p += 2) {
3096                                                 match =
3097                                                 (t >= p[0] && t <= p[1]);
3098                                         }
3099                                 }
3100                                 break;
3101
3102                         case O_FRAG:
3103                                 match = (hlen > 0 && offset != 0);
3104                                 break;
3105
3106                         case O_IN:      /* "out" is "not in" */
3107                                 match = (oif == NULL);
3108                                 break;
3109
3110                         case O_LAYER2:
3111                                 match = (args->eh != NULL);
3112                                 break;
3113
3114                         case O_PROTO:
3115                                 /*
3116                                  * We do not allow an arg of 0 so the
3117                                  * check of "proto" only suffices.
3118                                  */
3119                                 match = (proto == cmd->arg1);
3120                                 break;
3121
3122                         case O_IP_SRC:
3123                                 match = (hlen > 0 &&
3124                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
3125                                     src_ip.s_addr);
3126                                 break;
3127
3128                         case O_IP_SRC_MASK:
3129                                 match = (hlen > 0 &&
3130                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
3131                                      (src_ip.s_addr &
3132                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
3133                                 break;
3134
3135                         case O_IP_SRC_ME:
3136                                 if (hlen > 0) {
3137                                         struct ifnet *tif;
3138
3139                                         tif = INADDR_TO_IFP(&src_ip);
3140                                         match = (tif != NULL);
3141                                 }
3142                                 break;
3143
3144                         case O_IP_SRC_TABLE:
3145                                 match = ipfw_table_lookup(ctx, cmd->arg1,
3146                                     &src_ip);
3147                                 break;
3148
3149                         case O_IP_DST_SET:
3150                         case O_IP_SRC_SET:
3151                                 if (hlen > 0) {
3152                                         uint32_t *d = (uint32_t *)(cmd + 1);
3153                                         uint32_t addr =
3154                                             cmd->opcode == O_IP_DST_SET ?
3155                                                 args->f_id.dst_ip :
3156                                                 args->f_id.src_ip;
3157
3158                                         if (addr < d[0])
3159                                                 break;
3160                                         addr -= d[0]; /* subtract base */
3161                                         match =
3162                                         (addr < cmd->arg1) &&
3163                                          (d[1 + (addr >> 5)] &
3164                                           (1 << (addr & 0x1f)));
3165                                 }
3166                                 break;
3167
3168                         case O_IP_DST:
3169                                 match = (hlen > 0 &&
3170                                     ((ipfw_insn_ip *)cmd)->addr.s_addr ==
3171                                     dst_ip.s_addr);
3172                                 break;
3173
3174                         case O_IP_DST_MASK:
3175                                 match = (hlen > 0) &&
3176                                     (((ipfw_insn_ip *)cmd)->addr.s_addr ==
3177                                      (dst_ip.s_addr &
3178                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
3179                                 break;
3180
3181                         case O_IP_DST_ME:
3182                                 if (hlen > 0) {
3183                                         struct ifnet *tif;
3184
3185                                         tif = INADDR_TO_IFP(&dst_ip);
3186                                         match = (tif != NULL);
3187                                 }
3188                                 break;
3189
3190                         case O_IP_DST_TABLE:
3191                                 match = ipfw_table_lookup(ctx, cmd->arg1,
3192                                     &dst_ip);
3193                                 break;
3194
3195                         case O_IP_SRCPORT:
3196                         case O_IP_DSTPORT:
3197                                 /*
3198                                  * offset == 0 && proto != 0 is enough
3199                                  * to guarantee that we have an IPv4
3200                                  * packet with port info.
3201                                  */
3202                                 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
3203                                     && offset == 0) {
3204                                         uint16_t x =
3205                                             (cmd->opcode == O_IP_SRCPORT) ?
3206                                                 src_port : dst_port ;
3207                                         uint16_t *p =
3208                                             ((ipfw_insn_u16 *)cmd)->ports;
3209                                         int i;
3210
3211                                         for (i = cmdlen - 1; !match && i > 0;
3212                                              i--, p += 2) {
3213                                                 match =
3214                                                 (x >= p[0] && x <= p[1]);
3215                                         }
3216                                 }
3217                                 break;
3218
3219                         case O_ICMPTYPE:
3220                                 match = (offset == 0 && proto==IPPROTO_ICMP &&
3221                                     icmptype_match(ip, (ipfw_insn_u32 *)cmd));
3222                                 break;
3223
3224                         case O_IPOPT:
3225                                 match = (hlen > 0 && ipopts_match(ip, cmd));
3226                                 break;
3227
3228                         case O_IPVER:
3229                                 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
3230                                 break;
3231
3232                         case O_IPTTL:
3233                                 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
3234                                 break;
3235
3236                         case O_IPID:
3237                                 match = (hlen > 0 &&
3238                                     cmd->arg1 == ntohs(ip->ip_id));
3239                                 break;
3240
3241                         case O_IPLEN:
3242                                 match = (hlen > 0 && cmd->arg1 == ip_len);
3243                                 break;
3244
3245                         case O_IPPRECEDENCE:
3246                                 match = (hlen > 0 &&
3247                                     (cmd->arg1 == (ip->ip_tos & 0xe0)));
3248                                 break;
3249
3250                         case O_IPTOS:
3251                                 match = (hlen > 0 &&
3252                                     flags_match(cmd, ip->ip_tos));
3253                                 break;
3254
3255                         case O_TCPFLAGS:
3256                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3257                                     flags_match(cmd,
3258                                         L3HDR(struct tcphdr,ip)->th_flags));
3259                                 break;
3260
3261                         case O_TCPOPTS:
3262                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3263                                     tcpopts_match(ip, cmd));
3264                                 break;
3265
3266                         case O_TCPSEQ:
3267                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3268                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
3269                                         L3HDR(struct tcphdr,ip)->th_seq);
3270                                 break;
3271
3272                         case O_TCPACK:
3273                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3274                                     ((ipfw_insn_u32 *)cmd)->d[0] ==
3275                                         L3HDR(struct tcphdr,ip)->th_ack);
3276                                 break;
3277
3278                         case O_TCPWIN:
3279                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3280                                     cmd->arg1 ==
3281                                         L3HDR(struct tcphdr,ip)->th_win);
3282                                 break;
3283
3284                         case O_ESTAB:
3285                                 /* reject packets which have SYN only */
3286                                 /* XXX should i also check for TH_ACK ? */
3287                                 match = (proto == IPPROTO_TCP && offset == 0 &&
3288                                     (L3HDR(struct tcphdr,ip)->th_flags &
3289                                      (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
3290                                 break;
3291
3292                         case O_LOG:
3293                                 if (fw_verbose) {
3294                                         ipfw_log(ctx, f, hlen, args->eh, m,
3295                                             oif);
3296                                 }
3297                                 match = 1;
3298                                 break;
3299
3300                         case O_PROB:
3301                                 match = (krandom() <
3302                                         ((ipfw_insn_u32 *)cmd)->d[0]);
3303                                 break;
3304
3305                         /*
3306                          * The second set of opcodes represents 'actions',
3307                          * i.e. the terminal part of a rule once the packet
3308                          * matches all previous patterns.
3309                          * Typically there is only one action for each rule,
3310                          * and the opcode is stored at the end of the rule
3311                          * (but there are exceptions -- see below).
3312                          *
3313                          * In general, here we set retval and terminate the
3314                          * outer loop (would be a 'break 3' in some language,
3315                          * but we need to do a 'goto done').
3316                          *
3317                          * Exceptions:
3318                          * O_COUNT and O_SKIPTO actions:
3319                          *   instead of terminating, we jump to the next rule
3320                          *   ('goto next_rule', equivalent to a 'break 2'),
3321                          *   or to the SKIPTO target ('goto again' after
3322                          *   having set f, cmd and l), respectively.
3323                          *
3324                          * O_LIMIT and O_KEEP_STATE: these opcodes are
3325                          *   not real 'actions', and are stored right
3326                          *   before the 'action' part of the rule.
3327                          *   These opcodes try to install an entry in the
3328                          *   state tables; if successful, we continue with
3329                          *   the next opcode (match=1; break;), otherwise
3330                          *   the packet must be dropped ('goto done' after
3331                          *   setting retval).  If static rules are changed
3332                          *   during the state installation, the packet will
                         *   be dropped and rule's stats will not be updated
3334                          *   ('return IP_FW_DENY').
3335                          *
3336                          * O_PROBE_STATE and O_CHECK_STATE: these opcodes
3337                          *   cause a lookup of the state table, and a jump
3338                          *   to the 'action' part of the parent rule
3339                          *   ('goto check_body') if an entry is found, or
3340                          *   (CHECK_STATE only) a jump to the next rule if
3341                          *   the entry is not found ('goto next_rule').
                         *   The result of the lookup is cached so that
                         *   further instances of these opcodes are
                         *   effectively NOPs.  If static rules are changed
3345                          *   during the state looking up, the packet will
3346                          *   be dropped and rule's stats will not be updated
3347                          *   ('return IP_FW_DENY').
3348                          */
3349                         case O_LIMIT:
3350                         case O_KEEP_STATE:
3351                                 if (ipfw_state_install(ctx, f,
3352                                     (ipfw_insn_limit *)cmd, args,
3353                                     (offset == 0 && proto == IPPROTO_TCP) ?
3354                                     L3HDR(struct tcphdr, ip) : NULL)) {
3355                                         retval = IP_FW_DENY;
3356                                         goto done; /* error/limit violation */
3357                                 }
3358                                 match = 1;
3359                                 break;
3360
3361                         case O_PROBE_STATE:
3362                         case O_CHECK_STATE:
3363                                 /*
                                 * States are checked at the first keep-state or
                                 * check-state occurrence, with the result
3366                                  * being stored in dyn_dir.  The compiler
3367                                  * introduces a PROBE_STATE instruction for
3368                                  * us when we have a KEEP_STATE/LIMIT (because
3369                                  * PROBE_STATE needs to be run first).
3370                                  */
3371                                 if (dyn_dir == MATCH_UNKNOWN) {
3372                                         dyn_f = ipfw_state_lookup_rule(ctx,
3373                                             &args->f_id, &dyn_dir,
3374                                             (offset == 0 &&
3375                                              proto == IPPROTO_TCP) ?
3376                                             L3HDR(struct tcphdr, ip) : NULL,
3377                                             ip_len);
3378                                         if (dyn_f != NULL) {
3379                                                 /*
3380                                                  * Found a rule from a state;
3381                                                  * jump to the 'action' part
3382                                                  * of the rule.
3383                                                  */
3384                                                 f = dyn_f;
3385                                                 cmd = ACTION_PTR(f);
3386                                                 l = f->cmd_len - f->act_ofs;
3387                                                 goto check_body;
3388                                         }
3389                                 }
3390                                 /*
3391                                  * State not found. If CHECK_STATE, skip to
3392                                  * next rule, if PROBE_STATE just ignore and
3393                                  * continue with next opcode.
3394                                  */
3395                                 if (cmd->opcode == O_CHECK_STATE)
3396                                         goto next_rule;
3397                                 match = 1;
3398                                 break;
3399
3400                         case O_ACCEPT:
3401                                 retval = IP_FW_PASS;    /* accept */
3402                                 goto done;
3403
3404                         case O_DEFRAG:
3405                                 if (f->cross_rules == NULL) {
3406                                         /*
3407                                          * This rule was not completely setup;
3408                                          * move on to the next rule.
3409                                          */
3410                                         goto next_rule;
3411                                 }
3412
3413                                 /*
3414                                  * Don't defrag for l2 packets, output packets
3415                                  * or non-fragments.
3416                                  */
3417                                 if (oif != NULL || args->eh != NULL ||
3418                                     (ip->ip_off & (IP_MF | IP_OFFMASK)) == 0)
3419                                         goto next_rule;
3420
3421                                 ctx->ipfw_frags++;
3422                                 m = ip_reass(m);
3423                                 args->m = m;
3424                                 if (m == NULL) {
3425                                         retval = IP_FW_PASS;
3426                                         goto done;
3427                                 }
3428                                 ctx->ipfw_defraged++;
3429                                 KASSERT((m->m_flags & M_HASH) == 0,
3430                                     ("hash not cleared"));
3431
3432                                 /* Update statistics */
3433                                 f->pcnt++;
3434                                 f->bcnt += ip_len;
3435                                 f->timestamp = time_second;
3436
3437                                 ip = mtod(m, struct ip *);
3438                                 hlen = ip->ip_hl << 2;
3439                                 ip->ip_len += hlen;
3440
3441                                 ip->ip_len = htons(ip->ip_len);
3442                                 ip->ip_off = htons(ip->ip_off);
3443
3444                                 ip_hashfn(&m, 0);
3445                                 args->m = m;
3446                                 if (m == NULL)
3447                                         goto pullup_failed;
3448
3449                                 KASSERT(m->m_flags & M_HASH, ("no hash"));
3450                                 cpuid = netisr_hashcpu(m->m_pkthdr.hash);
3451                                 if (cpuid != mycpuid) {
3452                                         /*
3453                                          * NOTE:
3454                                          * ip_len/ip_off are in network byte
3455                                          * order.
3456                                          */
3457                                         ctx->ipfw_defrag_remote++;
3458                                         args->rule = f;
3459                                         return (IP_FW_CONTINUE);
3460                                 }
3461
3462                                 /* 'm' might be changed by ip_hashfn(). */
3463                                 ip = mtod(m, struct ip *);
3464                                 ip->ip_len = ntohs(ip->ip_len);
3465                                 ip->ip_off = ntohs(ip->ip_off);
3466
3467                                 ip_len = ip->ip_len;
3468                                 offset = 0;
3469                                 proto = args->f_id.proto = ip->ip_p;
3470
3471                                 switch (proto) {
3472                                 case IPPROTO_TCP:
3473                                         {
3474                                                 struct tcphdr *tcp;
3475
3476                                                 PULLUP_TO(hlen +
3477                                                     sizeof(struct tcphdr));
3478                                                 tcp = L3HDR(struct tcphdr, ip);
3479                                                 dst_port = tcp->th_dport;
3480                                                 src_port = tcp->th_sport;
3481                                                 args->f_id.flags =
3482                                                     tcp->th_flags;
3483                                         }
3484                                         break;
3485
3486                                 case IPPROTO_UDP:
3487                                         {
3488                                                 struct udphdr *udp;
3489
3490                                                 PULLUP_TO(hlen +
3491                                                     sizeof(struct udphdr));
3492                                                 udp = L3HDR(struct udphdr, ip);
3493                                                 dst_port = udp->uh_dport;
3494                                                 src_port = udp->uh_sport;
3495                                         }
3496                                         break;
3497
3498                                 case IPPROTO_ICMP:
3499                                         /* type, code and checksum. */
3500                                         PULLUP_TO(hlen + 4);
3501                                         args->f_id.flags =
3502                                             L3HDR(struct icmp, ip)->icmp_type;
3503                                         break;
3504
3505                                 default:
3506                                         break;
3507                                 }
3508                                 args->f_id.src_port = src_port =
3509                                     ntohs(src_port);
3510                                 args->f_id.dst_port = dst_port =
3511                                     ntohs(dst_port);
3512
3513                                 /* Move on. */
3514                                 goto next_rule;
3515
3516                         case O_PIPE:
3517                         case O_QUEUE:
3518                                 args->rule = f; /* report matching rule */
3519                                 args->cookie = cmd->arg1;
3520                                 retval = IP_FW_DUMMYNET;
3521                                 goto done;
3522
3523                         case O_DIVERT:
3524                         case O_TEE:
3525                                 if (args->eh) /* not on layer 2 */
3526                                         break;
3527
3528                                 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
3529                                     sizeof(*divinfo), M_INTWAIT | M_NULLOK);
3530                                 if (mtag == NULL) {
3531                                         retval = IP_FW_DENY;
3532                                         goto done;
3533                                 }
3534                                 divinfo = m_tag_data(mtag);
3535
3536                                 divinfo->skipto = f->rulenum;
3537                                 divinfo->port = cmd->arg1;
3538                                 divinfo->tee = (cmd->opcode == O_TEE);
3539                                 m_tag_prepend(m, mtag);
3540
3541                                 args->cookie = cmd->arg1;
3542                                 retval = (cmd->opcode == O_DIVERT) ?
3543                                          IP_FW_DIVERT : IP_FW_TEE;
3544                                 goto done;
3545
3546                         case O_COUNT:
3547                         case O_SKIPTO:
3548                                 f->pcnt++;      /* update stats */
3549                                 f->bcnt += ip_len;
3550                                 f->timestamp = time_second;
3551                                 if (cmd->opcode == O_COUNT)
3552                                         goto next_rule;
3553                                 /* handle skipto */
3554                                 if (f->next_rule == NULL)
3555                                         lookup_next_rule(f);
3556                                 f = f->next_rule;
3557                                 goto again;
3558
3559                         case O_REJECT:
3560                                 /*
3561                                  * Drop the packet and send a reject notice
3562                                  * if the packet is not ICMP (or is an ICMP
3563                                  * query), and it is not multicast/broadcast.
3564                                  */
3565                                 if (hlen > 0 &&
3566                                     (proto != IPPROTO_ICMP ||
3567                                      is_icmp_query(ip)) &&
3568                                     !(m->m_flags & (M_BCAST|M_MCAST)) &&
3569                                     !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
3570                                         send_reject(args, cmd->arg1,
3571                                             offset, ip_len);
3572                                         retval = IP_FW_DENY;
3573                                         goto done;
3574                                 }
3575                                 /* FALLTHROUGH */
3576                         case O_DENY:
3577                                 retval = IP_FW_DENY;
3578                                 goto done;
3579
3580                         case O_FORWARD_IP:
3581                                 if (args->eh)   /* not valid on layer2 pkts */
3582                                         break;
3583                                 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
3584                                         struct sockaddr_in *sin;
3585
3586                                         mtag = m_tag_get(PACKET_TAG_IPFORWARD,
3587                                             sizeof(*sin), M_INTWAIT | M_NULLOK);
3588                                         if (mtag == NULL) {
3589                                                 retval = IP_FW_DENY;
3590                                                 goto done;
3591                                         }
3592                                         sin = m_tag_data(mtag);
3593
3594                                         /* Structure copy */
3595                                         *sin = ((ipfw_insn_sa *)cmd)->sa;
3596
3597                                         m_tag_prepend(m, mtag);
3598                                         m->m_pkthdr.fw_flags |=
3599                                                 IPFORWARD_MBUF_TAGGED;
3600                                         m->m_pkthdr.fw_flags &=
3601                                                 ~BRIDGE_MBUF_TAGGED;
3602                                 }
3603                                 retval = IP_FW_PASS;
3604                                 goto done;
3605
3606                         default:
3607                                 panic("-- unknown opcode %d", cmd->opcode);
3608                         } /* end of switch() on opcodes */
3609
3610                         if (cmd->len & F_NOT)
3611                                 match = !match;
3612
3613                         if (match) {
3614                                 if (cmd->len & F_OR)
3615                                         skip_or = 1;
3616                         } else {
3617                                 if (!(cmd->len & F_OR)) /* not an OR block, */
3618                                         break;          /* try next rule    */
3619                         }
3620
3621                 }       /* end of inner for, scan opcodes */
3622
3623 next_rule:;             /* try next rule                */
3624
3625         }               /* end of outer for, scan rules */
3626         kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
3627         return IP_FW_DENY;
3628
3629 done:
3630         /* Update statistics */
3631         f->pcnt++;
3632         f->bcnt += ip_len;
3633         f->timestamp = time_second;
3634         return retval;
3635
3636 pullup_failed:
3637         if (fw_verbose)
3638                 kprintf("pullup failed\n");
3639         return IP_FW_DENY;
3640
3641 #undef PULLUP_TO
3642 }
3643
3644 static struct mbuf *
3645 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
3646 {
3647         struct m_tag *mtag;
3648         struct dn_pkt *pkt;
3649         ipfw_insn *cmd;
3650         const struct ipfw_flow_id *id;
3651         struct dn_flow_id *fid;
3652
3653         M_ASSERTPKTHDR(m);
3654
3655         mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt),
3656             M_INTWAIT | M_NULLOK);
3657         if (mtag == NULL) {
3658                 m_freem(m);
3659                 return (NULL);
3660         }
3661         m_tag_prepend(m, mtag);
3662
3663         pkt = m_tag_data(mtag);
3664         bzero(pkt, sizeof(*pkt));
3665
3666         cmd = fwa->rule->cmd + fwa->rule->act_ofs;
3667         if (cmd->opcode == O_LOG)
3668                 cmd += F_LEN(cmd);
3669         KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
3670                 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
3671
3672         pkt->dn_m = m;
3673         pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
3674         pkt->ifp = fwa->oif;
3675         pkt->pipe_nr = pipe_nr;
3676
3677         pkt->cpuid = mycpuid;
3678         pkt->msgport = netisr_curport();
3679
3680         id = &fwa->f_id;
3681         fid = &pkt->id;
3682         fid->fid_dst_ip = id->dst_ip;
3683         fid->fid_src_ip = id->src_ip;
3684         fid->fid_dst_port = id->dst_port;
3685         fid->fid_src_port = id->src_port;
3686         fid->fid_proto = id->proto;
3687         fid->fid_flags = id->flags;
3688
3689         ipfw_ref_rule(fwa->rule);
3690         pkt->dn_priv = fwa->rule;
3691         pkt->dn_unref_priv = ipfw_unref_rule;
3692
3693         if (cmd->opcode == O_PIPE)
3694                 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
3695
3696         m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
3697         return (m);
3698 }
3699
3700 /*
3701  * When a rule is added/deleted, clear the next_rule pointers in all rules.
3702  * These will be reconstructed on the fly as packets are matched.
3703  */
3704 static void
3705 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
3706 {
3707         struct ip_fw *rule;
3708
3709         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3710                 rule->next_rule = NULL;
3711 }
3712
3713 static __inline void
3714 ipfw_inc_static_count(struct ip_fw *rule)
3715 {
3716         /* Static rule's counts are updated only on CPU0 */
3717         KKASSERT(mycpuid == 0);
3718
3719         static_count++;
3720         static_ioc_len += IOC_RULESIZE(rule);
3721 }
3722
3723 static __inline void
3724 ipfw_dec_static_count(struct ip_fw *rule)
3725 {
3726         int l = IOC_RULESIZE(rule);
3727
3728         /* Static rule's counts are updated only on CPU0 */
3729         KKASSERT(mycpuid == 0);
3730
3731         KASSERT(static_count > 0, ("invalid static count %u", static_count));
3732         static_count--;
3733
3734         KASSERT(static_ioc_len >= l,
3735                 ("invalid static len %u", static_ioc_len));
3736         static_ioc_len -= l;
3737 }
3738
3739 static void
3740 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
3741 {
3742         if (fwmsg->sibling != NULL) {
3743                 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
3744                 fwmsg->sibling->sibling = rule;
3745         }
3746         fwmsg->sibling = rule;
3747 }
3748
3749 static struct ip_fw *
3750 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
3751 {
3752         struct ip_fw *rule;
3753
3754         rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
3755
3756         rule->act_ofs = ioc_rule->act_ofs;
3757         rule->cmd_len = ioc_rule->cmd_len;
3758         rule->rulenum = ioc_rule->rulenum;
3759         rule->set = ioc_rule->set;
3760         rule->usr_flags = ioc_rule->usr_flags;
3761
3762         bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
3763
3764         rule->refcnt = 1;
3765         rule->cpuid = mycpuid;
3766         rule->rule_flags = rule_flags;
3767
3768         return rule;
3769 }
3770
/*
 * Netisr handler run on each CPU in turn (the message is forwarded
 * from CPU0 upward): create this CPU's replica of the new rule,
 * insert it into the local rule chain, and link it to the replica
 * created on the previous CPU.
 */
static void
ipfw_add_rule_dispatch(netmsg_t nmsg)
{
        struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        struct ip_fw *rule;

        ASSERT_NETISR_NCPUS(mycpuid);

        rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->rule_flags);

        /*
         * Insert rule into the pre-determined position
         */
        if (fwmsg->prev_rule != NULL) {
                struct ip_fw *prev, *next;

                prev = fwmsg->prev_rule;
                KKASSERT(prev->cpuid == mycpuid);

                next = fwmsg->next_rule;
                KKASSERT(next->cpuid == mycpuid);

                rule->next = next;
                prev->next = rule;

                /*
                 * Move to the position on the next CPU
                 * before the msg is forwarded.
                 */
                fwmsg->prev_rule = prev->sibling;
                fwmsg->next_rule = next->sibling;
        } else {
                /* No insertion point: prepend at the chain head. */
                KKASSERT(fwmsg->next_rule == NULL);
                rule->next = ctx->ipfw_layer3_chain;
                ctx->ipfw_layer3_chain = rule;
        }

        /* Link rule CPU sibling */
        ipfw_link_sibling(fwmsg, rule);

        /* Cached next_rule pointers are stale now; they rebuild lazily. */
        ipfw_flush_rule_ptrs(ctx);

        if (mycpuid == 0) {
                /* Statistics only need to be updated once */
                ipfw_inc_static_count(rule);

                /* Return the rule on CPU0 */
                nmsg->lmsg.u.ms_resultp = rule;
        }

        /*
         * ms_resultp was set above when this message visited CPU0, so
         * every CPU records the CPU0 rule's address as the track id.
         */
        if (rule->rule_flags & IPFW_RULE_F_GENTRACK)
                rule->track_ruleid = (uintptr_t)nmsg->lmsg.u.ms_resultp;

        if (fwmsg->cross_rules != NULL) {
                /* Save rules for later use. */
                fwmsg->cross_rules[mycpuid] = rule;
        }

        /* Pass the message on to the next CPU. */
        netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
3832
3833 static void
3834 ipfw_crossref_rule_dispatch(netmsg_t nmsg)
3835 {
3836         struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
3837         struct ip_fw *rule = fwmsg->sibling;
3838         int sz = sizeof(struct ip_fw *) * netisr_ncpus;
3839
3840         ASSERT_NETISR_NCPUS(mycpuid);
3841         KASSERT(rule->rule_flags & IPFW_RULE_F_CROSSREF,
3842             ("not crossref rule"));
3843
3844         rule->cross_rules = kmalloc(sz, M_IPFW, M_WAITOK);
3845         memcpy(rule->cross_rules, fwmsg->cross_rules, sz);
3846
3847         fwmsg->sibling = rule->sibling;
3848         netisr_forwardmsg(&fwmsg->base, mycpuid + 1);
3849 }
3850
3851 /*
3852  * Add a new rule to the list.  Copy the rule into a malloc'ed area,
3853  * then possibly create a rule number and add the rule to the list.
3854  * Update the rule_number in the input struct so the caller knows
3855  * it as well.
3856  */
static void
ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct netmsg_ipfw fwmsg;
	struct ip_fw *f, *prev, *rule;

	ASSERT_NETISR0;

	/*
	 * If rulenum is 0, find highest numbered rule before the
	 * default rule, and add rule number incremental step.
	 */
	if (ioc_rule->rulenum == 0) {
		int step = autoinc_step;

		KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
			 step <= IPFW_AUTOINC_STEP_MAX);

		/*
		 * Locate the highest numbered rule before default
		 */
		for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
			if (f->rulenum == IPFW_DEFAULT_RULE)
				break;
			ioc_rule->rulenum = f->rulenum;
		}
		/* Only bump if the result stays below the default rule. */
		if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
			ioc_rule->rulenum += step;
	}
	KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
		ioc_rule->rulenum != 0,
		("invalid rule num %d", ioc_rule->rulenum));

	/*
	 * Now find the right place for the new rule in the sorted list.
	 */
	for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
	     prev = f, f = f->next) {
		if (f->rulenum > ioc_rule->rulenum) {
			/* Found the location */
			break;
		}
	}
	/* The default rule always terminates the chain. */
	KASSERT(f != NULL, ("no default rule?!"));

	/*
	 * Duplicate the rule onto each CPU.
	 * The rule duplicated on CPU0 will be returned.
	 */
	bzero(&fwmsg, sizeof(fwmsg));
	netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_add_rule_dispatch);
	fwmsg.ioc_rule = ioc_rule;
	fwmsg.prev_rule = prev;
	fwmsg.next_rule = prev == NULL ? NULL : f;
	fwmsg.rule_flags = rule_flags;
	if (rule_flags & IPFW_RULE_F_CROSSREF) {
		/* Scratch array; each CPU deposits its rule copy here. */
		fwmsg.cross_rules = kmalloc(
		    sizeof(struct ip_fw *) * netisr_ncpus, M_TEMP,
		    M_WAITOK | M_ZERO);
	}

	netisr_domsg_global(&fwmsg.base);
	KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);

	rule = fwmsg.base.lmsg.u.ms_resultp;
	KKASSERT(rule != NULL && rule->cpuid == mycpuid);

	if (fwmsg.cross_rules != NULL) {
		/*
		 * Second pass: distribute the collected per-CPU rule
		 * pointer array to every CPU's copy of the rule.
		 */
		netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipfw_crossref_rule_dispatch);
		fwmsg.sibling = rule;
		netisr_domsg_global(&fwmsg.base);
		KKASSERT(fwmsg.sibling == NULL);

		kfree(fwmsg.cross_rules, M_TEMP);

#ifdef KLD_MODULE
		atomic_add_int(&ipfw_gd.ipfw_refcnt, 1);
#endif
	}

	DPRINTF("++ installed rule %d, static count now %d\n",
		rule->rulenum, static_count);
}
3943
3944 /*
3945  * Free storage associated with a static rule (including derived
3946  * states/tracks).
3947  * The caller is in charge of clearing rule pointers to avoid
3948  * dangling pointers.
3949  * @return a pointer to the next entry.
3950  * Arguments are not checked, so they better be correct.
3951  */
static struct ip_fw *
ipfw_delete_rule(struct ipfw_context *ctx,
		 struct ip_fw *prev, struct ip_fw *rule)
{
	struct ip_fw *n;

	/* Unlink 'rule' from this CPU's rule chain. */
	n = rule->next;
	if (prev == NULL)
		ctx->ipfw_layer3_chain = n;
	else
		prev->next = n;

	/* Mark the rule as invalid */
	rule->rule_flags |= IPFW_RULE_F_INVALID;
	rule->next_rule = NULL;
	rule->sibling = NULL;
#ifdef foo
	/* Don't reset cpuid here; keep various assertion working */
	rule->cpuid = -1;
#endif

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_dec_static_count(rule);

	if ((rule->rule_flags & IPFW_RULE_F_CROSSREF) == 0) {
		/* Try to free this rule */
		ipfw_free_rule(rule);
	} else {
		/*
		 * Cross-referenced rules may still be pointed at from
		 * other CPUs; defer by queueing on the global free list
		 * (done on CPU0 only) instead of freeing here.
		 */
		/* TODO: check staging area. */
		if (mycpuid == 0) {
			rule->next = ipfw_gd.ipfw_crossref_free;
			ipfw_gd.ipfw_crossref_free = rule;
		}
	}

	/* Return the next rule */
	return n;
}
3991
/*
 * Per-CPU dispatch for ipfw_flush(): tear down this CPU's states and
 * tracks, then delete every static rule (optionally including the
 * default rule), and forward the message to the next netisr CPU.
 */
static void
ipfw_flush_dispatch(netmsg_t nmsg)
{
	int kill_default = nmsg->lmsg.u.ms_result;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Flush states.
	 */
	ipfw_state_flush(ctx, NULL);
	KASSERT(ctx->ipfw_state_cnt == 0,
	    ("%d pcpu states remain", ctx->ipfw_state_cnt));
	ctx->ipfw_state_loosecnt = 0;
	ctx->ipfw_state_lastexp = 0;

	/*
	 * Flush tracks.
	 */
	ipfw_track_flush(ctx, NULL);
	ctx->ipfw_track_lastexp = 0;
	if (ctx->ipfw_trkcnt_spare != NULL) {
		kfree(ctx->ipfw_trkcnt_spare, M_IPFW);
		ctx->ipfw_trkcnt_spare = NULL;
	}

	ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */

	/* Pop rules off the head until empty (or only the default is left). */
	while ((rule = ctx->ipfw_layer3_chain) != NULL &&
	       (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
		ipfw_delete_rule(ctx, NULL, rule);

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
4028
4029 /*
4030  * Deletes all rules from a chain (including the default rule
4031  * if the second argument is set).
4032  */
static void
ipfw_flush(int kill_default)
{
	struct netmsg_base nmsg;
#ifdef INVARIANTS
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	int state_cnt;
#endif

	ASSERT_NETISR0;

	/*
	 * If 'kill_default' then caller has done the necessary
	 * msgport syncing; unnecessary to do it again.
	 */
	if (!kill_default) {
		/*
		 * Let ipfw_chk() know the rules are going to
		 * be flushed, so it could jump directly to
		 * the default rule.
		 */
		ipfw_flushing = 1;
		/* XXX use priority sync */
		netmsg_service_sync();
	}

	/*
	 * Press the 'flush' button
	 */
	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_flush_dispatch);
	nmsg.lmsg.u.ms_result = kill_default;
	netisr_domsg_global(&nmsg);
	/* All per-CPU states/tracks are gone; reset the global bookkeeping. */
	ipfw_gd.ipfw_state_loosecnt = 0;
	ipfw_gd.ipfw_state_globexp = 0;
	ipfw_gd.ipfw_track_globexp = 0;

#ifdef INVARIANTS
	state_cnt = ipfw_state_cntcoll();
	KASSERT(state_cnt == 0, ("%d states remain", state_cnt));

	KASSERT(ipfw_gd.ipfw_trkcnt_cnt == 0,
	    ("%d trkcnts remain", ipfw_gd.ipfw_trkcnt_cnt));

	if (kill_default) {
		KASSERT(static_count == 0,
			("%u static rules remain", static_count));
		KASSERT(static_ioc_len == 0,
			("%u bytes of static rules remain", static_ioc_len));
	} else {
		/* Only the default rule should survive a non-kill flush. */
		KASSERT(static_count == 1,
			("%u static rules remain", static_count));
		KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
			("%u bytes of static rules remain, should be %lu",
			 static_ioc_len,
			 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
	}
#endif

	/* Flush is done */
	ipfw_flushing = 0;
}
4096
/*
 * Per-CPU dispatch for ipfw_alt_delete_rule(): delete every rule on
 * this CPU whose number matches dmsg->rulenum, flushing any states and
 * tracks those rules generated, then forward to the next netisr CPU.
 * The start/prev rule pointers are advanced through the 'sibling'
 * links so each CPU operates on its own copies.
 */
static void
ipfw_alt_delete_rule_dispatch(netmsg_t nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule, *prev;

	ASSERT_NETISR_NCPUS(mycpuid);

	rule = dmsg->start_rule;
	KKASSERT(rule->cpuid == mycpuid);
	dmsg->start_rule = rule->sibling;

	prev = dmsg->prev_rule;
	if (prev != NULL) {
		KKASSERT(prev->cpuid == mycpuid);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		dmsg->prev_rule = prev->sibling;
	}

	/*
	 * flush pointers outside the loop, then delete all matching
	 * rules.  'prev' remains the same throughout the cycle.
	 */
	ipfw_flush_rule_ptrs(ctx);
	while (rule && rule->rulenum == dmsg->rulenum) {
		if (rule->rule_flags & IPFW_RULE_F_GENSTATE) {
			/* Flush states generated by this rule. */
			ipfw_state_flush(ctx, rule);
		}
		if (rule->rule_flags & IPFW_RULE_F_GENTRACK) {
			/* Flush tracks generated by this rule. */
			ipfw_track_flush(ctx, rule);
		}
		rule = ipfw_delete_rule(ctx, prev, rule);
	}

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
4140
4141 static int
4142 ipfw_alt_delete_rule(uint16_t rulenum)
4143 {
4144         struct ip_fw *prev, *rule;
4145         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4146         struct netmsg_del dmsg;
4147
4148         ASSERT_NETISR0;
4149
4150         /*
4151          * Locate first rule to delete
4152          */
4153         for (prev = NULL, rule = ctx->ipfw_layer3_chain;
4154              rule && rule->rulenum < rulenum;
4155              prev = rule, rule = rule->next)
4156                 ; /* EMPTY */
4157         if (rule->rulenum != rulenum)
4158                 return EINVAL;
4159
4160         /*
4161          * Get rid of the rule duplications on all CPUs
4162          */
4163         bzero(&dmsg, sizeof(dmsg));
4164         netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4165             ipfw_alt_delete_rule_dispatch);
4166         dmsg.prev_rule = prev;
4167         dmsg.start_rule = rule;
4168         dmsg.rulenum = rulenum;
4169
4170         netisr_domsg_global(&dmsg.base);
4171         KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
4172         return 0;
4173 }
4174
/*
 * Per-CPU dispatch for ipfw_alt_delete_ruleset(): walk this CPU's
 * whole rule chain and delete every rule belonging to dmsg->from_set,
 * flushing states/tracks generated by those rules, then forward the
 * message to the next netisr CPU.
 */
static void
ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *prev, *rule;
#ifdef INVARIANTS
	int del = 0;
#endif

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Invalidate cached next_rule pointers before unlinking. */
	ipfw_flush_rule_ptrs(ctx);

	prev = NULL;
	rule = ctx->ipfw_layer3_chain;
	while (rule != NULL) {
		if (rule->set == dmsg->from_set) {
			if (rule->rule_flags & IPFW_RULE_F_GENSTATE) {
				/* Flush states generated by this rule. */
				ipfw_state_flush(ctx, rule);
			}
			if (rule->rule_flags & IPFW_RULE_F_GENTRACK) {
				/* Flush tracks generated by this rule. */
				ipfw_track_flush(ctx, rule);
			}
			rule = ipfw_delete_rule(ctx, prev, rule);
#ifdef INVARIANTS
			del = 1;
#endif
		} else {
			prev = rule;
			rule = rule->next;
		}
	}
	/* Caller verified the set is non-empty on CPU0. */
	KASSERT(del, ("no match set?!"));

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
4214
4215 static int
4216 ipfw_alt_delete_ruleset(uint8_t set)
4217 {
4218         struct netmsg_del dmsg;
4219         int del;
4220         struct ip_fw *rule;
4221         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4222
4223         ASSERT_NETISR0;
4224
4225         /*
4226          * Check whether the 'set' exists.  If it exists,
4227          * then check whether any rules within the set will
4228          * try to create states.
4229          */
4230         del = 0;
4231         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4232                 if (rule->set == set)
4233                         del = 1;
4234         }
4235         if (!del)
4236                 return 0; /* XXX EINVAL? */
4237
4238         /*
4239          * Delete this set
4240          */
4241         bzero(&dmsg, sizeof(dmsg));
4242         netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4243             ipfw_alt_delete_ruleset_dispatch);
4244         dmsg.from_set = set;
4245         netisr_domsg_global(&dmsg.base);
4246
4247         return 0;
4248 }
4249
4250 static void
4251 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
4252 {
4253         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
4254         struct ip_fw *rule;
4255
4256         ASSERT_NETISR_NCPUS(mycpuid);
4257
4258         rule = dmsg->start_rule;
4259         KKASSERT(rule->cpuid == mycpuid);
4260
4261         /*
4262          * Move to the position on the next CPU
4263          * before the msg is forwarded.
4264          */
4265         dmsg->start_rule = rule->sibling;
4266
4267         while (rule && rule->rulenum <= dmsg->rulenum) {
4268                 if (rule->rulenum == dmsg->rulenum)
4269                         rule->set = dmsg->to_set;
4270                 rule = rule->next;
4271         }
4272         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4273 }
4274
/*
 * Move all rules with the given number into the given set, on every
 * netisr CPU.  A no-op (returns 0) when no such rule exists or all
 * matching rules are already in the target set.
 */
static int
ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
{
	struct netmsg_del dmsg;
	struct netmsg_base *nmsg;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ASSERT_NETISR0;

	/*
	 * Locate first rule to move
	 */
	for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
	     rule = rule->next) {
		if (rule->rulenum == rulenum && rule->set != set)
			break;
	}
	if (rule == NULL || rule->rulenum > rulenum)
		return 0; /* XXX error? */

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.base;
	netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_alt_move_rule_dispatch);
	dmsg.start_rule = rule;
	dmsg.rulenum = rulenum;
	dmsg.to_set = set;

	netisr_domsg_global(nmsg);
	/* Each CPU consumed its sibling; the chain must be exhausted. */
	KKASSERT(dmsg.start_rule == NULL);
	return 0;
}
4308
4309 static void
4310 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
4311 {
4312         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
4313         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4314         struct ip_fw *rule;
4315
4316         ASSERT_NETISR_NCPUS(mycpuid);
4317
4318         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4319                 if (rule->set == dmsg->from_set)
4320                         rule->set = dmsg->to_set;
4321         }
4322         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4323 }
4324
4325 static int
4326 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
4327 {
4328         struct netmsg_del dmsg;
4329         struct netmsg_base *nmsg;
4330
4331         ASSERT_NETISR0;
4332
4333         bzero(&dmsg, sizeof(dmsg));
4334         nmsg = &dmsg.base;
4335         netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4336             ipfw_alt_move_ruleset_dispatch);
4337         dmsg.from_set = from_set;
4338         dmsg.to_set = to_set;
4339
4340         netisr_domsg_global(nmsg);
4341         return 0;
4342 }
4343
4344 static void
4345 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
4346 {
4347         struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
4348         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4349         struct ip_fw *rule;
4350
4351         ASSERT_NETISR_NCPUS(mycpuid);
4352
4353         for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4354                 if (rule->set == dmsg->from_set)
4355                         rule->set = dmsg->to_set;
4356                 else if (rule->set == dmsg->to_set)
4357                         rule->set = dmsg->from_set;
4358         }
4359         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4360 }
4361
4362 static int
4363 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
4364 {
4365         struct netmsg_del dmsg;
4366         struct netmsg_base *nmsg;
4367
4368         ASSERT_NETISR0;
4369
4370         bzero(&dmsg, sizeof(dmsg));
4371         nmsg = &dmsg.base;
4372         netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4373             ipfw_alt_swap_ruleset_dispatch);
4374         dmsg.from_set = set1;
4375         dmsg.to_set = set2;
4376
4377         netisr_domsg_global(nmsg);
4378         return 0;
4379 }
4380
4381 /*
4382  * Remove all rules with given number, and also do set manipulation.
4383  *
4384  * The argument is an uint32_t. The low 16 bit are the rule or set number,
4385  * the next 8 bits are the new set, the top 8 bits are the command:
4386  *
4387  *      0       delete rules with given number
4388  *      1       delete rules with given set number
4389  *      2       move rules with given number to new set
4390  *      3       move rules with given set number to new set
4391  *      4       swap sets with given numbers
4392  */
4393 static int
4394 ipfw_ctl_alter(uint32_t arg)
4395 {
4396         uint16_t rulenum;
4397         uint8_t cmd, new_set;
4398         int error = 0;
4399
4400         ASSERT_NETISR0;
4401
4402         rulenum = arg & 0xffff;
4403         cmd = (arg >> 24) & 0xff;
4404         new_set = (arg >> 16) & 0xff;
4405
4406         if (cmd > 4)
4407                 return EINVAL;
4408         if (new_set >= IPFW_DEFAULT_SET)
4409                 return EINVAL;
4410         if (cmd == 0 || cmd == 2) {
4411                 if (rulenum == IPFW_DEFAULT_RULE)
4412                         return EINVAL;
4413         } else {
4414                 if (rulenum >= IPFW_DEFAULT_SET)
4415                         return EINVAL;
4416         }
4417
4418         switch (cmd) {
4419         case 0: /* delete rules with given number */
4420                 error = ipfw_alt_delete_rule(rulenum);
4421                 break;
4422
4423         case 1: /* delete all rules with given set number */
4424                 error = ipfw_alt_delete_ruleset(rulenum);
4425                 break;
4426
4427         case 2: /* move rules with given number to new set */
4428                 error = ipfw_alt_move_rule(rulenum, new_set);
4429                 break;
4430
4431         case 3: /* move rules with given set number to new set */
4432                 error = ipfw_alt_move_ruleset(rulenum, new_set);
4433                 break;
4434
4435         case 4: /* swap two sets */
4436                 error = ipfw_alt_swap_ruleset(rulenum, new_set);
4437                 break;
4438         }
4439         return error;
4440 }
4441
4442 /*
4443  * Clear counters for a specific rule.
4444  */
4445 static void
4446 clear_counters(struct ip_fw *rule, int log_only)
4447 {
4448         ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
4449
4450         if (log_only == 0) {
4451                 rule->bcnt = rule->pcnt = 0;
4452                 rule->timestamp = 0;
4453         }
4454         if (l->o.opcode == O_LOG)
4455                 l->log_left = l->max_log;
4456 }
4457
/*
 * Per-CPU dispatch for ipfw_ctl_zero_entry(): clear counters (or only
 * log quotas) either for the entire chain (rulenum == 0) or for every
 * rule matching zmsg->rulenum, then forward to the next netisr CPU.
 */
static void
ipfw_zero_entry_dispatch(netmsg_t nmsg)
{
	struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	ASSERT_NETISR_NCPUS(mycpuid);

	if (zmsg->rulenum == 0) {
		KKASSERT(zmsg->start_rule == NULL);

		/* Clear everything, including the no-rule counter. */
		ctx->ipfw_norule_counter = 0;
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
			clear_counters(rule, zmsg->log_only);
	} else {
		struct ip_fw *start = zmsg->start_rule;

		KKASSERT(start->cpuid == mycpuid);
		KKASSERT(start->rulenum == zmsg->rulenum);

		/*
		 * We can have multiple rules with the same number, so we
		 * need to clear them all.
		 */
		for (rule = start; rule && rule->rulenum == zmsg->rulenum;
		     rule = rule->next)
			clear_counters(rule, zmsg->log_only);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		zmsg->start_rule = start->sibling;
	}
	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
4495
4496 /*
4497  * Reset some or all counters on firewall rules.
4498  * @arg frwl is null to clear all entries, or contains a specific
4499  * rule number.
4500  * @arg log_only is 1 if we only want to reset logs, zero otherwise.
4501  */
static int
ipfw_ctl_zero_entry(int rulenum, int log_only)
{
	struct netmsg_zent zmsg;
	struct netmsg_base *nmsg;
	const char *msg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ASSERT_NETISR0;

	bzero(&zmsg, sizeof(zmsg));
	nmsg = &zmsg.base;
	netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_zero_entry_dispatch);
	zmsg.log_only = log_only;

	if (rulenum == 0) {
		/* Clear all rules; no %d in these messages. */
		msg = log_only ? "ipfw: All logging counts reset.\n"
			       : "ipfw: Accounting cleared.\n";
	} else {
		struct ip_fw *rule;

		/*
		 * Locate the first rule with 'rulenum'
		 */
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
			if (rule->rulenum == rulenum)
				break;
		}
		if (rule == NULL) /* we did not find any matching rules */
			return (EINVAL);
		zmsg.start_rule = rule;
		zmsg.rulenum = rulenum;

		msg = log_only ? "ipfw: Entry %d logging count reset.\n"
			       : "ipfw: Entry %d cleared.\n";
	}
	/* Run the clearing on all netisr CPUs. */
	netisr_domsg_global(nmsg);
	KKASSERT(zmsg.start_rule == NULL);

	if (fw_verbose)
		log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
	return (0);
}
4546
4547 /*
4548  * Check validity of the structure before insert.
4549  * Fortunately rules are simple, so this mostly need to check rule sizes.
4550  */
4551 static int
4552 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
4553 {
4554         int l, cmdlen = 0;
4555         int have_action = 0;
4556         ipfw_insn *cmd;
4557
4558         *rule_flags = 0;
4559
4560         /* Check for valid size */
4561         if (size < sizeof(*rule)) {
4562                 kprintf("ipfw: rule too short\n");
4563                 return EINVAL;
4564         }
4565         l = IOC_RULESIZE(rule);
4566         if (l != size) {
4567                 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
4568                 return EINVAL;
4569         }
4570
4571         /* Check rule number */
4572         if (rule->rulenum == IPFW_DEFAULT_RULE) {
4573                 kprintf("ipfw: invalid rule number\n");
4574                 return EINVAL;
4575         }
4576
4577         /*
4578          * Now go for the individual checks. Very simple ones, basically only
4579          * instruction sizes.
4580          */
4581         for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
4582              l -= cmdlen, cmd += cmdlen) {
4583                 cmdlen = F_LEN(cmd);
4584                 if (cmdlen > l) {
4585                         kprintf("ipfw: opcode %d size truncated\n",
4586                                 cmd->opcode);
4587                         return EINVAL;
4588                 }
4589
4590                 DPRINTF("ipfw: opcode %d\n", cmd->opcode);
4591
4592                 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
4593                         /* This rule will generate states. */
4594                         *rule_flags |= IPFW_RULE_F_GENSTATE;
4595                         if (cmd->opcode == O_LIMIT)
4596                                 *rule_flags |= IPFW_RULE_F_GENTRACK;
4597                 }
4598                 if (cmd->opcode == O_DEFRAG)
4599                         *rule_flags |= IPFW_RULE_F_CROSSREF;
4600
4601                 switch (cmd->opcode) {
4602                 case O_NOP:
4603                 case O_PROBE_STATE:
4604                 case O_KEEP_STATE:
4605                 case O_PROTO:
4606                 case O_IP_SRC_ME:
4607                 case O_IP_DST_ME:
4608                 case O_LAYER2:
4609                 case O_IN:
4610                 case O_FRAG:
4611                 case O_IPOPT:
4612                 case O_IPLEN:
4613                 case O_IPID:
4614                 case O_IPTOS:
4615                 case O_IPPRECEDENCE:
4616                 case O_IPTTL:
4617                 case O_IPVER:
4618                 case O_TCPWIN:
4619                 case O_TCPFLAGS:
4620                 case O_TCPOPTS:
4621                 case O_ESTAB:
4622                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
4623                                 goto bad_size;
4624                         break;
4625
4626                 case O_IP_SRC_TABLE:
4627                 case O_IP_DST_TABLE:
4628                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
4629                                 goto bad_size;
4630                         if (cmd->arg1 >= ipfw_table_max) {
4631                                 kprintf("ipfw: invalid table id %u, max %d\n",
4632                                     cmd->arg1, ipfw_table_max);
4633                                 return EINVAL;
4634                         }
4635                         break;
4636
4637                 case O_UID:
4638                 case O_GID:
4639                 case O_IP_SRC:
4640                 case O_IP_DST:
4641                 case O_TCPSEQ:
4642                 case O_TCPACK:
4643                 case O_PROB:
4644                 case O_ICMPTYPE:
4645                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
4646                                 goto bad_size;
4647                         break;
4648
4649                 case O_LIMIT:
4650                         if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
4651                                 goto bad_size;
4652                         break;
4653
4654                 case O_LOG:
4655                         if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
4656                                 goto bad_size;
4657
4658                         ((ipfw_insn_log *)cmd)->log_left =
4659                             ((ipfw_insn_log *)cmd)->max_log;
4660
4661                         break;
4662
4663                 case O_IP_SRC_MASK:
4664                 case O_IP_DST_MASK:
4665                         if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
4666                                 goto bad_size;
4667                         if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
4668                                 kprintf("ipfw: opcode %d, useless rule\n",
4669                                         cmd->opcode);
4670                                 return EINVAL;
4671                         }
4672                         break;
4673
4674                 case O_IP_SRC_SET:
4675                 case O_IP_DST_SET:
4676                         if (cmd->arg1 == 0 || cmd->arg1 > 256) {
4677                                 kprintf("ipfw: invalid set size %d\n",
4678                                         cmd->arg1);
4679                                 return EINVAL;
4680                         }
4681                         if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
4682                             (cmd->arg1+31)/32 )
4683                                 goto bad_size;
4684                         break;
4685
4686                 case O_MACADDR2:
4687                         if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
4688                                 goto bad_size;
4689                         break;
4690
4691                 case O_MAC_TYPE:
4692                 case O_IP_SRCPORT:
4693                 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
4694                         if (cmdlen < 2 || cmdlen > 31)
4695                                 goto bad_size;
4696                         break;
4697
4698                 case O_RECV:
4699                 case O_XMIT:
4700                 case O_VIA:
4701                         if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
4702                                 goto bad_size;
4703                         break;
4704
4705                 case O_PIPE:
4706                 case O_QUEUE:
4707                         if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
4708                                 goto bad_size;
4709                         goto check_action;
4710
4711                 case O_FORWARD_IP:
4712                         if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
4713                                 goto bad_size;
4714                         } else {
4715                                 in_addr_t fwd_addr;
4716
4717                                 fwd_addr = ((ipfw_insn_sa *)cmd)->
4718                                            sa.sin_addr.s_addr;
4719                                 if (IN_MULTICAST(ntohl(fwd_addr))) {
4720                                         kprintf("ipfw: try forwarding to "
4721                                                 "multicast address\n");
4722                                         return EINVAL;
4723                                 }
4724                         }
4725                         goto check_action;
4726
4727                 case O_FORWARD_MAC: /* XXX not implemented yet */
4728                 case O_CHECK_STATE:
4729                 case O_COUNT:
4730                 case O_ACCEPT:
4731                 case O_DENY:
4732                 case O_REJECT:
4733                 case O_SKIPTO:
4734                 case O_DIVERT:
4735                 case O_TEE:
4736                 case O_DEFRAG:
4737                         if (cmdlen != F_INSN_SIZE(ipfw_insn))
4738                                 goto bad_size;
4739 check_action:
4740                         if (have_action) {
4741                                 kprintf("ipfw: opcode %d, multiple actions"
4742                                         " not allowed\n",
4743                                         cmd->opcode);
4744                                 return EINVAL;
4745                         }
4746                         have_action = 1;
4747                         if (l != cmdlen) {
4748                                 kprintf("ipfw: opcode %d, action must be"
4749                                         " last opcode\n",
4750                                         cmd->opcode);
4751                                 return EINVAL;
4752                         }
4753                         break;
4754                 default:
4755                         kprintf("ipfw: opcode %d, unknown opcode\n",
4756                                 cmd->opcode);
4757                         return EINVAL;
4758                 }
4759         }
4760         if (have_action == 0) {
4761                 kprintf("ipfw: missing action\n");
4762                 return EINVAL;
4763         }
4764         return 0;
4765
4766 bad_size:
4767         kprintf("ipfw: opcode %d size %d wrong\n",
4768                 cmd->opcode, cmdlen);
4769         return EINVAL;
4770 }
4771
/*
 * Handler for the IP_FW_ADD sockopt: validate the rule image passed
 * down from userland and install it on all netisr cpus.
 *
 * Returns 0 on success or EINVAL if the buffer size or the rule
 * itself fails validation.
 */
static int
ipfw_ctl_add_rule(struct sockopt *sopt)
{
	struct ipfw_ioc_rule *ioc_rule;
	size_t size;
	uint32_t rule_flags;
	int error;

	ASSERT_NETISR0;

	size = sopt->sopt_valsize;
	/*
	 * Reject buffers larger than the maximum possible rule size or
	 * too small to hold even the fixed part of an ioc_rule.
	 */
	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
	    size < sizeof(*ioc_rule)) {
		return EINVAL;
	}
	/*
	 * Grow the sockopt buffer to the maximum rule size so later
	 * processing (and the optional copyback below) always has the
	 * full IPFW_RULE_SIZE_MAX space to work with.
	 */
	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
					  IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
	}
	ioc_rule = sopt->sopt_val;

	/* Note: validate against the ORIGINAL size, not the grown buffer. */
	error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
	if (error)
		return error;

	ipfw_add_rule(ioc_rule, rule_flags);

	/* If the caller asked for the rule back, report its actual size. */
	if (sopt->sopt_dir == SOPT_GET)
		sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
	return 0;
}
4803
/*
 * Convert the cpu0 copy of a static rule into its userland ioctl
 * representation, merging statistics from the rule's per-cpu
 * duplications (siblings).  Returns the address just past the copied
 * rule so the caller can pack rules back to back into one buffer.
 */
static void *
ipfw_copy_rule(const struct ipfw_context *ctx, const struct ip_fw *rule,
    struct ipfw_ioc_rule *ioc_rule)
{
	const struct ip_fw *sibling;
#ifdef INVARIANTS
	int i;
#endif

	ASSERT_NETISR0;
	KASSERT(rule->cpuid == 0, ("rule does not belong to cpu0"));

	ioc_rule->act_ofs = rule->act_ofs;
	ioc_rule->cmd_len = rule->cmd_len;
	ioc_rule->rulenum = rule->rulenum;
	ioc_rule->set = rule->set;
	ioc_rule->usr_flags = rule->usr_flags;

	/* Global snapshot data piggybacked onto each exported rule. */
	ioc_rule->set_disable = ctx->ipfw_set_disable;
	ioc_rule->static_count = static_count;
	ioc_rule->static_len = static_ioc_len;

	/*
	 * Visit (read-only) all of the rule's duplications to get
	 * the necessary statistics
	 */
#ifdef INVARIANTS
	i = 0;
#endif
	ioc_rule->pcnt = 0;
	ioc_rule->bcnt = 0;
	ioc_rule->timestamp = 0;
	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
		ioc_rule->pcnt += sibling->pcnt;
		ioc_rule->bcnt += sibling->bcnt;
		/* Report the most recent match time across all cpus. */
		if (sibling->timestamp > ioc_rule->timestamp)
			ioc_rule->timestamp = sibling->timestamp;
#ifdef INVARIANTS
		++i;
#endif
	}
	/* KASSERT compiles away without INVARIANTS, where 'i' is undefined. */
	KASSERT(i == netisr_ncpus,
	    ("static rule is not duplicated on netisr_ncpus %d", netisr_ncpus));

	/* cmd_len is in 32-bit words, hence the multiply by 4. */
	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);

	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
}
4852
4853 static boolean_t
4854 ipfw_track_copy(const struct ipfw_trkcnt *trk, struct ipfw_ioc_state *ioc_state)
4855 {
4856         struct ipfw_ioc_flowid *ioc_id;
4857
4858         if (trk->tc_expire == 0) {
4859                 /* Not a scanned one. */
4860                 return (FALSE);
4861         }
4862
4863         ioc_state->expire = TIME_LEQ(trk->tc_expire, time_uptime) ?
4864             0 : trk->tc_expire - time_uptime;
4865         ioc_state->pcnt = 0;
4866         ioc_state->bcnt = 0;
4867
4868         ioc_state->dyn_type = O_LIMIT_PARENT;
4869         ioc_state->count = trk->tc_count;
4870
4871         ioc_state->rulenum = trk->tc_rulenum;
4872
4873         ioc_id = &ioc_state->id;
4874         ioc_id->type = ETHERTYPE_IP;
4875         ioc_id->u.ip.proto = trk->tc_proto;
4876         ioc_id->u.ip.src_ip = trk->tc_saddr;
4877         ioc_id->u.ip.dst_ip = trk->tc_daddr;
4878         ioc_id->u.ip.src_port = trk->tc_sport;
4879         ioc_id->u.ip.dst_port = trk->tc_dport;
4880
4881         return (TRUE);
4882 }
4883
4884 static boolean_t
4885 ipfw_state_copy(const struct ipfw_state *s, struct ipfw_ioc_state *ioc_state)
4886 {
4887         struct ipfw_ioc_flowid *ioc_id;
4888
4889         if (s->st_type == O_ANCHOR)
4890                 return (FALSE);
4891
4892         ioc_state->expire = TIME_LEQ(s->st_expire, time_uptime) ?
4893             0 : s->st_expire - time_uptime;
4894         ioc_state->pcnt = s->st_pcnt;
4895         ioc_state->bcnt = s->st_bcnt;
4896
4897         ioc_state->dyn_type = s->st_type;
4898         ioc_state->count = 0;
4899
4900         ioc_state->rulenum = s->st_rule->rulenum;
4901
4902         ioc_id = &ioc_state->id;
4903         ioc_id->type = ETHERTYPE_IP;
4904         ioc_id->u.ip.proto = s->st_proto;
4905         ipfw_key_4tuple(&s->st_key,
4906             &ioc_id->u.ip.src_ip, &ioc_id->u.ip.src_port,
4907             &ioc_id->u.ip.dst_ip, &ioc_id->u.ip.dst_port);
4908
4909         return (TRUE);
4910 }
4911
/*
 * Per-cpu stage of the state export: copy this cpu's states (and, on
 * the last cpu, the global tracks) into the userland buffer carried
 * by the netmsg_cpstate message, then forward the message to the next
 * netisr cpu, or reply early once the buffer is full.
 */
static void
ipfw_state_copy_dispatch(netmsg_t nmsg)
{
	struct netmsg_cpstate *nm = (struct netmsg_cpstate *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	const struct ipfw_state *s;
	const struct ipfw_track *t;

	ASSERT_NETISR_NCPUS(mycpuid);
	KASSERT(nm->state_cnt < nm->state_cntmax,
	    ("invalid state count %d, max %d",
	     nm->state_cnt, nm->state_cntmax));

	/* Export this cpu's states until done or out of buffer space. */
	TAILQ_FOREACH(s, &ctx->ipfw_state_list, st_link) {
		if (ipfw_state_copy(s, nm->ioc_state)) {
			nm->ioc_state++;
			nm->state_cnt++;
			if (nm->state_cnt == nm->state_cntmax)
				goto done;
		}
	}

	/*
	 * Prepare tracks in the global track tree for userland.
	 */
	TAILQ_FOREACH(t, &ctx->ipfw_track_list, t_link) {
		struct ipfw_trkcnt *trk;

		if (t->t_count == NULL) /* anchor */
			continue;
		trk = t->t_trkcnt;

		/*
		 * Only one netisr can run this function at
		 * any time, and only this function accesses
		 * trkcnt's tc_expire, so this is safe w/o
		 * ipfw_gd.ipfw_trkcnt_token.
		 */
		if (trk->tc_expire > t->t_expire)
			continue;
		trk->tc_expire = t->t_expire;
	}

	/*
	 * Copy tracks in the global track tree to userland in
	 * the last netisr.
	 */
	if (mycpuid == netisr_ncpus - 1) {
		struct ipfw_trkcnt *trk;

		KASSERT(nm->state_cnt < nm->state_cntmax,
		    ("invalid state count %d, max %d",
		     nm->state_cnt, nm->state_cntmax));

		/* The global track tree is shared; take the token. */
		IPFW_TRKCNT_TOKGET;
		RB_FOREACH(trk, ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree) {
			if (ipfw_track_copy(trk, nm->ioc_state)) {
				nm->ioc_state++;
				nm->state_cnt++;
				if (nm->state_cnt == nm->state_cntmax) {
					IPFW_TRKCNT_TOKREL;
					goto done;
				}
			}
		}
		IPFW_TRKCNT_TOKREL;
	}
done:
	if (nm->state_cnt == nm->state_cntmax) {
		/* No more space; done. */
		netisr_replymsg(&nm->base, 0);
	} else {
		netisr_forwardmsg(&nm->base, mycpuid + 1);
	}
}
4987
/*
 * Handler for the IP_FW_GET sockopt: export all static rules followed
 * by a possibly empty list of states/tracks into the caller's buffer.
 * If the buffer is too small, it is zeroed and 0 is returned with no
 * data (userland retries with a bigger buffer).
 */
static int
ipfw_ctl_get_rules(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	void *bp;
	size_t size;
	int state_cnt;

	ASSERT_NETISR0;

	/*
	 * pass up a copy of the current rules. Static rules
	 * come first (the last of which has number IPFW_DEFAULT_RULE),
	 * followed by a possibly empty list of states.
	 */

	size = static_ioc_len;	/* size of static rules */

	/*
	 * Size of the states.
	 * XXX take tracks as state for userland compat.
	 */
	state_cnt = ipfw_state_cntcoll() + ipfw_gd.ipfw_trkcnt_cnt;
	state_cnt = (state_cnt * 5) / 4; /* leave 25% headroom */
	size += state_cnt * sizeof(struct ipfw_ioc_state);

	if (sopt->sopt_valsize < size) {
		/* short length, no need to return incomplete rules */
		/* XXX: if superuser, no need to zero buffer */
		bzero(sopt->sopt_val, sopt->sopt_valsize);
		return 0;
	}
	bp = sopt->sopt_val;

	/* Copy the static rules; bp advances past each exported rule. */
	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		bp = ipfw_copy_rule(ctx, rule, bp);

	if (state_cnt) {
		struct netmsg_cpstate nm;
#ifdef INVARIANTS
		size_t old_size = size;
#endif

		/* Gather states from each netisr cpu in turn. */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipfw_state_copy_dispatch);
		nm.ioc_state = bp;
		nm.state_cntmax = state_cnt;
		nm.state_cnt = 0;
		netisr_domsg_global(&nm.base);

		/*
		 * The # of states may be shrinked after the snapshot
		 * of the state count was taken.  To give user a correct
		 * state count, nm->state_cnt is used to recalculate
		 * the actual size.
		 */
		size = static_ioc_len +
		    (nm.state_cnt * sizeof(struct ipfw_ioc_state));
		KKASSERT(size <= old_size);
	}

	sopt->sopt_valsize = size;
	return 0;
}
5053
5054 static void
5055 ipfw_set_disable_dispatch(netmsg_t nmsg)
5056 {
5057         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5058
5059         ASSERT_NETISR_NCPUS(mycpuid);
5060
5061         ctx->ipfw_set_disable = nmsg->lmsg.u.ms_result32;
5062         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
5063 }
5064
5065 static void
5066 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
5067 {
5068         struct netmsg_base nmsg;
5069         uint32_t set_disable;
5070
5071         ASSERT_NETISR0;
5072
5073         /* IPFW_DEFAULT_SET is always enabled */
5074         enable |= (1 << IPFW_DEFAULT_SET);
5075         set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
5076
5077         bzero(&nmsg, sizeof(nmsg));
5078         netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
5079             ipfw_set_disable_dispatch);
5080         nmsg.lmsg.u.ms_result32 = set_disable;
5081
5082         netisr_domsg_global(&nmsg);
5083 }
5084
5085 static void
5086 ipfw_table_create_dispatch(netmsg_t nm)
5087 {
5088         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5089         int tblid = nm->lmsg.u.ms_result;
5090
5091         ASSERT_NETISR_NCPUS(mycpuid);
5092
5093         if (!rn_inithead((void **)&ctx->ipfw_tables[tblid],
5094             rn_cpumaskhead(mycpuid), 32))
5095                 panic("ipfw: create table%d failed", tblid);
5096
5097         netisr_forwardmsg(&nm->base, mycpuid + 1);
5098 }
5099
5100 static int
5101 ipfw_table_create(struct sockopt *sopt)
5102 {
5103         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5104         struct ipfw_ioc_table *tbl;
5105         struct netmsg_base nm;
5106
5107         ASSERT_NETISR0;
5108
5109         if (sopt->sopt_valsize != sizeof(*tbl))
5110                 return (EINVAL);
5111
5112         tbl = sopt->sopt_val;
5113         if (tbl->tableid < 0 || tbl->tableid >= ipfw_table_max)
5114                 return (EINVAL);
5115
5116         if (ctx->ipfw_tables[tbl->tableid] != NULL)
5117                 return (EEXIST);
5118
5119         netmsg_init(&nm, NULL, &curthread->td_msgport, MSGF_PRIORITY,
5120             ipfw_table_create_dispatch);
5121         nm.lmsg.u.ms_result = tbl->tableid;
5122         netisr_domsg_global(&nm);
5123
5124         return (0);
5125 }
5126
5127 static void
5128 ipfw_table_killrn(struct radix_node_head *rnh, struct radix_node *rn)
5129 {
5130         struct radix_node *ret;
5131
5132         ret = rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
5133         if (ret != rn)
5134                 panic("deleted other table entry");
5135         kfree(ret, M_IPFW);
5136 }
5137
/* rnh_walktree callback: delete every entry visited. */
static int
ipfw_table_killent(struct radix_node *rn, void *xrnh)
{
	ipfw_table_killrn(xrnh, rn);
	return (0);
}
5145
5146 static void
5147 ipfw_table_flush_oncpu(struct ipfw_context *ctx, int tableid,
5148     int destroy)
5149 {
5150         struct radix_node_head *rnh;
5151
5152         ASSERT_NETISR_NCPUS(mycpuid);
5153
5154         rnh = ctx->ipfw_tables[tableid];
5155         rnh->rnh_walktree(rnh, ipfw_table_killent, rnh);
5156         if (destroy) {
5157                 Free(rnh);
5158                 ctx->ipfw_tables[tableid] = NULL;
5159         }
5160 }
5161
5162 static void
5163 ipfw_table_flush_dispatch(netmsg_t nmsg)
5164 {
5165         struct netmsg_tblflush *nm = (struct netmsg_tblflush *)nmsg;
5166         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5167
5168         ASSERT_NETISR_NCPUS(mycpuid);
5169
5170         ipfw_table_flush_oncpu(ctx, nm->tableid, nm->destroy);
5171         netisr_forwardmsg(&nm->base, mycpuid + 1);
5172 }
5173
5174 static void
5175 ipfw_table_flushall_oncpu(struct ipfw_context *ctx, int destroy)
5176 {
5177         int i;
5178
5179         ASSERT_NETISR_NCPUS(mycpuid);
5180
5181         for (i = 0; i < ipfw_table_max; ++i) {
5182                 if (ctx->ipfw_tables[i] != NULL)
5183                         ipfw_table_flush_oncpu(ctx, i, destroy);
5184         }
5185 }
5186
5187 static void
5188 ipfw_table_flushall_dispatch(netmsg_t nmsg)
5189 {
5190         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5191
5192         ASSERT_NETISR_NCPUS(mycpuid);
5193
5194         ipfw_table_flushall_oncpu(ctx, 0);
5195         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
5196 }
5197
5198 static int
5199 ipfw_table_flush(struct sockopt *sopt)
5200 {
5201         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5202         struct ipfw_ioc_table *tbl;
5203         struct netmsg_tblflush nm;
5204
5205         ASSERT_NETISR0;
5206
5207         if (sopt->sopt_valsize != sizeof(*tbl))
5208                 return (EINVAL);
5209
5210         tbl = sopt->sopt_val;
5211         if (sopt->sopt_name == IP_FW_TBL_FLUSH && tbl->tableid < 0) {
5212                 netmsg_init(&nm.base, NULL, &curthread->td_msgport,
5213                     MSGF_PRIORITY, ipfw_table_flushall_dispatch);
5214                 netisr_domsg_global(&nm.base);
5215                 return (0);
5216         }
5217
5218         if (tbl->tableid < 0 || tbl->tableid >= ipfw_table_max)
5219                 return (EINVAL);
5220
5221         if (ctx->ipfw_tables[tbl->tableid] == NULL)
5222                 return (ENOENT);
5223
5224         netmsg_init(&nm.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
5225             ipfw_table_flush_dispatch);
5226         nm.tableid = tbl->tableid;
5227         nm.destroy = 0;
5228         if (sopt->sopt_name == IP_FW_TBL_DESTROY)
5229                 nm.destroy = 1;
5230         netisr_domsg_global(&nm.base);
5231
5232         return (0);
5233 }
5234
5235 static int
5236 ipfw_table_cntent(struct radix_node *rn __unused, void *xcnt)
5237 {
5238         int *cnt = xcnt;
5239
5240         (*cnt)++;
5241         return (0);
5242 }
5243
/*
 * rnh_walktree callback: export one table entry into the next free
 * slot of the userland entry array carried by the ipfw_table_cp
 * cursor, merging statistics from the entry's per-cpu siblings.
 */
static int
ipfw_table_cpent(struct radix_node *rn, void *xcp)
{
	struct ipfw_table_cp *cp = xcp;
	struct ipfw_tblent *te = (struct ipfw_tblent *)rn;
	struct ipfw_ioc_tblent *ioc_te;
#ifdef INVARIANTS
	int cnt;
#endif

	KASSERT(cp->te_idx < cp->te_cnt, ("invalid table cp idx %d, cnt %d",
	    cp->te_idx, cp->te_cnt));
	ioc_te = &cp->te[cp->te_idx];

	/* A radix mask's first byte is its length; copy that many bytes. */
	if (te->te_nodes->rn_mask != NULL) {
		memcpy(&ioc_te->netmask, te->te_nodes->rn_mask,
		    *te->te_nodes->rn_mask);
	} else {
		/* No netmask: a host entry. */
		ioc_te->netmask.sin_len = 0;
	}
	memcpy(&ioc_te->key, &te->te_key, sizeof(ioc_te->key));

	ioc_te->use = te->te_use;
	ioc_te->last_used = te->te_lastuse;
#ifdef INVARIANTS
	cnt = 1;
#endif

	/* Merge statistics from the other cpus' copies of this entry. */
	while ((te = te->te_sibling) != NULL) {
#ifdef INVARIANTS
		++cnt;
#endif
		ioc_te->use += te->te_use;
		if (te->te_lastuse > ioc_te->last_used)
			ioc_te->last_used = te->te_lastuse;
	}
	/* KASSERT compiles away without INVARIANTS, where 'cnt' is undefined. */
	KASSERT(cnt == netisr_ncpus,
	    ("invalid # of tblent %d, should be %d", cnt, netisr_ncpus));

	cp->te_idx++;

	return (0);
}
5287
/*
 * Handler for the IP_FW_TBL_GET sockopt.  With a negative table id,
 * return the list of existing table ids; otherwise return every
 * entry of the requested table.  E2BIG is returned (with a zeroed
 * buffer) when the caller's buffer is too small.
 */
static int
ipfw_table_get(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct radix_node_head *rnh;
	struct ipfw_ioc_table *tbl;
	struct ipfw_ioc_tblcont *cont;
	struct ipfw_table_cp cp;
	int cnt = 0, sz;

	ASSERT_NETISR0;

	if (sopt->sopt_valsize < sizeof(*tbl))
		return (EINVAL);

	tbl = sopt->sopt_val;
	if (tbl->tableid < 0) {
		struct ipfw_ioc_tbllist *list;
		int i;

		/*
		 * List available table ids.
		 */
		for (i = 0; i < ipfw_table_max; ++i) {
			if (ctx->ipfw_tables[i] != NULL)
				++cnt;
		}

		sz = __offsetof(struct ipfw_ioc_tbllist, tables[cnt]);
		if (sopt->sopt_valsize < sz) {
			bzero(sopt->sopt_val, sopt->sopt_valsize);
			return (E2BIG);
		}
		list = sopt->sopt_val;
		list->tablecnt = cnt;

		/* Second pass: record the id of each existing table. */
		cnt = 0;
		for (i = 0; i < ipfw_table_max; ++i) {
			if (ctx->ipfw_tables[i] != NULL) {
				KASSERT(cnt < list->tablecnt,
				    ("invalid idx %d, cnt %d",
				     cnt, list->tablecnt));
				list->tables[cnt++] = i;
			}
		}
		sopt->sopt_valsize = sz;
		return (0);
	} else if (tbl->tableid >= ipfw_table_max) {
		return (EINVAL);
	}

	rnh = ctx->ipfw_tables[tbl->tableid];
	if (rnh == NULL)
		return (ENOENT);
	/* First pass: count entries to size the reply. */
	rnh->rnh_walktree(rnh, ipfw_table_cntent, &cnt);

	sz = __offsetof(struct ipfw_ioc_tblcont, ent[cnt]);
	if (sopt->sopt_valsize < sz) {
		bzero(sopt->sopt_val, sopt->sopt_valsize);
		return (E2BIG);
	}
	cont = sopt->sopt_val;
	cont->entcnt = cnt;

	/* Second pass: copy the entries out through the cursor. */
	cp.te = cont->ent;
	cp.te_idx = 0;
	cp.te_cnt = cnt;
	rnh->rnh_walktree(rnh, ipfw_table_cpent, &cp);

	sopt->sopt_valsize = sz;
	return (0);
}
5360
/*
 * Per-cpu stage of table entry addition.  cpu0 runs first: if the
 * address already exists there, the add is rejected with EEXIST
 * before any other cpu is touched.  On subsequent cpus a failure
 * should be impossible -- the per-cpu trees are kept in sync -- so
 * a failure there panics.  Each cpu's new entry is chained to the
 * previous cpu's copy via te_sibling.
 */
static void
ipfw_table_add_dispatch(netmsg_t nmsg)
{
	struct netmsg_tblent *nm = (struct netmsg_tblent *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct radix_node_head *rnh;
	struct ipfw_tblent *te;

	ASSERT_NETISR_NCPUS(mycpuid);

	rnh = ctx->ipfw_tables[nm->tableid];

	te = kmalloc(sizeof(*te), M_IPFW, M_WAITOK | M_ZERO);
	te->te_nodes->rn_key = (char *)&te->te_key;
	memcpy(&te->te_key, nm->key, sizeof(te->te_key));

	if (rnh->rnh_addaddr((char *)&te->te_key, (char *)nm->netmask, rnh,
	    te->te_nodes) == NULL) {
		if (mycpuid == 0) {
			/* Duplicate entry; reply without forwarding. */
			kfree(te, M_IPFW);
			netisr_replymsg(&nm->base, EEXIST);
			return;
		}
		panic("rnh_addaddr failed");
	}

	/* Link siblings. */
	if (nm->sibling != NULL)
		nm->sibling->te_sibling = te;
	nm->sibling = te;

	netisr_forwardmsg(&nm->base, mycpuid + 1);
}
5394
/*
 * Per-cpu stage of table entry deletion.  cpu0 runs first: if the
 * address is not found there, ESRCH is returned before any other
 * cpu is touched.  On subsequent cpus the entry must exist, since
 * additions and deletions keep the per-cpu trees in sync; a miss
 * there panics.
 */
static void
ipfw_table_del_dispatch(netmsg_t nmsg)
{
	struct netmsg_tblent *nm = (struct netmsg_tblent *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct radix_node_head *rnh;
	struct radix_node *rn;

	ASSERT_NETISR_NCPUS(mycpuid);

	rnh = ctx->ipfw_tables[nm->tableid];
	rn = rnh->rnh_deladdr((char *)nm->key, (char *)nm->netmask, rnh);
	if (rn == NULL) {
		if (mycpuid == 0) {
			netisr_replymsg(&nm->base, ESRCH);
			return;
		}
		panic("rnh_deladdr failed");
	}
	kfree(rn, M_IPFW);

	netisr_forwardmsg(&nm->base, mycpuid + 1);
}
5418
/*
 * Handler for the IP_FW_TBL_ADD/IP_FW_TBL_DEL sockopts: validate the
 * single entry supplied by userland and add it to / delete it from
 * the given table on all netisr cpus.
 */
static int
ipfw_table_alt(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ipfw_ioc_tblcont *tbl;
	struct ipfw_ioc_tblent *te;
	struct sockaddr_in key0;
	struct sockaddr *netmask = NULL, *key;
	struct netmsg_tblent nm;

	ASSERT_NETISR0;

	if (sopt->sopt_valsize != sizeof(*tbl))
		return (EINVAL);
	tbl = sopt->sopt_val;

	if (tbl->tableid < 0  || tbl->tableid >= ipfw_table_max)
		return (EINVAL);
	/* Exactly one entry per call. */
	if (tbl->entcnt != 1)
		return (EINVAL);

	if (ctx->ipfw_tables[tbl->tableid] == NULL)
		return (ENOENT);
	te = &tbl->ent[0];

	if (te->key.sin_family != AF_INET ||
	    te->key.sin_port != 0 ||
	    te->key.sin_len != sizeof(struct sockaddr_in))
		return (EINVAL);
	key = (struct sockaddr *)&te->key;

	/* A zero sin_len means no netmask, i.e. a host entry. */
	if (te->netmask.sin_len != 0) {
		if (te->netmask.sin_port != 0 ||
		    te->netmask.sin_len > sizeof(struct sockaddr_in))
			return (EINVAL);
		netmask = (struct sockaddr *)&te->netmask;
		/* Canonicalize: mask the key before inserting/looking up. */
		sa_maskedcopy(key, (struct sockaddr *)&key0, netmask);
		key = (struct sockaddr *)&key0;
	}

	if (sopt->sopt_name == IP_FW_TBL_ADD) {
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipfw_table_add_dispatch);
	} else {
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipfw_table_del_dispatch);
	}
	nm.key = key;
	nm.netmask = netmask;
	nm.tableid = tbl->tableid;
	nm.sibling = NULL;
	return (netisr_domsg_global(&nm.base));
}
5472
5473 static int
5474 ipfw_table_zeroent(struct radix_node *rn, void *arg __unused)
5475 {
5476         struct ipfw_tblent *te = (struct ipfw_tblent *)rn;
5477
5478         te->te_use = 0;
5479         te->te_lastuse = 0;
5480         return (0);
5481 }
5482
5483 static void
5484 ipfw_table_zero_dispatch(netmsg_t nmsg)
5485 {
5486         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5487         struct radix_node_head *rnh;
5488
5489         ASSERT_NETISR_NCPUS(mycpuid);
5490
5491         rnh = ctx->ipfw_tables[nmsg->lmsg.u.ms_result];
5492         rnh->rnh_walktree(rnh, ipfw_table_zeroent, NULL);
5493
5494         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
5495 }
5496
5497 static void
5498 ipfw_table_zeroall_dispatch(netmsg_t nmsg)
5499 {
5500         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5501         int i;
5502
5503         ASSERT_NETISR_NCPUS(mycpuid);
5504
5505         for (i = 0; i < ipfw_table_max; ++i) {
5506                 struct radix_node_head *rnh = ctx->ipfw_tables[i];
5507
5508                 if (rnh != NULL)
5509                         rnh->rnh_walktree(rnh, ipfw_table_zeroent, NULL);
5510         }
5511         netisr_forwardmsg(&nmsg->base, mycpuid + 1);
5512 }
5513
5514 static int
5515 ipfw_table_zero(struct sockopt *sopt)
5516 {
5517         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5518         struct netmsg_base nm;
5519         struct ipfw_ioc_table *tbl;
5520
5521         ASSERT_NETISR0;
5522
5523         if (sopt->sopt_valsize != sizeof(*tbl))
5524                 return (EINVAL);
5525         tbl = sopt->sopt_val;
5526
5527         if (tbl->tableid < 0) {
5528                 netmsg_init(&nm, NULL, &curthread->td_msgport, MSGF_PRIORITY,
5529                     ipfw_table_zeroall_dispatch);
5530                 netisr_domsg_global(&nm);
5531                 return (0);
5532         } else if (tbl->tableid >= ipfw_table_max) {
5533                 return (EINVAL);
5534         } else if (ctx->ipfw_tables[tbl->tableid] == NULL) {
5535                 return (ENOENT);
5536         }
5537
5538         netmsg_init(&nm, NULL, &curthread->td_msgport, MSGF_PRIORITY,
5539             ipfw_table_zero_dispatch);
5540         nm.lmsg.u.ms_result = tbl->tableid;
5541         netisr_domsg_global(&nm);
5542
5543         return (0);
5544 }
5545
5546 static int
5547 ipfw_table_killexp(struct radix_node *rn, void *xnm)
5548 {
5549         struct netmsg_tblexp *nm = xnm;
5550         struct ipfw_tblent *te = (struct ipfw_tblent *)rn;
5551
5552         if (te->te_expired) {
5553                 ipfw_table_killrn(nm->rnh, rn);
5554                 nm->expcnt++;
5555         }
5556         return (0);
5557 }
5558
/*
 * Per-cpu stage of single-table entry expiry: delete all entries of
 * the table that were marked expired on cpu0, then forward to the
 * next cpu.  Since the per-cpu trees are kept in sync, each cpu must
 * delete exactly nm->cnt entries.
 */
static void
ipfw_table_expire_dispatch(netmsg_t nmsg)
{
	struct netmsg_tblexp *nm = (struct netmsg_tblexp *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct radix_node_head *rnh;

	ASSERT_NETISR_NCPUS(mycpuid);

	rnh = ctx->ipfw_tables[nm->tableid];
	nm->rnh = rnh;
	rnh->rnh_walktree(rnh, ipfw_table_killexp, nm);

	/* expcnt accumulates across cpus; each cpu adds nm->cnt. */
	KASSERT(nm->expcnt == nm->cnt * (mycpuid + 1),
	    ("not all expired addresses (%d) were deleted (%d)",
	     nm->cnt * (mycpuid + 1), nm->expcnt));

	netisr_forwardmsg(&nm->base, mycpuid + 1);
}
5578
/*
 * Per-cpu stage of the expire-all operation: delete, from every
 * table, the entries marked expired on cpu0, then forward to the
 * next cpu.  As the per-cpu trees are in sync, each cpu deletes
 * exactly nm->cnt entries in total.
 */
static void
ipfw_table_expireall_dispatch(netmsg_t nmsg)
{
	struct netmsg_tblexp *nm = (struct netmsg_tblexp *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	int i;

	ASSERT_NETISR_NCPUS(mycpuid);

	for (i = 0; i < ipfw_table_max; ++i) {
		struct radix_node_head *rnh = ctx->ipfw_tables[i];

		if (rnh == NULL)
			continue;
		nm->rnh = rnh;
		rnh->rnh_walktree(rnh, ipfw_table_killexp, nm);
	}

	/* expcnt accumulates across cpus; each cpu adds nm->cnt. */
	KASSERT(nm->expcnt == nm->cnt * (mycpuid + 1),
	    ("not all expired addresses (%d) were deleted (%d)",
	     nm->cnt * (mycpuid + 1), nm->expcnt));

	netisr_forwardmsg(&nm->base, mycpuid + 1);
}
5603
5604 static int
5605 ipfw_table_markexp(struct radix_node *rn, void *xnm)
5606 {
5607         struct netmsg_tblexp *nm = xnm;
5608         struct ipfw_tblent *te;
5609         time_t lastuse;
5610
5611         te = (struct ipfw_tblent *)rn;
5612         lastuse = te->te_lastuse;
5613
5614         while ((te = te->te_sibling) != NULL) {
5615                 if (te->te_lastuse > lastuse)
5616                         lastuse = te->te_lastuse;
5617         }
5618         if (!TIME_LEQ(lastuse + nm->expire, time_second)) {
5619                 /* Not expired */
5620                 return (0);
5621         }
5622
5623         te = (struct ipfw_tblent *)rn;
5624         te->te_expired = 1;
5625         while ((te = te->te_sibling) != NULL)
5626                 te->te_expired = 1;
5627         nm->cnt++;
5628
5629         return (0);
5630 }
5631
/*
 * Handler for the IP_FW_TBL_EXPIRE sockopt: delete table entries that
 * have not been used for tbl->expire seconds.
 *
 * Runs on netisr0.  Entries are first marked expired on this CPU's
 * table replicas (ipfw_table_markexp), then deleted on every netisr
 * CPU by circulating a netmsg (each CPU holds sibling copies of the
 * entries).  tbl->tableid < 0 means "all tables".
 *
 * Returns 0 on success, EINVAL on a malformed argument, or ENOENT if
 * the requested table does not exist.  tbl->expcnt is updated in place
 * with the number of expired entries (per CPU).
 */
static int
ipfw_table_expire(struct sockopt *sopt)
{
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        struct netmsg_tblexp nm;
        struct ipfw_ioc_tblexp *tbl;
        struct radix_node_head *rnh;

        ASSERT_NETISR0;

        if (sopt->sopt_valsize != sizeof(*tbl))
                return (EINVAL);
        tbl = sopt->sopt_val;
        tbl->expcnt = 0;

        nm.expcnt = 0;
        nm.cnt = 0;
        nm.expire = tbl->expire;

        if (tbl->tableid < 0) {
                int i;

                /* Mark expired entries in all tables on this CPU. */
                for (i = 0; i < ipfw_table_max; ++i) {
                        rnh = ctx->ipfw_tables[i];
                        if (rnh == NULL)
                                continue;
                        rnh->rnh_walktree(rnh, ipfw_table_markexp, &nm);
                }
                if (nm.cnt == 0) {
                        /* No addresses can be expired. */
                        return (0);
                }
                tbl->expcnt = nm.cnt;

                /* Delete the marked entries on every netisr CPU. */
                netmsg_init(&nm.base, NULL, &curthread->td_msgport,
                    MSGF_PRIORITY, ipfw_table_expireall_dispatch);
                nm.tableid = -1;
                netisr_domsg_global(&nm.base);
                KASSERT(nm.expcnt == nm.cnt * netisr_ncpus,
                    ("not all expired addresses (%d) were deleted (%d)",
                     nm.cnt * netisr_ncpus, nm.expcnt));

                return (0);
        } else if (tbl->tableid >= ipfw_table_max) {
                return (EINVAL);
        }

        /* Single-table case: mark on this CPU, then delete everywhere. */
        rnh = ctx->ipfw_tables[tbl->tableid];
        if (rnh == NULL)
                return (ENOENT);
        rnh->rnh_walktree(rnh, ipfw_table_markexp, &nm);
        if (nm.cnt == 0) {
                /* No addresses can be expired. */
                return (0);
        }
        tbl->expcnt = nm.cnt;

        netmsg_init(&nm.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
            ipfw_table_expire_dispatch);
        nm.tableid = tbl->tableid;
        netisr_domsg_global(&nm.base);
        KASSERT(nm.expcnt == nm.cnt * netisr_ncpus,
            ("not all expired addresses (%d) were deleted (%d)",
             nm.cnt * netisr_ncpus, nm.expcnt));
        return (0);
}
5698
/*
 * Free one per-CPU sibling of a cross-referenced rule on its owning
 * netisr CPU.  The rule must be both cross-referenced and already
 * invalidated; called via netisr_domsg() from ipfw_crossref_reap().
 */
static void
ipfw_crossref_free_dispatch(netmsg_t nmsg)
{
        struct ip_fw *rule = nmsg->lmsg.u.ms_resultp;

        KKASSERT((rule->rule_flags &
            (IPFW_RULE_F_CROSSREF | IPFW_RULE_F_INVALID)) ==
            (IPFW_RULE_F_CROSSREF | IPFW_RULE_F_INVALID));
        ipfw_free_rule(rule);

        netisr_replymsg(&nmsg->base, 0);
}
5711
/*
 * Garbage-collect cross-referenced rules whose in-flight references
 * have drained.  Runs on netisr0.
 *
 * A rule on the ipfw_crossref_free list must stay alive while any
 * CPU's sibling rule still has a non-zero cross_refs (an mbuf carrying
 * a pointer to it is still in flight).  Once all references are gone
 * the rule is unlinked and its siblings are freed on their owning
 * netisr CPUs.  If rules remain on the list, the reap callout is
 * re-armed to retry in one second.
 */
static void
ipfw_crossref_reap(void)
{
        struct ip_fw *rule, *prev = NULL;

        ASSERT_NETISR0;

        rule = ipfw_gd.ipfw_crossref_free;
        while (rule != NULL) {
                uint64_t inflight = 0;
                int i;

                /* Sum outstanding references across all sibling rules. */
                for (i = 0; i < netisr_ncpus; ++i)
                        inflight += rule->cross_rules[i]->cross_refs;
                if (inflight == 0) {
                        struct ip_fw *f = rule;

                        /*
                         * Unlink.
                         */
                        rule = rule->next;
                        if (prev != NULL)
                                prev->next = rule;
                        else
                                ipfw_gd.ipfw_crossref_free = rule;

                        /*
                         * Free.  Siblings on cpu1..N are freed on their
                         * owning netisr; cpu0's copy is released here.
                         */
                        for (i = 1; i < netisr_ncpus; ++i) {
                                struct netmsg_base nm;

                                netmsg_init(&nm, NULL, &curthread->td_msgport,
                                    MSGF_PRIORITY, ipfw_crossref_free_dispatch);
                                nm.lmsg.u.ms_resultp = f->cross_rules[i];
                                netisr_domsg(&nm, i);
                        }
                        KKASSERT((f->rule_flags &
                            (IPFW_RULE_F_CROSSREF | IPFW_RULE_F_INVALID)) ==
                            (IPFW_RULE_F_CROSSREF | IPFW_RULE_F_INVALID));
                        ipfw_unref_rule(f);
                } else {
                        /* Still referenced; keep it and advance. */
                        prev = rule;
                        rule = rule->next;
                }
        }

        if (ipfw_gd.ipfw_crossref_free != NULL) {
                callout_reset(&ipfw_gd.ipfw_crossref_ch, hz,
                    ipfw_crossref_timeo, NULL);
        }
}
5764
5765 /*
5766  * {set|get}sockopt parser.
5767  */
5768 static int
5769 ipfw_ctl(struct sockopt *sopt)
5770 {
5771         int error, rulenum;
5772         uint32_t *masks;
5773         size_t size;
5774
5775         ASSERT_NETISR0;
5776
5777         error = 0;
5778
5779         switch (sopt->sopt_name) {
5780         case IP_FW_GET:
5781                 error = ipfw_ctl_get_rules(sopt);
5782                 break;
5783
5784         case IP_FW_FLUSH:
5785                 ipfw_flush(0 /* keep default rule */);
5786                 break;
5787
5788         case IP_FW_ADD:
5789                 error = ipfw_ctl_add_rule(sopt);
5790                 break;
5791
5792         case IP_FW_DEL:
5793                 /*
5794                  * IP_FW_DEL is used for deleting single rules or sets,
5795                  * and (ab)used to atomically manipulate sets.
5796                  * Argument size is used to distinguish between the two:
5797                  *    sizeof(uint32_t)
5798                  *      delete single rule or set of rules,
5799                  *      or reassign rules (or sets) to a different set.
5800                  *    2 * sizeof(uint32_t)
5801                  *      atomic disable/enable sets.
5802                  *      first uint32_t contains sets to be disabled,
5803                  *      second uint32_t contains sets to be enabled.
5804                  */
5805                 masks = sopt->sopt_val;
5806                 size = sopt->sopt_valsize;
5807                 if (size == sizeof(*masks)) {
5808                         /*
5809                          * Delete or reassign static rule
5810                          */
5811                         error = ipfw_ctl_alter(masks[0]);
5812                 } else if (size == (2 * sizeof(*masks))) {
5813                         /*
5814                          * Set enable/disable
5815                          */
5816                         ipfw_ctl_set_disable(masks[0], masks[1]);
5817                 } else {
5818                         error = EINVAL;
5819                 }
5820                 break;
5821
5822         case IP_FW_ZERO:
5823         case IP_FW_RESETLOG: /* argument is an int, the rule number */
5824                 rulenum = 0;
5825
5826                 if (sopt->sopt_val != 0) {
5827                     error = soopt_to_kbuf(sopt, &rulenum,
5828                             sizeof(int), sizeof(int));
5829                     if (error)
5830                         break;
5831                 }
5832                 error = ipfw_ctl_zero_entry(rulenum,
5833                         sopt->sopt_name == IP_FW_RESETLOG);
5834                 break;
5835
5836         case IP_FW_TBL_CREATE:
5837                 error = ipfw_table_create(sopt);
5838                 break;
5839
5840         case IP_FW_TBL_ADD:
5841         case IP_FW_TBL_DEL:
5842                 error = ipfw_table_alt(sopt);
5843                 break;
5844
5845         case IP_FW_TBL_FLUSH:
5846         case IP_FW_TBL_DESTROY:
5847                 error = ipfw_table_flush(sopt);
5848                 break;
5849
5850         case IP_FW_TBL_GET:
5851                 error = ipfw_table_get(sopt);
5852                 break;
5853
5854         case IP_FW_TBL_ZERO:
5855                 error = ipfw_table_zero(sopt);
5856                 break;
5857
5858         case IP_FW_TBL_EXPIRE:
5859                 error = ipfw_table_expire(sopt);
5860                 break;
5861
5862         default:
5863                 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
5864                 error = EINVAL;
5865         }
5866
5867         ipfw_crossref_reap();
5868         return error;
5869 }
5870
/*
 * Finish the current keepalive cycle on this CPU and re-arm the
 * periodic keepalive callout.
 */
static void
ipfw_keepalive_done(struct ipfw_context *ctx)
{

        KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
            ("keepalive is not in progress"));
        ctx->ipfw_flags &= ~IPFW_FLAG_KEEPALIVE;
        callout_reset(&ctx->ipfw_keepalive_ch, dyn_keepalive_period * hz,
            ipfw_keepalive, NULL);
}
5881
/*
 * Schedule continuation of the in-progress keepalive scan on this CPU,
 * so one pass does not monopolize the netisr thread.
 */
static void
ipfw_keepalive_more(struct ipfw_context *ctx)
{
        struct netmsg_base *nm = &ctx->ipfw_keepalive_more;

        KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
            ("keepalive is not in progress"));
        KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
            ("keepalive more did not finish"));
        netisr_sendmsg_oncpu(nm);
}
5893
/*
 * Walk the state list starting after 'anchor', expiring dead states
 * and sending keepalive probes for established TCP states that are
 * close to expiring.
 *
 * The anchor is moved forward past each examined state, so the scan
 * can be resumed where it left off.  The pass yields (via
 * ipfw_keepalive_more) after ipfw_state_scan_max examined states,
 * ipfw_state_expire_max expirations, or ipfw_keepalive_max probes.
 * When the list is exhausted the anchor is removed and the cycle is
 * completed with ipfw_keepalive_done().
 */
static void
ipfw_keepalive_loop(struct ipfw_context *ctx, struct ipfw_state *anchor)
{
        struct ipfw_state *s;
        int scanned = 0, expired = 0, kept = 0;

        KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
            ("keepalive is not in progress"));

        while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
                uint32_t ack_rev, ack_fwd;
                struct ipfw_flow_id id;

                if (scanned++ >= ipfw_state_scan_max) {
                        ipfw_keepalive_more(ctx);
                        return;
                }

                /* Advance the anchor past the state we are examining. */
                TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
                TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);

                /* Skip other scans' anchors. */
                if (s->st_type == O_ANCHOR)
                        continue;

                if (TIME_LEQ(s->st_expire, time_uptime)) {
                        /* State expired. */
                        ipfw_state_del(ctx, s);
                        if (++expired >= ipfw_state_expire_max) {
                                ipfw_keepalive_more(ctx);
                                return;
                        }
                        continue;
                }

                /*
                 * Keep alive processing
                 */

                if (s->st_proto != IPPROTO_TCP)
                        continue;
                if ((s->st_state & IPFW_STATE_TCPSTATES) != BOTH_SYN)
                        continue;
                if (TIME_LEQ(time_uptime + dyn_keepalive_interval,
                    s->st_expire))
                        continue;       /* too early */

                ipfw_key_4tuple(&s->st_key, &id.src_ip, &id.src_port,
                    &id.dst_ip, &id.dst_port);
                ack_rev = s->st_ack_rev;
                ack_fwd = s->st_ack_fwd;

                /* Probe both directions of the connection. */
                send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
                send_pkt(&id, ack_fwd - 1, ack_rev, 0);

                if (++kept >= ipfw_keepalive_max) {
                        ipfw_keepalive_more(ctx);
                        return;
                }
        }
        TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
        ipfw_keepalive_done(ctx);
}
5956
/*
 * Netmsg handler continuing a yielded keepalive scan on this CPU.
 * Bails out early (completing the cycle) if keepalives were disabled
 * or no states remain.
 */
static void
ipfw_keepalive_more_dispatch(netmsg_t nm)
{
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        struct ipfw_state *anchor;

        ASSERT_NETISR_NCPUS(mycpuid);
        KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
            ("keepalive is not in progress"));

        /* Reply ASAP */
        netisr_replymsg(&nm->base, 0);

        anchor = &ctx->ipfw_keepalive_anch;
        if (!dyn_keepalive || ctx->ipfw_state_cnt == 0) {
                TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
                ipfw_keepalive_done(ctx);
                return;
        }
        ipfw_keepalive_loop(ctx, anchor);
}
5978
5979 /*
5980  * This procedure is only used to handle keepalives. It is invoked
5981  * every dyn_keepalive_period
5982  */
5983 static void
5984 ipfw_keepalive_dispatch(netmsg_t nm)
5985 {
5986         struct ipfw_context *ctx = ipfw_ctx[mycpuid];
5987         struct ipfw_state *anchor;
5988
5989         ASSERT_NETISR_NCPUS(mycpuid);
5990         KASSERT((ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE) == 0,
5991             ("keepalive is in progress"));
5992         ctx->ipfw_flags |= IPFW_FLAG_KEEPALIVE;
5993
5994         /* Reply ASAP */
5995         crit_enter();
5996         netisr_replymsg(&nm->base, 0);
5997         crit_exit();
5998
5999         if (!dyn_keepalive || ctx->ipfw_state_cnt == 0) {
6000                 ipfw_keepalive_done(ctx);
6001                 return;
6002         }
6003
6004         anchor = &ctx->ipfw_keepalive_anch;
6005         TAILQ_INSERT_HEAD(&ctx->ipfw_state_list, anchor, st_link);
6006         ipfw_keepalive_loop(ctx, anchor);
6007 }
6008
6009 /*
6010  * This procedure is only used to handle keepalives. It is invoked
6011  * every dyn_keepalive_period
6012  */
6013 static void
6014 ipfw_keepalive(void *dummy __unused)
6015 {
6016         struct netmsg_base *msg;
6017
6018         KKASSERT(mycpuid < netisr_ncpus);
6019         msg = &ipfw_ctx[mycpuid]->ipfw_keepalive_nm;
6020
6021         crit_enter();
6022         if (msg->lmsg.ms_flags & MSGF_DONE)
6023                 netisr_sendmsg_oncpu(msg);
6024         crit_exit();
6025 }
6026
/*
 * Target-CPU handler for IP_FW_CONTINUE: re-inject the mbuf into
 * ip_input() with ctx->ipfw_cont_rule set, so ipfw_check_in() resumes
 * filtering from this CPU's sibling rule.  Afterwards drop the cross
 * reference that kept the rule alive while the mbuf was in flight.
 */
static void
ipfw_ip_input_dispatch(netmsg_t nmsg)
{
        struct netmsg_genpkt *nm = (struct netmsg_genpkt *)nmsg;
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        struct mbuf *m = nm->m;
        struct ip_fw *rule = nm->arg1;

        ASSERT_NETISR_NCPUS(mycpuid);
        KASSERT(rule->cpuid == mycpuid,
            ("rule does not belong to cpu%d", mycpuid));
        KASSERT(m->m_pkthdr.fw_flags & IPFW_MBUF_CONTINUE,
            ("mbuf does not have ipfw continue rule"));

        KASSERT(ctx->ipfw_cont_rule == NULL,
            ("pending ipfw continue rule"));
        ctx->ipfw_cont_rule = rule;
        ip_input(m);

        /*
         * This rule is no longer used; decrement its cross_refs,
         * so this rule can be deleted.
         */
        rule->cross_refs--;

        /* May not be cleared, if ipfw was unload/disabled. */
        ctx->ipfw_cont_rule = NULL;
}
6055
/*
 * pfil(9) input hook: run an inbound packet through the firewall.
 *
 * On entry the mbuf may carry dummynet state (re-entry from a pipe) or
 * an ipfw "continue" rule (packet redispatched from another CPU); both
 * are extracted into the ip_fw_args before calling ipfw_chk().
 *
 * Returns 0 when the packet may proceed, EACCES when it was denied or
 * could not be diverted.  *m0 is set to NULL whenever the mbuf was
 * freed or handed off (dummynet, divert, cross-CPU continue).
 */
static int
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
        struct ip_fw_args args;
        struct mbuf *m = *m0;
        struct m_tag *mtag;
        int tee = 0, error = 0, ret, cpuid;
        struct netmsg_genpkt *nm;

        args.cont = 0;
        if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
                /* Extract info from dummynet tag */
                mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
                KKASSERT(mtag != NULL);
                args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
                KKASSERT(args.rule != NULL);

                m_tag_delete(m, mtag);
                m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
        } else if (m->m_pkthdr.fw_flags & IPFW_MBUF_CONTINUE) {
                /* Resume filtering from the rule set up by the dispatcher. */
                struct ipfw_context *ctx = ipfw_ctx[mycpuid];

                KKASSERT(ctx->ipfw_cont_rule != NULL);
                args.rule = ctx->ipfw_cont_rule;
                ctx->ipfw_cont_rule = NULL;

                args.cont = 1;
                m->m_pkthdr.fw_flags &= ~IPFW_MBUF_CONTINUE;
        } else {
                args.rule = NULL;
        }

        args.eh = NULL;
        args.oif = NULL;
        args.m = m;
        ret = ipfw_chk(&args);
        m = args.m;

        if (m == NULL) {
                /* Packet was consumed by ipfw_chk(). */
                error = EACCES;
                goto back;
        }

        switch (ret) {
        case IP_FW_PASS:
                break;

        case IP_FW_DENY:
                m_freem(m);
                m = NULL;
                error = EACCES;
                break;

        case IP_FW_DUMMYNET:
                /* Send packet to the appropriate pipe */
                m = ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
                break;

        case IP_FW_TEE:
                tee = 1;
                /* FALL THROUGH */

        case IP_FW_DIVERT:
                /*
                 * Must clear bridge tag when changing
                 */
                m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
                if (ip_divert_p != NULL) {
                        m = ip_divert_p(m, tee, 1);
                } else {
                        m_freem(m);
                        m = NULL;
                        /* not sure this is the right error msg */
                        error = EACCES;
                }
                break;

        case IP_FW_CONTINUE:
                KASSERT(m->m_flags & M_HASH, ("no hash"));
                cpuid = netisr_hashcpu(m->m_pkthdr.hash);
                KASSERT(cpuid != mycpuid,
                    ("continue on the same cpu%d", cpuid));

                /*
                 * NOTE:
                 * Bump cross_refs to prevent this rule and its siblings
                 * from being deleted, while this mbuf is inflight.  The
                 * cross_refs of the sibling rule on the target cpu will
                 * be decremented, once this mbuf is going to be filtered
                 * on the target cpu.
                 */
                args.rule->cross_refs++;
                m->m_pkthdr.fw_flags |= IPFW_MBUF_CONTINUE;

                nm = &m->m_hdr.mh_genmsg;
                netmsg_init(&nm->base, NULL, &netisr_apanic_rport, 0,
                    ipfw_ip_input_dispatch);
                nm->m = m;
                nm->arg1 = args.rule->cross_rules[cpuid];
                netisr_sendmsg(&nm->base, cpuid);

                /* This mbuf is dispatched; no longer valid. */
                m = NULL;
                break;

        default:
                panic("unknown ipfw return value: %d", ret);
        }
back:
        *m0 = m;
        return error;
}
6168
/*
 * pfil(9) output hook: run an outbound packet through the firewall.
 * Mirrors ipfw_check_in() but without the cross-CPU continue path.
 *
 * Returns 0 when the packet may proceed, EACCES when it was denied or
 * could not be diverted.  *m0 is set to NULL whenever the mbuf was
 * freed or handed off.
 */
static int
ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
        struct ip_fw_args args;
        struct mbuf *m = *m0;
        struct m_tag *mtag;
        int tee = 0, error = 0, ret;

        args.cont = 0;
        if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
                /* Extract info from dummynet tag */
                mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
                KKASSERT(mtag != NULL);
                args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
                KKASSERT(args.rule != NULL);

                m_tag_delete(m, mtag);
                m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
        } else {
                args.rule = NULL;
        }

        args.eh = NULL;
        args.m = m;
        args.oif = ifp;
        ret = ipfw_chk(&args);
        m = args.m;

        if (m == NULL) {
                /* Packet was consumed by ipfw_chk(). */
                error = EACCES;
                goto back;
        }

        switch (ret) {
        case IP_FW_PASS:
                break;

        case IP_FW_DENY:
                m_freem(m);
                m = NULL;
                error = EACCES;
                break;

        case IP_FW_DUMMYNET:
                m = ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
                break;

        case IP_FW_TEE:
                tee = 1;
                /* FALL THROUGH */

        case IP_FW_DIVERT:
                if (ip_divert_p != NULL) {
                        m = ip_divert_p(m, tee, 0);
                } else {
                        m_freem(m);
                        m = NULL;
                        /* not sure this is the right error msg */
                        error = EACCES;
                }
                break;

        default:
                panic("unknown ipfw return value: %d", ret);
        }
back:
        *m0 = m;
        return error;
}
6238
6239 static void
6240 ipfw_hook(void)
6241 {
6242         struct pfil_head *pfh;
6243
6244         ASSERT_NETISR0;
6245
6246         pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
6247         if (pfh == NULL)
6248                 return;
6249
6250         pfil_add_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
6251         pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
6252 }
6253
6254 static void
6255 ipfw_dehook(void)
6256 {
6257         struct pfil_head *pfh;
6258
6259         ASSERT_NETISR0;
6260
6261         pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
6262         if (pfh == NULL)
6263                 return;
6264
6265         pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
6266         pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
6267 }
6268
6269 static int
6270 ipfw_sysctl_dyncnt(SYSCTL_HANDLER_ARGS)
6271 {
6272         int dyn_cnt;
6273
6274         dyn_cnt = ipfw_state_cntcoll();
6275         dyn_cnt += ipfw_gd.ipfw_trkcnt_cnt;
6276
6277         return (sysctl_handle_int(oidp, &dyn_cnt, 0, req));
6278 }
6279
6280 static int
6281 ipfw_sysctl_statecnt(SYSCTL_HANDLER_ARGS)
6282 {
6283         int state_cnt;
6284
6285         state_cnt = ipfw_state_cntcoll();
6286         return (sysctl_handle_int(oidp, &state_cnt, 0, req));
6287 }
6288
6289 static int
6290 ipfw_sysctl_statemax(SYSCTL_HANDLER_ARGS)
6291 {
6292         int state_max, error;
6293
6294         state_max = ipfw_state_max;
6295         error = sysctl_handle_int(oidp, &state_max, 0, req);
6296         if (error || req->newptr == NULL)
6297                 return (error);
6298
6299         if (state_max < 1)
6300                 return (EINVAL);
6301
6302         ipfw_state_max_set(state_max);
6303         return (0);
6304 }
6305
6306 static int
6307 ipfw_sysctl_dynmax(SYSCTL_HANDLER_ARGS)
6308 {
6309         int dyn_max, error;
6310
6311         dyn_max = ipfw_state_max + ipfw_track_max;
6312
6313         error = sysctl_handle_int(oidp, &dyn_max, 0, req);
6314         if (error || req->newptr == NULL)
6315                 return (error);
6316
6317         if (dyn_max < 2)
6318                 return (EINVAL);
6319
6320         ipfw_state_max_set(dyn_max / 2);
6321         ipfw_track_max = dyn_max / 2;
6322         return (0);
6323 }
6324
6325 static void
6326 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
6327 {
6328         int enable = nmsg->lmsg.u.ms_result;
6329
6330         ASSERT_NETISR0;
6331
6332         if (fw_enable == enable)
6333                 goto reply;
6334
6335         fw_enable = enable;
6336         if (fw_enable)
6337                 ipfw_hook();
6338         else
6339                 ipfw_dehook();
6340 reply:
6341         netisr_replymsg(&nmsg->base, 0);
6342 }
6343
6344 static int
6345 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
6346 {
6347         struct netmsg_base nmsg;
6348         int enable, error;
6349
6350         enable = fw_enable;
6351         error = sysctl_handle_int(oidp, &enable, 0, req);
6352         if (error || req->newptr == NULL)
6353                 return error;
6354
6355         netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
6356             ipfw_sysctl_enable_dispatch);
6357         nmsg.lmsg.u.ms_result = enable;
6358
6359         return netisr_domsg(&nmsg, 0);
6360 }
6361
/*
 * Sysctl handler clamping the rule-number auto-increment step to
 * [IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX].
 */
static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
        return sysctl_int_range(oidp, arg1, arg2, req,
               IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}
6368
/*
 * Sysctl handler for the various scan-count knobs: any positive int.
 */
static int
ipfw_sysctl_scancnt(SYSCTL_HANDLER_ARGS)
{

        return sysctl_int_range(oidp, arg1, arg2, req, 1, INT_MAX);
}
6375
/*
 * Sysctl handler for a per-CPU u_long statistic located at byte
 * offset arg2 within each CPU's ipfw context.  Reads return the sum
 * over all netisr CPUs; any write zeroes the counters on all CPUs.
 */
static int
ipfw_sysctl_stat(SYSCTL_HANDLER_ARGS)
{
        u_long stat = 0;
        int cpu, error;

        for (cpu = 0; cpu < netisr_ncpus; ++cpu)
                stat += *((u_long *)((uint8_t *)ipfw_ctx[cpu] + arg2));

        error = sysctl_handle_long(oidp, &stat, 0, req);
        if (error || req->newptr == NULL)
                return (error);

        /* Zero out this stat. */
        for (cpu = 0; cpu < netisr_ncpus; ++cpu)
                *((u_long *)((uint8_t *)ipfw_ctx[cpu] + arg2)) = 0;
        return (0);
}
6394
/*
 * Per-netisr context setup: allocate this CPU's ipfw context (with
 * ipfw_table_max table slots), initialize the state/track trees, the
 * expiry and keepalive callouts/netmsgs, and install the default
 * rule.  The message is then forwarded so every netisr CPU runs this
 * in turn, linking the default rules as cross-CPU siblings.
 */
static void
ipfw_ctx_init_dispatch(netmsg_t nmsg)
{
        struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
        struct ipfw_context *ctx;
        struct ip_fw *def_rule;

        ASSERT_NETISR_NCPUS(mycpuid);

        /* Context is variable sized: one radix tree head per table. */
        ctx = kmalloc(__offsetof(struct ipfw_context,
            ipfw_tables[ipfw_table_max]), M_IPFW, M_WAITOK | M_ZERO);

        RB_INIT(&ctx->ipfw_state_tree);
        TAILQ_INIT(&ctx->ipfw_state_list);

        RB_INIT(&ctx->ipfw_track_tree);
        TAILQ_INIT(&ctx->ipfw_track_list);

        callout_init_mp(&ctx->ipfw_stateto_ch);
        netmsg_init(&ctx->ipfw_stateexp_nm, NULL, &netisr_adone_rport,
            MSGF_DROPABLE | MSGF_PRIORITY, ipfw_state_expire_dispatch);
        ctx->ipfw_stateexp_anch.st_type = O_ANCHOR;
        netmsg_init(&ctx->ipfw_stateexp_more, NULL, &netisr_adone_rport,
            MSGF_DROPABLE, ipfw_state_expire_more_dispatch);

        callout_init_mp(&ctx->ipfw_trackto_ch);
        netmsg_init(&ctx->ipfw_trackexp_nm, NULL, &netisr_adone_rport,
            MSGF_DROPABLE | MSGF_PRIORITY, ipfw_track_expire_dispatch);
        netmsg_init(&ctx->ipfw_trackexp_more, NULL, &netisr_adone_rport,
            MSGF_DROPABLE, ipfw_track_expire_more_dispatch);

        callout_init_mp(&ctx->ipfw_keepalive_ch);
        netmsg_init(&ctx->ipfw_keepalive_nm, NULL, &netisr_adone_rport,
            MSGF_DROPABLE | MSGF_PRIORITY, ipfw_keepalive_dispatch);
        ctx->ipfw_keepalive_anch.st_type = O_ANCHOR;
        netmsg_init(&ctx->ipfw_keepalive_more, NULL, &netisr_adone_rport,
            MSGF_DROPABLE, ipfw_keepalive_more_dispatch);

        ipfw_ctx[mycpuid] = ctx;

        def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

        def_rule->act_ofs = 0;
        def_rule->rulenum = IPFW_DEFAULT_RULE;
        def_rule->cmd_len = 1;
        def_rule->set = IPFW_DEFAULT_SET;

        def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
        def_rule->cmd[0].opcode = O_ACCEPT;
#else
        if (filters_default_to_accept)
                def_rule->cmd[0].opcode = O_ACCEPT;
        else
                def_rule->cmd[0].opcode = O_DENY;
#endif

        def_rule->refcnt = 1;
        def_rule->cpuid = mycpuid;

        /* Install the default rule */
        ctx->ipfw_default_rule = def_rule;
        ctx->ipfw_layer3_chain = def_rule;

        /* Link rule CPU sibling */
        ipfw_link_sibling(fwmsg, def_rule);

        /* Statistics only need to be updated once */
        if (mycpuid == 0)
                ipfw_inc_static_count(def_rule);

        netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
6468
/*
 * Netmsg handler for the crossref GC callout: reply immediately so the
 * dropable message can be reused, then reap freed crossref rules.
 */
static void
ipfw_crossref_reap_dispatch(netmsg_t nmsg)
{

        crit_enter();
        /* Reply ASAP */
        netisr_replymsg(&nmsg->base, 0);
        crit_exit();
        ipfw_crossref_reap();
}
6479
/*
 * Crossref GC callout (cpu0 only): kick the reap netmsg, unless the
 * previous one is still pending (MSGF_DONE cleared).
 */
static void
ipfw_crossref_timeo(void *dummy __unused)
{
        struct netmsg_base *msg = &ipfw_gd.ipfw_crossref_nm;

        KKASSERT(mycpuid == 0);

        crit_enter();
        if (msg->lmsg.ms_flags & MSGF_DONE)
                netisr_sendmsg_oncpu(msg);
        crit_exit();
}
6492
/*
 * One-shot ipfw bring-up, on netisr0: initialize the global track
 * tree and crossref GC, create the per-CPU contexts (with default
 * rules), install the ip_fw_* function pointers, start the per-CPU
 * expiry/keepalive callouts, and finally hook pfil(9) if the firewall
 * is enabled.  Replies EEXIST if ipfw is already loaded.
 */
static void
ipfw_init_dispatch(netmsg_t nmsg)
{
        struct netmsg_ipfw fwmsg;
        int error = 0, cpu;

        ASSERT_NETISR0;

        if (IPFW_LOADED) {
                kprintf("IP firewall already loaded\n");
                error = EEXIST;
                goto reply;
        }

        /* Clamp the table count to a sane range. */
        if (ipfw_table_max > UINT16_MAX || ipfw_table_max <= 0)
                ipfw_table_max = UINT16_MAX;

        /* Initialize global track tree. */
        RB_INIT(&ipfw_gd.ipfw_trkcnt_tree);
        IPFW_TRKCNT_TOKINIT;

        /* GC for freed crossref rules. */
        callout_init_mp(&ipfw_gd.ipfw_crossref_ch);
        netmsg_init(&ipfw_gd.ipfw_crossref_nm, NULL, &netisr_adone_rport,
            MSGF_PRIORITY | MSGF_DROPABLE, ipfw_crossref_reap_dispatch);

        ipfw_state_max_set(ipfw_state_max);
        ipfw_state_headroom = 8 * netisr_ncpus;

        /* Set up per-CPU contexts on every netisr CPU. */
        bzero(&fwmsg, sizeof(fwmsg));
        netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
            ipfw_ctx_init_dispatch);
        netisr_domsg_global(&fwmsg.base);

        ip_fw_chk_ptr = ipfw_chk;
        ip_fw_ctl_ptr = ipfw_ctl;
        ip_fw_dn_io_ptr = ipfw_dummynet_io;

        kprintf("ipfw2 initialized, default to %s, logging ",
                ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
                O_ACCEPT ? "accept" : "deny");

#ifdef IPFIREWALL_VERBOSE
        fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
        verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
        if (fw_verbose == 0) {
                kprintf("disabled\n");
        } else if (verbose_limit == 0) {
                kprintf("unlimited\n");
        } else {
                kprintf("limited to %d packets/entry by default\n",
                        verbose_limit);
        }

        /* Start state/track expiry and keepalive callouts on each CPU. */
        ip_fw_loaded = 1;
        for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
                callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_stateto_ch, hz,
                    ipfw_state_expire_ipifunc, NULL, cpu);
                callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_trackto_ch, hz,
                    ipfw_track_expire_ipifunc, NULL, cpu);
                callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_keepalive_ch, hz,
                    ipfw_keepalive, NULL, cpu);
        }

        if (fw_enable)
                ipfw_hook();
reply:
        netisr_replymsg(&nmsg->base, error);
}
6565
6566 static int
6567 ipfw_init(void)
6568 {
6569         struct netmsg_base smsg;
6570
6571         netmsg_init(&smsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
6572             ipfw_init_dispatch);
6573         return netisr_domsg(&smsg, 0);
6574 }
6575
6576 #ifdef KLD_MODULE
6577
/*
 * Per-CPU context teardown, dispatched to each netisr cpu in turn.
 * Stops this cpu's periodic callouts, discards any of this context's
 * netmsgs still sitting on the netisr port, flushes the per-cpu
 * tables, then forwards the message to the next cpu.
 */
static void
ipfw_ctx_fini_dispatch(netmsg_t nmsg)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Stop the periodic state-expire, track-expire and keepalive
	 * callouts synchronously so they cannot requeue the netmsgs
	 * we are about to drop.
	 */
	callout_stop_sync(&ctx->ipfw_stateto_ch);
	callout_stop_sync(&ctx->ipfw_trackto_ch);
	callout_stop_sync(&ctx->ipfw_keepalive_ch);

	/*
	 * Drop any still-queued expire/keepalive netmsgs; done inside
	 * a critical section so nothing can slip in between the drops.
	 */
	crit_enter();
	netisr_dropmsg(&ctx->ipfw_stateexp_more);
	netisr_dropmsg(&ctx->ipfw_stateexp_nm);
	netisr_dropmsg(&ctx->ipfw_trackexp_more);
	netisr_dropmsg(&ctx->ipfw_trackexp_nm);
	netisr_dropmsg(&ctx->ipfw_keepalive_more);
	netisr_dropmsg(&ctx->ipfw_keepalive_nm);
	crit_exit();

	/* Release this cpu's table entries (second arg presumably
	 * selects "destroy" mode — TODO confirm against the callee). */
	ipfw_table_flushall_oncpu(ctx, 1);

	/* Continue the teardown on the next netisr cpu. */
	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
6602
/*
 * Module-unload dispatch, run on netisr0.  Refuses to unload while
 * crossref rules are still referenced; otherwise unhooks the firewall,
 * tears down every per-cpu context, clears the global function
 * pointers, flushes all rules and frees the contexts.  Replies to the
 * caller with 0 or EBUSY.
 */
static void
ipfw_fini_dispatch(netmsg_t nmsg)
{
	struct netmsg_base nm;
	int error = 0, cpu;

	ASSERT_NETISR0;

	/* Reap already-freed crossref rules before checking refcnt. */
	ipfw_crossref_reap();

	if (ipfw_gd.ipfw_refcnt != 0) {
		/* Rules are still cross-referenced; cannot unload now. */
		error = EBUSY;
		goto reply;
	}

	/* Stop accepting packets before dismantling any state. */
	ip_fw_loaded = 0;
	ipfw_dehook();

	/* Synchronize any inflight state/track expire IPIs. */
	lwkt_synchronize_ipiqs("ipfwfini");

	/* Run the per-cpu teardown sequentially across all netisr cpus. */
	netmsg_init(&nm, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_ctx_fini_dispatch);
	netisr_domsg_global(&nm);

	/* Stop the crossref GC callout and drop its pending netmsg. */
	callout_stop_sync(&ipfw_gd.ipfw_crossref_ch);
	crit_enter();
	netisr_dropmsg(&ipfw_gd.ipfw_crossref_nm);
	crit_exit();

	/* Detach the global entry points, then destroy all rules. */
	ip_fw_chk_ptr = NULL;
	ip_fw_ctl_ptr = NULL;
	ip_fw_dn_io_ptr = NULL;
	ipfw_flush(1 /* kill default rule */);

	/* Free pre-cpu context */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		kfree(ipfw_ctx[cpu], M_IPFW);

	kprintf("IP firewall unloaded\n");
reply:
	netisr_replymsg(&nmsg->base, error);
}
6646
6647 static int
6648 ipfw_fini(void)
6649 {
6650         struct netmsg_base smsg;
6651
6652         netmsg_init(&smsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
6653             ipfw_fini_dispatch);
6654         return netisr_domsg(&smsg, 0);
6655 }
6656
6657 #endif  /* KLD_MODULE */
6658
6659 static int
6660 ipfw_modevent(module_t mod, int type, void *unused)
6661 {
6662         int err = 0;
6663
6664         switch (type) {
6665         case MOD_LOAD:
6666                 err = ipfw_init();
6667                 break;
6668
6669         case MOD_UNLOAD:
6670 #ifndef KLD_MODULE
6671                 kprintf("ipfw statically compiled, cannot unload\n");
6672                 err = EBUSY;
6673 #else
6674                 err = ipfw_fini();
6675 #endif
6676                 break;
6677         default:
6678                 break;
6679         }
6680         return err;
6681 }
6682
/*
 * Module registration: name, event handler, no private data.
 * Attached late in boot (SI_SUB_PROTO_END) so the network stack the
 * firewall hooks into is already initialized.
 */
static moduledata_t ipfwmod = {
	"ipfw",
	ipfw_modevent,
	0
};
DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(ipfw, 1);