2 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
26 * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.100 2008/11/22 11:03:35 sephe Exp $
30 * Implement IP packet firewall (new version)
36 #error IPFIREWALL requires INET.
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
43 #include <sys/kernel.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/sysctl.h>
48 #include <sys/syslog.h>
49 #include <sys/ucred.h>
50 #include <sys/in_cksum.h>
54 #include <net/route.h>
56 #include <net/dummynet/ip_dummynet.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
60 #include <net/netmsg2.h>
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/ip_icmp.h>
69 #include <netinet/tcp.h>
70 #include <netinet/tcp_timer.h>
71 #include <netinet/tcp_var.h>
72 #include <netinet/tcpip.h>
73 #include <netinet/udp.h>
74 #include <netinet/udp_var.h>
75 #include <netinet/ip_divert.h>
76 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
78 #include <net/ipfw/ip_fw2.h>
80 #ifdef IPFIREWALL_DEBUG
81 #define DPRINTF(fmt, ...) \
84 kprintf(fmt, __VA_ARGS__); \
87 #define DPRINTF(fmt, ...) ((void)0)
91 * Description of per-CPU rule duplication:
93 * Module loading/unloading and all ioctl operations are serialized
94 * by netisr0, so we don't have any ordering or locking problems.
96 * The following graph shows how operations on the per-CPU rule list are
97 * performed [2 CPU case]:
101 * netisr0 <------------------------------------+
112 * forwardmsg---------->ifnet1 |
117 * replymsg--------------+
122 * Rules which will not create states (dyn rules) [2 CPU case]
125 * layer3_chain layer3_chain
128 * +-------+ sibling +-------+ sibling
129 * | rule1 |--------->| rule1 |--------->NULL
130 * +-------+ +-------+
134 * +-------+ sibling +-------+ sibling
135 * | rule2 |--------->| rule2 |--------->NULL
136 * +-------+ +-------+
139 * 1) Ease statistics calculation during IP_FW_GET. We only need to
140 * iterate layer3_chain on CPU0; the current rule's duplications on
141 * the other CPUs can safely be accessed read-only by using
143 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
144 * a) In netisr0 (on CPU0) rule3 is determined to be inserted between
145 * rule1 and rule2. To make this decision we need to iterate the
146 * layer3_chain on CPU0. The netmsg, which is used to insert the
147 * rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
149 * b) After the insertion on CPU0 is done, we will move on to CPU1.
150 * But instead of locating rule3's position on CPU1 by
151 * iterating the layer3_chain on CPU1, we set the netmsg's prev_rule
152 * to rule1->sibling and next_rule to rule2->sibling before the
153 * netmsg is forwarded to CPU1 from CPU0
157 * Rules which will create states (dyn rules) [2 CPU case]
158 * (unnecessary parts are omitted; they are the same as in the previous figure)
162 * +-------+ +-------+
163 * | rule1 | | rule1 |
164 * +-------+ +-------+
171 * | +--------------------+ |
173 * | | (read-only shared) | |
175 * | | back pointer array | |
176 * | | (indexed by cpuid) | |
178 * +----|---------[0] | |
179 * | [1]--------|----+
181 * +--------------------+
184 * ........|............|............
188 * : +---------+ +---------+ :
189 * : | state1a | | state1b | .... :
190 * : +---------+ +---------+ :
194 * : (protected by dyn_lock) :
195 * ..................................
197 * [state1a and state1b are states created by rule1]
200 * This structure is introduced so that the shared (locked) state table can
201 * work with per-CPU (duplicated) static rules. It mainly bridges states
202 * and static rules and serves as the static rule's placeholder (a read-only
203 * shared part of duplicated rules) from the states' point of view.
205 * IPFW_RULE_F_STATE (only for rules which create states):
206 * o During rule installation, this flag is turned on after the rule's
207 * duplications reach all CPUs, to avoid at least the following race:
208 * 1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
209 * 2) rule1 creates state1
210 * 3) state1 is located on CPU1 by check-state
211 * But rule1 is not duplicated on CPU1 yet
212 * o During rule deletion, this flag is turned off before deleting states
213 * created by the rule and before deleting the rule itself, so no
214 * more states will be created by the to-be-deleted rule even when its
215 * duplications on certain CPUs have not been eliminated yet.
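 *
 * Illustrative sketch (not code used by this file): a per-CPU rule
 * operation is typically written as a netmsg handler of the shape shown
 * below, where example_dispatch() is a hypothetical name.  Each CPU
 * applies the change to its local chain and forwards the message to the
 * next CPU; the forward from the last CPU turns into a reply back to
 * netisr0 (see ipfw_add_rule_dispatch() below for the real thing):
 *
 *	static void
 *	example_dispatch(netmsg_t nmsg)
 *	{
 *		struct ipfw_context *ctx = ipfw_ctx[mycpuid];
 *
 *		... apply the change to ctx->ipfw_layer3_chain ...
 *
 *		ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
 *	}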
218 #define IPFW_AUTOINC_STEP_MIN 1
219 #define IPFW_AUTOINC_STEP_MAX 1000
220 #define IPFW_AUTOINC_STEP_DEF 100
222 #define IPFW_DEFAULT_RULE 65535 /* rulenum for the default rule */
223 #define IPFW_DEFAULT_SET 31 /* set number for the default rule */
226 struct netmsg_base base;
227 const struct ipfw_ioc_rule *ioc_rule;
228 struct ip_fw *next_rule;
229 struct ip_fw *prev_rule;
230 struct ip_fw *sibling;
231 struct ip_fw_stub *stub;
235 struct netmsg_base base;
236 struct ip_fw *start_rule;
237 struct ip_fw *prev_rule;
244 struct netmsg_base base;
245 struct ip_fw *start_rule;
250 struct ipfw_context {
251 struct ip_fw *ipfw_layer3_chain; /* list of rules for layer3 */
252 struct ip_fw *ipfw_default_rule; /* default rule */
253 uint64_t ipfw_norule_counter; /* counter for ipfw_log(NULL) */
256 * ipfw_set_disable contains one bit per set value (0..31).
257 * If the bit is set, all rules with the corresponding set
258 * are disabled. Set IPFW_DEFAULT_SET is reserved for the
259 * default rule and CANNOT be disabled.
261 uint32_t ipfw_set_disable;
262 uint32_t ipfw_gen; /* generation of rule list */
265 static struct ipfw_context *ipfw_ctx[MAXCPU];
269 * The module cannot be unloaded if there are references to
270 * certain rules of ipfw(4), e.g. from dummynet(4).
272 static int ipfw_refcnt;
275 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
278 * The following two global variables are accessed and
279 * updated only on CPU0.
281 static uint32_t static_count; /* # of static rules */
282 static uint32_t static_ioc_len; /* bytes of static rules */
285 * If 1, then ipfw static rules are being flushed and
286 * ipfw_chk() will skip to the default rule.
288 static int ipfw_flushing;
290 static int fw_verbose;
291 static int verbose_limit;
294 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
296 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
297 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
298 static int ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
299 static int ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
300 static int ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);
302 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
303 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
304 &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
305 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
306 &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
307 "Rule number autincrement step");
308 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO,one_pass,CTLFLAG_RW,
310 "Only do a single pass through ipfw when using dummynet(4)");
311 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
312 &fw_debug, 0, "Enable printing of debug ip_fw statements");
313 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
314 &fw_verbose, 0, "Log matches to ipfw rules");
315 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
316 &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
319 * Description of dynamic rules.
321 * Dynamic rules are stored in lists accessed through a hash table
322 * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
323 * be modified through the sysctl variable dyn_buckets which is
324 * updated when the table becomes empty.
326 * XXX currently there is only one list, ipfw_dyn.
328 * When a packet is received, its address fields are first masked
329 * with the mask defined for the rule, then hashed, then matched
330 * against the entries in the corresponding list.
331 * Dynamic rules can be used for different purposes:
333 * + enforcing limits on the number of sessions;
334 * + in-kernel NAT (not implemented yet)
336 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
337 * measured in seconds and depending on the flags.
339 * The total number of dynamic rules is stored in dyn_count.
340 * The max number of dynamic rules is dyn_max. When we reach
341 * the maximum number of rules we do not create any more. This is
342 * done to avoid consuming too much memory, but also too much
343 * time when searching on each packet (ideally, we should try instead
344 * to put a limit on the length of the list on each bucket...).
346 * Each dynamic rule holds a pointer to the parent ipfw rule so
347 * we know what action to perform. Dynamic rules are removed when
348 * the parent rule is deleted. XXX we should make them survive.
350 * There are some limitations with dynamic rules -- we do not
351 * obey the 'randomized match', and we do not do multiple
352 * passes through the firewall. XXX check the latter!!!
354 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
355 * Only TCP state transitions change a dynamic rule's state and ack
356 * sequences, and all packets of one TCP connection go through
357 * one TCP thread, so it is safe to use the shared lockmgr lock during dynamic
358 * rule lookup. The keepalive callout uses the exclusive lockmgr lock
359 * when it tries to find suitable dynamic rules to send keepalives, so
360 * it will not see half-updated state and ack sequences. Though the expire
361 * field update looks racy for other protocols, the one-second resolution
362 * of the expire field makes this kind of race harmless.
363 * XXX statistics updating is _not_ MPsafe!!!
364 * XXX once UDP output path is fixed, we could use lockless dynamic rule
367 static ipfw_dyn_rule **ipfw_dyn_v = NULL;
368 static uint32_t dyn_buckets = 256; /* must be power of 2 */
369 static uint32_t curr_dyn_buckets = 256; /* must be power of 2 */
370 static uint32_t dyn_buckets_gen; /* generation of dyn buckets array */
371 static struct lock dyn_lock; /* dynamic rules' hash table lock */
373 static struct netmsg_base ipfw_timeout_netmsg; /* schedule ipfw timeout */
374 static struct callout ipfw_timeout_h;
377 * Timeouts for various events in handling dynamic rules.
379 static uint32_t dyn_ack_lifetime = 300;
380 static uint32_t dyn_syn_lifetime = 20;
381 static uint32_t dyn_fin_lifetime = 1;
382 static uint32_t dyn_rst_lifetime = 1;
383 static uint32_t dyn_udp_lifetime = 10;
384 static uint32_t dyn_short_lifetime = 5;
387 * Keepalives are sent if dyn_keepalive is set. They are sent every
388 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
389 * seconds of lifetime of a rule.
390 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
391 * than dyn_keepalive_period.
394 static uint32_t dyn_keepalive_interval = 20;
395 static uint32_t dyn_keepalive_period = 5;
396 static uint32_t dyn_keepalive = 1; /* do send keepalives */
398 static uint32_t dyn_count; /* # of dynamic rules */
399 static uint32_t dyn_max = 4096; /* max # of dynamic rules */
401 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
402 &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
403 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
404 &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
405 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
406 &dyn_count, 0, "Number of dyn. rules");
407 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
408 &dyn_max, 0, "Max number of dyn. rules");
409 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
410 &static_count, 0, "Number of static rules");
411 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
412 &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
413 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
414 &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
415 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
416 CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
417 "Lifetime of dyn. rules for fin");
418 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
419 CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
420 "Lifetime of dyn. rules for rst");
421 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
422 &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
423 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
424 &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
425 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
426 &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
428 static ip_fw_chk_t ipfw_chk;
429 static void ipfw_tick(void *);
432 ipfw_free_rule(struct ip_fw *rule)
434 KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
435 KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
437 if (rule->refcnt == 0) {
445 ipfw_unref_rule(void *priv)
447 ipfw_free_rule(priv);
449 atomic_subtract_int(&ipfw_refcnt, 1);
454 ipfw_ref_rule(struct ip_fw *rule)
456 KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));
458 atomic_add_int(&ipfw_refcnt, 1);
464 * This macro maps an ip pointer into a layer3 header pointer of type T
466 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))
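/*
 * Note: ip_hl counts 32-bit words, so the uint32_t pointer arithmetic
 * above skips exactly the IP header, options included; e.g. with
 * ip_hl == 5 (no options) the following header starts 20 bytes in.
 */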
469 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
471 int type = L3HDR(struct icmp,ip)->icmp_type;
473 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
476 #define TT ((1 << ICMP_ECHO) | \
477 (1 << ICMP_ROUTERSOLICIT) | \
478 (1 << ICMP_TSTAMP) | \
483 is_icmp_query(struct ip *ip)
485 int type = L3HDR(struct icmp, ip)->icmp_type;
487 return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
493 * The following checks use two arrays of 8 or 16 bits to store the
494 * bits that we want set or clear, respectively. They are in the
495 * low and high half of cmd->arg1 or cmd->d[0].
497 * We scan options and store the bits we find set. We succeed if
499 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
501 * The code is sometimes optimized not to store additional variables.
505 flags_match(ipfw_insn *cmd, uint8_t bits)
510 if (((cmd->arg1 & 0xff) & bits) != 0)
511 return 0; /* some bits we want set were clear */
513 want_clear = (cmd->arg1 >> 8) & 0xff;
514 if ((want_clear & bits) != want_clear)
515 return 0; /* some bits we want clear were set */
520 ipopts_match(struct ip *ip, ipfw_insn *cmd)
522 int optlen, bits = 0;
523 u_char *cp = (u_char *)(ip + 1);
524 int x = (ip->ip_hl << 2) - sizeof(struct ip);
526 for (; x > 0; x -= optlen, cp += optlen) {
527 int opt = cp[IPOPT_OPTVAL];
529 if (opt == IPOPT_EOL)
532 if (opt == IPOPT_NOP) {
535 optlen = cp[IPOPT_OLEN];
536 if (optlen <= 0 || optlen > x)
537 return 0; /* invalid or truncated */
542 bits |= IP_FW_IPOPT_LSRR;
546 bits |= IP_FW_IPOPT_SSRR;
550 bits |= IP_FW_IPOPT_RR;
554 bits |= IP_FW_IPOPT_TS;
561 return (flags_match(cmd, bits));
565 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
567 int optlen, bits = 0;
568 struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
569 u_char *cp = (u_char *)(tcp + 1);
570 int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
572 for (; x > 0; x -= optlen, cp += optlen) {
575 if (opt == TCPOPT_EOL)
578 if (opt == TCPOPT_NOP) {
588 bits |= IP_FW_TCPOPT_MSS;
592 bits |= IP_FW_TCPOPT_WINDOW;
595 case TCPOPT_SACK_PERMITTED:
597 bits |= IP_FW_TCPOPT_SACK;
600 case TCPOPT_TIMESTAMP:
601 bits |= IP_FW_TCPOPT_TS;
607 bits |= IP_FW_TCPOPT_CC;
614 return (flags_match(cmd, bits));
618 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
620 if (ifp == NULL) /* no iface with this packet, match fails */
623 /* Check by name or by IP address */
624 if (cmd->name[0] != '\0') { /* match by name */
627 if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
630 if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
634 struct ifaddr_container *ifac;
636 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
637 struct ifaddr *ia = ifac->ifa;
639 if (ia->ifa_addr == NULL)
641 if (ia->ifa_addr->sa_family != AF_INET)
643 if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
644 (ia->ifa_addr))->sin_addr.s_addr)
645 return(1); /* match */
648 return(0); /* no match, fail ... */
651 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
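/*
 * SNPARGS(buf, len) expands to the write position and the remaining
 * space in 'buf', so successive calls such as
 *	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", ...);
 *	ksnprintf(SNPARGS(proto, len), ":%d", ...);
 * append to the same buffer without overflowing it.
 */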
654 * We enter here when we have a rule with O_LOG.
655 * XXX this function alone takes about 2Kbytes of code!
658 ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
659 struct mbuf *m, struct ifnet *oif)
662 int limit_reached = 0;
663 char action2[40], proto[48], fragment[28];
668 if (f == NULL) { /* bogus pkt */
669 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
671 if (verbose_limit != 0 &&
672 ctx->ipfw_norule_counter >= verbose_limit)
674 ctx->ipfw_norule_counter++;
675 if (ctx->ipfw_norule_counter == verbose_limit)
676 limit_reached = verbose_limit;
678 } else { /* O_LOG is the first action, find the real one */
679 ipfw_insn *cmd = ACTION_PTR(f);
680 ipfw_insn_log *l = (ipfw_insn_log *)cmd;
682 if (l->max_log != 0 && l->log_left == 0)
685 if (l->log_left == 0)
686 limit_reached = l->max_log;
687 cmd += F_LEN(cmd); /* point to first action */
688 if (cmd->opcode == O_PROB)
692 switch (cmd->opcode) {
698 if (cmd->arg1==ICMP_REJECT_RST) {
700 } else if (cmd->arg1==ICMP_UNREACH_HOST) {
703 ksnprintf(SNPARGS(action2, 0), "Unreach %d",
717 ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
721 ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
725 ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
729 ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
733 ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
738 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
741 len = ksnprintf(SNPARGS(action2, 0),
743 inet_ntoa(sa->sa.sin_addr));
744 if (sa->sa.sin_port) {
745 ksnprintf(SNPARGS(action2, len), ":%d",
757 if (hlen == 0) { /* non-ip */
758 ksnprintf(SNPARGS(proto, 0), "MAC");
760 struct ip *ip = mtod(m, struct ip *);
761 /* these three are all aliases to the same thing */
762 struct icmp *const icmp = L3HDR(struct icmp, ip);
763 struct tcphdr *const tcp = (struct tcphdr *)icmp;
764 struct udphdr *const udp = (struct udphdr *)icmp;
766 int ip_off, offset, ip_len;
769 if (eh != NULL) { /* layer 2 packets are as on the wire */
770 ip_off = ntohs(ip->ip_off);
771 ip_len = ntohs(ip->ip_len);
776 offset = ip_off & IP_OFFMASK;
779 len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
780 inet_ntoa(ip->ip_src));
782 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
783 ntohs(tcp->th_sport),
784 inet_ntoa(ip->ip_dst),
785 ntohs(tcp->th_dport));
787 ksnprintf(SNPARGS(proto, len), " %s",
788 inet_ntoa(ip->ip_dst));
793 len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
794 inet_ntoa(ip->ip_src));
796 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
797 ntohs(udp->uh_sport),
798 inet_ntoa(ip->ip_dst),
799 ntohs(udp->uh_dport));
801 ksnprintf(SNPARGS(proto, len), " %s",
802 inet_ntoa(ip->ip_dst));
808 len = ksnprintf(SNPARGS(proto, 0),
813 len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
815 len += ksnprintf(SNPARGS(proto, len), "%s",
816 inet_ntoa(ip->ip_src));
817 ksnprintf(SNPARGS(proto, len), " %s",
818 inet_ntoa(ip->ip_dst));
822 len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
823 inet_ntoa(ip->ip_src));
824 ksnprintf(SNPARGS(proto, len), " %s",
825 inet_ntoa(ip->ip_dst));
829 if (ip_off & (IP_MF | IP_OFFMASK)) {
830 ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
831 ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
832 offset << 3, (ip_off & IP_MF) ? "+" : "");
836 if (oif || m->m_pkthdr.rcvif) {
837 log(LOG_SECURITY | LOG_INFO,
838 "ipfw: %d %s %s %s via %s%s\n",
840 action, proto, oif ? "out" : "in",
841 oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
844 log(LOG_SECURITY | LOG_INFO,
845 "ipfw: %d %s %s [no if info]%s\n",
847 action, proto, fragment);
851 log(LOG_SECURITY | LOG_NOTICE,
852 "ipfw: limit %d reached on entry %d\n",
853 limit_reached, f ? f->rulenum : -1);
860 * IMPORTANT: the hash function for dynamic rules must be commutative
861 * in source and destination (ip,port), because rules are bidirectional
862 * and we want to find both in the same bucket.
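 * Example: because the XOR below is commutative in (src, dst), a flow
 * and its reverse (addresses and ports swapped) hash to the same value,
 * so one lookup can find the state for either direction of a connection.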
865 hash_packet(struct ipfw_flow_id *id)
869 i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
870 i &= (curr_dyn_buckets - 1);
875 * unlink a dynamic rule from a chain. prev is a pointer to
876 * the previous one, q is a pointer to the rule to delete,
877 * head is a pointer to the head of the queue.
878 * Modifies q and potentially also head.
880 #define UNLINK_DYN_RULE(prev, head, q) \
882 ipfw_dyn_rule *old_q = q; \
884 /* remove a refcount to the parent */ \
885 if (q->dyn_type == O_LIMIT) \
886 q->parent->count--; \
887 DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n", \
888 q->id.src_ip, q->id.src_port, \
889 q->id.dst_ip, q->id.dst_port, dyn_count - 1); \
891 prev->next = q = q->next; \
893 head = q = q->next; \
894 KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count)); \
896 kfree(old_q, M_IPFW); \
899 #define TIME_LEQ(a, b) ((int)((a) - (b)) <= 0)
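/*
 * The cast-to-int form keeps TIME_LEQ() correct across time_second
 * wraparound; e.g. a = 0xfffffff0, b = 0x00000010 gives
 * (int)(a - b) == -32, so TIME_LEQ(a, b) is still true.
 */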
902 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
904 * If keep_me == NULL, rules are deleted even if not expired,
905 * otherwise only expired rules are removed.
907 * The value of the second parameter is also used to identify
908 * a rule we absolutely do not want to remove (e.g. because we are
909 * holding a reference to it -- this is the case with O_LIMIT_PARENT
910 * rules). The pointer is only used for comparison, so any non-null
914 remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
916 static uint32_t last_remove = 0; /* XXX */
918 #define FORCE (keep_me == NULL)
920 ipfw_dyn_rule *prev, *q;
921 int i, pass = 0, max_pass = 0, unlinked = 0;
923 if (ipfw_dyn_v == NULL || dyn_count == 0)
925 /* do not expire more than once per second, it is useless */
926 if (!FORCE && last_remove == time_second)
928 last_remove = time_second;
931 * Because O_LIMIT rules refer to parent rules, during the first pass we only
932 * remove child rules and mark any pending LIMIT_PARENT rules, then remove
933 * them in a second pass.
936 for (i = 0; i < curr_dyn_buckets; i++) {
937 for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
939 * Logic can become complex here, so we split tests.
943 if (rule != NULL && rule->stub != q->stub)
944 goto next; /* not the one we are looking for */
945 if (q->dyn_type == O_LIMIT_PARENT) {
947 * handle parent in the second pass,
948 * record we need one.
953 if (FORCE && q->count != 0) {
954 /* XXX should not happen! */
955 kprintf("OUCH! cannot remove rule, "
956 "count %d\n", q->count);
959 if (!FORCE && !TIME_LEQ(q->expire, time_second))
963 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
970 if (pass++ < max_pass)
980 * lookup a dynamic rule.
982 static ipfw_dyn_rule *
983 lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
987 * stateful ipfw extensions.
988 * Lookup into dynamic session queue
990 #define MATCH_REVERSE 0
991 #define MATCH_FORWARD 1
993 #define MATCH_UNKNOWN 3
994 int i, dir = MATCH_NONE;
995 ipfw_dyn_rule *prev, *q=NULL;
997 if (ipfw_dyn_v == NULL)
998 goto done; /* not found */
1000 i = hash_packet(pkt);
1001 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
1002 if (q->dyn_type == O_LIMIT_PARENT)
1005 if (TIME_LEQ(q->expire, time_second)) {
1007 * Entry expired; skip.
1008 * Let ipfw_tick() take care of it
1013 if (pkt->proto == q->id.proto) {
1014 if (pkt->src_ip == q->id.src_ip &&
1015 pkt->dst_ip == q->id.dst_ip &&
1016 pkt->src_port == q->id.src_port &&
1017 pkt->dst_port == q->id.dst_port) {
1018 dir = MATCH_FORWARD;
1021 if (pkt->src_ip == q->id.dst_ip &&
1022 pkt->dst_ip == q->id.src_ip &&
1023 pkt->src_port == q->id.dst_port &&
1024 pkt->dst_port == q->id.src_port) {
1025 dir = MATCH_REVERSE;
1034 goto done; /* q = NULL, not found */
1036 if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
1037 u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
1039 #define BOTH_SYN (TH_SYN | (TH_SYN << 8))
1040 #define BOTH_FIN (TH_FIN | (TH_FIN << 8))
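		/*
		 * Forward-direction TCP flags are accumulated in the low
		 * byte of q->state and reverse-direction flags in the high
		 * byte, so e.g. BOTH_SYN is set only once a SYN has been
		 * seen in each direction.
		 */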
1042 q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
1044 case TH_SYN: /* opening */
1045 q->expire = time_second + dyn_syn_lifetime;
1048 case BOTH_SYN: /* move to established */
1049 case BOTH_SYN | TH_FIN : /* one side tries to close */
1050 case BOTH_SYN | (TH_FIN << 8) :
1052 uint32_t ack = ntohl(tcp->th_ack);
1054 #define _SEQ_GE(a, b) ((int)(a) - (int)(b) >= 0)
1056 if (dir == MATCH_FORWARD) {
1057 if (q->ack_fwd == 0 ||
1058 _SEQ_GE(ack, q->ack_fwd))
1060 else /* ignore out-of-sequence */
1063 if (q->ack_rev == 0 ||
1064 _SEQ_GE(ack, q->ack_rev))
1066 else /* ignore out-of-sequence */
1071 q->expire = time_second + dyn_ack_lifetime;
1074 case BOTH_SYN | BOTH_FIN: /* both sides closed */
1075 KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
1076 q->expire = time_second + dyn_fin_lifetime;
1082 * reset or some invalid combination, but can also
1083 * occur if we use keep-state the wrong way.
1085 if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
1086 kprintf("invalid state: 0x%x\n", q->state);
1088 KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
1089 q->expire = time_second + dyn_rst_lifetime;
1092 } else if (pkt->proto == IPPROTO_UDP) {
1093 q->expire = time_second + dyn_udp_lifetime;
1095 /* other protocols */
1096 q->expire = time_second + dyn_short_lifetime;
1099 if (match_direction)
1100 *match_direction = dir;
1104 static struct ip_fw *
1105 lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
1106 uint16_t len, int *deny)
1108 struct ip_fw *rule = NULL;
1110 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1114 gen = ctx->ipfw_gen;
1116 lockmgr(&dyn_lock, LK_SHARED);
1118 if (ctx->ipfw_gen != gen) {
1120 * Static rules have been changed while we were waiting
1121 * for the dynamic hash table lock; deny this packet,
1122 * since it is _not_ known whether it is safe to keep
1123 * iterating the static rules.
1129 q = lookup_dyn_rule(pkt, match_direction, tcp);
1133 rule = q->stub->rule[mycpuid];
1134 KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);
1141 lockmgr(&dyn_lock, LK_RELEASE);
1146 realloc_dynamic_table(void)
1148 ipfw_dyn_rule **old_dyn_v;
1149 uint32_t old_curr_dyn_buckets;
1151 KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
1152 ("invalid dyn_buckets %d\n", dyn_buckets));
1154 /* Save the current buckets array for later error recovery */
1155 old_dyn_v = ipfw_dyn_v;
1156 old_curr_dyn_buckets = curr_dyn_buckets;
1158 curr_dyn_buckets = dyn_buckets;
1160 ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
1161 M_IPFW, M_NOWAIT | M_ZERO);
1162 if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
1165 curr_dyn_buckets /= 2;
1166 if (curr_dyn_buckets <= old_curr_dyn_buckets &&
1167 old_dyn_v != NULL) {
1169 * Don't try allocating a smaller buckets array; reuse
1170 * the old one, which already contains enough buckets
1176 if (ipfw_dyn_v != NULL) {
1177 if (old_dyn_v != NULL)
1178 kfree(old_dyn_v, M_IPFW);
1180 /* Allocation failed, restore old buckets array */
1181 ipfw_dyn_v = old_dyn_v;
1182 curr_dyn_buckets = old_curr_dyn_buckets;
1185 if (ipfw_dyn_v != NULL)
1190 * Install state of type 'type' for a dynamic session.
1191 * The hash table contains the following types of rules:
1192 * - regular rules (O_KEEP_STATE)
1193 * - rules for sessions with a limited number of sessions per user
1194 * (O_LIMIT). When they are created, the parent's count is
1195 * increased by 1, and decreased on delete. In this case,
1196 * the third parameter is the parent rule and not the chain.
1197 * - "parent" rules for the above (O_LIMIT_PARENT).
1199 static ipfw_dyn_rule *
1200 add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
1205 if (ipfw_dyn_v == NULL ||
1206 (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
1207 realloc_dynamic_table();
1208 if (ipfw_dyn_v == NULL)
1209 return NULL; /* failed ! */
1211 i = hash_packet(id);
1213 r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
1215 kprintf("sorry cannot allocate state\n");
1219 /* increase refcount on parent, and set pointer */
1220 if (dyn_type == O_LIMIT) {
1221 ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
1223 if (parent->dyn_type != O_LIMIT_PARENT)
1224 panic("invalid parent");
1227 rule = parent->stub->rule[mycpuid];
1228 KKASSERT(rule->stub == parent->stub);
1230 KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);
1233 r->expire = time_second + dyn_syn_lifetime;
1234 r->stub = rule->stub;
1235 r->dyn_type = dyn_type;
1236 r->pcnt = r->bcnt = 0;
1240 r->next = ipfw_dyn_v[i];
1244 DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
1246 r->id.src_ip, r->id.src_port,
1247 r->id.dst_ip, r->id.dst_port, dyn_count);
1252 * lookup dynamic parent rule using pkt and rule as search keys.
1253 * If the lookup fails, then install one.
1255 static ipfw_dyn_rule *
1256 lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
1262 i = hash_packet(pkt);
1263 for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
1264 if (q->dyn_type == O_LIMIT_PARENT &&
1265 rule->stub == q->stub &&
1266 pkt->proto == q->id.proto &&
1267 pkt->src_ip == q->id.src_ip &&
1268 pkt->dst_ip == q->id.dst_ip &&
1269 pkt->src_port == q->id.src_port &&
1270 pkt->dst_port == q->id.dst_port) {
1271 q->expire = time_second + dyn_short_lifetime;
1272 DPRINTF("lookup_dyn_parent found 0x%p\n", q);
1277 return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
1281 * Install dynamic state for rule type cmd->o.opcode
1283 * Returns 1 (failure) if state is not installed because of errors or because
1284 * session limitations are enforced.
1287 install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
1288 struct ip_fw_args *args)
1290 static int last_log; /* XXX */
1294 DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
1296 args->f_id.src_ip, args->f_id.src_port,
1297 args->f_id.dst_ip, args->f_id.dst_port);
1299 q = lookup_dyn_rule(&args->f_id, NULL, NULL);
1300 if (q != NULL) { /* should never occur */
1301 if (last_log != time_second) {
1302 last_log = time_second;
1303 kprintf(" install_state: entry already present, done\n");
1308 if (dyn_count >= dyn_max) {
1310 * Run out of slots, try to remove any expired rule.
1312 remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
1313 if (dyn_count >= dyn_max) {
1314 if (last_log != time_second) {
1315 last_log = time_second;
1316 kprintf("install_state: "
1317 "Too many dynamic rules\n");
1319 return 1; /* cannot install, notify caller */
1323 switch (cmd->o.opcode) {
1324 case O_KEEP_STATE: /* bidir rule */
1325 if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
1329 case O_LIMIT: /* limit number of sessions */
1331 uint16_t limit_mask = cmd->limit_mask;
1332 struct ipfw_flow_id id;
1333 ipfw_dyn_rule *parent;
1335 DPRINTF("installing dyn-limit rule %d\n",
1338 id.dst_ip = id.src_ip = 0;
1339 id.dst_port = id.src_port = 0;
1340 id.proto = args->f_id.proto;
1342 if (limit_mask & DYN_SRC_ADDR)
1343 id.src_ip = args->f_id.src_ip;
1344 if (limit_mask & DYN_DST_ADDR)
1345 id.dst_ip = args->f_id.dst_ip;
1346 if (limit_mask & DYN_SRC_PORT)
1347 id.src_port = args->f_id.src_port;
1348 if (limit_mask & DYN_DST_PORT)
1349 id.dst_port = args->f_id.dst_port;
1351 parent = lookup_dyn_parent(&id, rule);
1352 if (parent == NULL) {
1353 kprintf("add parent failed\n");
1357 if (parent->count >= cmd->conn_limit) {
1359 * See if we can remove some expired rule.
1361 remove_dyn_rule_locked(rule, parent);
1362 if (parent->count >= cmd->conn_limit) {
1364 last_log != time_second) {
1365 last_log = time_second;
1366 log(LOG_SECURITY | LOG_DEBUG,
1368 "too many entries\n");
1373 if (add_dyn_rule(&args->f_id, O_LIMIT,
1374 (struct ip_fw *)parent) == NULL)
1379 kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
1382 lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
1387 install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
1388 struct ip_fw_args *args, int *deny)
1390 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1395 gen = ctx->ipfw_gen;
1397 lockmgr(&dyn_lock, LK_EXCLUSIVE);
1398 if (ctx->ipfw_gen != gen) {
1399 /* See the comment in lookup_rule() */
1402 ret = install_state_locked(rule, cmd, args);
1404 lockmgr(&dyn_lock, LK_RELEASE);
1410 * Transmit a TCP packet, containing either a RST or a keepalive.
1411 * When flags & TH_RST, we are sending a RST packet because a
1412 * "reset" action matched the packet.
1413 * Otherwise we are sending a keepalive, and flags & TH_
1416 send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
1421 struct route sro; /* fake route */
1423 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
1426 m->m_pkthdr.rcvif = NULL;
1427 m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
1428 m->m_data += max_linkhdr;
1430 ip = mtod(m, struct ip *);
1431 bzero(ip, m->m_len);
1432 tcp = (struct tcphdr *)(ip + 1); /* no IP options */
1433 ip->ip_p = IPPROTO_TCP;
1437 * Assume we are sending a RST (or a keepalive in the reverse
1438 * direction), swap src and destination addresses and ports.
1440 ip->ip_src.s_addr = htonl(id->dst_ip);
1441 ip->ip_dst.s_addr = htonl(id->src_ip);
1442 tcp->th_sport = htons(id->dst_port);
1443 tcp->th_dport = htons(id->src_port);
1444 if (flags & TH_RST) { /* we are sending a RST */
1445 if (flags & TH_ACK) {
1446 tcp->th_seq = htonl(ack);
1447 tcp->th_ack = htonl(0);
1448 tcp->th_flags = TH_RST;
1452 tcp->th_seq = htonl(0);
1453 tcp->th_ack = htonl(seq);
1454 tcp->th_flags = TH_RST | TH_ACK;
1458 * We are sending a keepalive. flags & TH_SYN determines
1459 * the direction, forward if set, reverse if clear.
1460 * NOTE: seq and ack are always assumed to be correct
1461 * as set by the caller. This may be confusing...
1463 if (flags & TH_SYN) {
1465 * we have to rewrite the correct addresses!
1467 ip->ip_dst.s_addr = htonl(id->dst_ip);
1468 ip->ip_src.s_addr = htonl(id->src_ip);
1469 tcp->th_dport = htons(id->dst_port);
1470 tcp->th_sport = htons(id->src_port);
1472 tcp->th_seq = htonl(seq);
1473 tcp->th_ack = htonl(ack);
1474 tcp->th_flags = TH_ACK;
1478 * set ip_len to the payload size so we can compute
1479 * the tcp checksum on the pseudoheader
1480 * XXX check this, could save a couple of words ?
1482 ip->ip_len = htons(sizeof(struct tcphdr));
1483 tcp->th_sum = in_cksum(m, m->m_pkthdr.len);
1486 * now fill fields left out earlier
1488 ip->ip_ttl = ip_defttl;
1489 ip->ip_len = m->m_pkthdr.len;
1491 bzero(&sro, sizeof(sro));
1492 ip_rtaddr(ip->ip_dst, &sro);
1494 m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
1495 ip_output(m, NULL, &sro, 0, NULL, NULL);
1501 * sends a reject message, consuming the mbuf passed as an argument.
1504 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
1506 if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
1507 /* We need the IP header in host order for icmp_error(). */
1508 if (args->eh != NULL) {
1509 struct ip *ip = mtod(args->m, struct ip *);
1511 ip->ip_len = ntohs(ip->ip_len);
1512 ip->ip_off = ntohs(ip->ip_off);
1514 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
1515 } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
1516 struct tcphdr *const tcp =
1517 L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1519 if ((tcp->th_flags & TH_RST) == 0) {
1520 send_pkt(&args->f_id, ntohl(tcp->th_seq),
1521 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
1532 * Given an ip_fw *, lookup_next_rule will return a pointer
1533 * to the next rule, which can be either the jump
1534 * target (for skipto instructions) or the next one in the list (in
1535 * all other cases including a missing jump target).
1536 * The result is also written in the "next_rule" field of the rule.
1537 * Backward jumps are not allowed, so start looking from the next
1540 * This never returns NULL -- in case we do not have an exact match,
1541 * the next rule is returned. When the ruleset is changed,
1542 * pointers are flushed so we are always correct.
1545 static struct ip_fw *
1546 lookup_next_rule(struct ip_fw *me)
1548 struct ip_fw *rule = NULL;
1551 /* look for action, in case it is a skipto */
1552 cmd = ACTION_PTR(me);
1553 if (cmd->opcode == O_LOG)
1555 if (cmd->opcode == O_SKIPTO) {
1556 for (rule = me->next; rule; rule = rule->next) {
1557 if (rule->rulenum >= cmd->arg1)
1561 if (rule == NULL) /* failure or not a skipto */
1563 me->next_rule = rule;
1568 _ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
1569 enum ipfw_opcodes opcode, uid_t uid)
1571 struct in_addr src_ip, dst_ip;
1572 struct inpcbinfo *pi;
1576 if (fid->proto == IPPROTO_TCP) {
1578 pi = &tcbinfo[mycpuid];
1579 } else if (fid->proto == IPPROTO_UDP) {
1587 * Values in 'fid' are in host byte order
1589 dst_ip.s_addr = htonl(fid->dst_ip);
1590 src_ip.s_addr = htonl(fid->src_ip);
1592 pcb = in_pcblookup_hash(pi,
1593 dst_ip, htons(fid->dst_port),
1594 src_ip, htons(fid->src_port),
1597 pcb = in_pcblookup_hash(pi,
1598 src_ip, htons(fid->src_port),
1599 dst_ip, htons(fid->dst_port),
1602 if (pcb == NULL || pcb->inp_socket == NULL)
1605 if (opcode == O_UID) {
1606 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b))
1607 return !socheckuid(pcb->inp_socket, uid);
1610 return groupmember(uid, pcb->inp_socket->so_cred);
1615 ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
1616 enum ipfw_opcodes opcode, uid_t uid, int *deny)
1618 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1623 gen = ctx->ipfw_gen;
1626 if (gen != ctx->ipfw_gen) {
1627 /* See the comment in lookup_rule() */
1630 match = _ipfw_match_uid(fid, oif, opcode, uid);
1637 * The main check routine for the firewall.
1639 * All arguments are in args so we can modify them and return them
1640 * back to the caller.
1644 * args->m (in/out) The packet; we set to NULL when/if we nuke it.
1645 * Starts with the IP header.
1646 * args->eh (in) Mac header if present, or NULL for layer3 packet.
1647 * args->oif Outgoing interface, or NULL if packet is incoming.
1648 * The incoming interface is in the mbuf. (in)
1650 * args->rule Pointer to the last matching rule (in/out)
1651 * args->f_id Addresses grabbed from the packet (out)
1655 * If the packet was denied/rejected and has been dropped, *m is equal
1656 * to NULL upon return.
1658 * IP_FW_DENY the packet must be dropped.
1659 * IP_FW_PASS The packet is to be accepted and routed normally.
1660 * IP_FW_DIVERT Divert the packet to port (args->cookie)
1661 * IP_FW_TEE Tee the packet to port (args->cookie)
1662 * IP_FW_DUMMYNET Send the packet to pipe/queue (args->cookie)
1666 ipfw_chk(struct ip_fw_args *args)
1669 * Local variables hold state during the processing of a packet.
1671 * IMPORTANT NOTE: to speed up the processing of rules, there
1672 * are some assumptions about the values of the variables, which
1673 * are documented here. Should you change them, please check
1674 * the implementation of the various instructions to make sure
1675 * that they still work.
1677 * args->eh The MAC header. It is non-null for a layer2
1678 * packet, it is NULL for a layer-3 packet.
1680 * m | args->m Pointer to the mbuf, as received from the caller.
1681 * It may change if ipfw_chk() does an m_pullup, or if it
1682 * consumes the packet because it calls send_reject().
1683 * XXX This has to change, so that ipfw_chk() never modifies
1684 * or consumes the buffer.
1685 * ip is simply an alias of the value of m, and it is kept
1686 * in sync with it (the packet is supposed to start with
1689 struct mbuf *m = args->m;
1690 struct ip *ip = mtod(m, struct ip *);
1693 * oif | args->oif If NULL, ipfw_chk has been called on the
1694 * inbound path (ether_input, ip_input).
1695 * If non-NULL, ipfw_chk has been called on the outbound path
1696 * (ether_output, ip_output).
1698 struct ifnet *oif = args->oif;
1700 struct ip_fw *f = NULL; /* matching rule */
1701 int retval = IP_FW_PASS;
1703 struct divert_info *divinfo;
1706 * hlen The length of the IPv4 header.
1707 * hlen >0 means we have an IPv4 packet.
1709 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
1712 * offset The offset of a fragment. offset != 0 means that
1713 * we have a fragment at this offset of an IPv4 packet.
1714 * offset == 0 means that (if this is an IPv4 packet)
1715 * this is the first or only fragment.
1720 * Local copies of addresses. They are only valid if we have
1723 * proto The protocol. Set to 0 for non-ip packets,
1724 * or to the protocol read from the packet otherwise.
1725 * proto != 0 means that we have an IPv4 packet.
1727 * src_port, dst_port port numbers, in HOST format. Only
1728 * valid for TCP and UDP packets.
1730 * src_ip, dst_ip ip addresses, in NETWORK format.
1731 * Only valid for IPv4 packets.
1734 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
1735 struct in_addr src_ip, dst_ip; /* NOTE: network format */
1736 uint16_t ip_len = 0;
1739 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
1740 * MATCH_NONE when checked and not matched (dyn_f = NULL),
1741 * MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
1743 int dyn_dir = MATCH_UNKNOWN;
1744 struct ip_fw *dyn_f = NULL;
1745 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1747 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
1748 return IP_FW_PASS; /* accept */
1750 if (args->eh == NULL || /* layer 3 packet */
1751 (m->m_pkthdr.len >= sizeof(struct ip) &&
1752 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
1753 hlen = ip->ip_hl << 2;
1756 * Collect parameters into local variables for faster matching.
1758 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
1759 proto = args->f_id.proto = 0; /* mark f_id invalid */
1760 goto after_ip_checks;
1763 proto = args->f_id.proto = ip->ip_p;
1764 src_ip = ip->ip_src;
1765 dst_ip = ip->ip_dst;
1766 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
1767 offset = ntohs(ip->ip_off) & IP_OFFMASK;
1768 ip_len = ntohs(ip->ip_len);
1770 offset = ip->ip_off & IP_OFFMASK;
1771 ip_len = ip->ip_len;
1774 #define PULLUP_TO(len) \
1776 if (m->m_len < (len)) { \
1777 args->m = m = m_pullup(m, (len));\
1779 goto pullup_failed; \
1780 ip = mtod(m, struct ip *); \
1790 PULLUP_TO(hlen + sizeof(struct tcphdr));
1791 tcp = L3HDR(struct tcphdr, ip);
1792 dst_port = tcp->th_dport;
1793 src_port = tcp->th_sport;
1794 args->f_id.flags = tcp->th_flags;
1802 PULLUP_TO(hlen + sizeof(struct udphdr));
1803 udp = L3HDR(struct udphdr, ip);
1804 dst_port = udp->uh_dport;
1805 src_port = udp->uh_sport;
1810 PULLUP_TO(hlen + 4); /* type, code and checksum. */
1811 args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
1821 args->f_id.src_ip = ntohl(src_ip.s_addr);
1822 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1823 args->f_id.src_port = src_port = ntohs(src_port);
1824 args->f_id.dst_port = dst_port = ntohs(dst_port);
1829 * Packet has already been tagged. Look for the next rule
1830 * to restart processing.
1832 * If fw_one_pass != 0 then just accept it.
1833 * XXX should not happen here, but optimized out in
1839 /* This rule is being/has been flushed */
1843 KASSERT(args->rule->cpuid == mycpuid,
1844 ("rule used on cpu%d\n", mycpuid));
1846 /* This rule was deleted */
1847 if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
1850 f = args->rule->next_rule;
1852 f = lookup_next_rule(args->rule);
1855 * Find the starting rule. It can be either the first
1856 * one, or the one after divert_rule if so requested.
1860 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
1862 divinfo = m_tag_data(mtag);
1863 skipto = divinfo->skipto;
1868 f = ctx->ipfw_layer3_chain;
1869 if (args->eh == NULL && skipto != 0) {
1870 /* No skipto during rule flushing */
1874 if (skipto >= IPFW_DEFAULT_RULE)
1875 return IP_FW_DENY; /* invalid */
1877 while (f && f->rulenum <= skipto)
1879 if (f == NULL) /* drop packet */
1881 } else if (ipfw_flushing) {
1882 /* Rules are being flushed; skip to default rule */
1883 f = ctx->ipfw_default_rule;
1886 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
1887 m_tag_delete(m, mtag);
1890 * Now scan the rules, and parse microinstructions for each rule.
1892 for (; f; f = f->next) {
1895 int skip_or; /* skip rest of OR block */
1898 if (ctx->ipfw_set_disable & (1 << f->set))
1902 for (l = f->cmd_len, cmd = f->cmd; l > 0;
1903 l -= cmdlen, cmd += cmdlen) {
1907 * check_body is a jump target used when we find a
1908 * CHECK_STATE, and need to jump to the body of
1913 cmdlen = F_LEN(cmd);
1915 * An OR block (insn_1 || .. || insn_n) has the
1916 * F_OR bit set in all but the last instruction.
1917 * The first match will set "skip_or", and cause
1918 * the following instructions to be skipped until
1919 * past the one with the F_OR bit clear.
1921 if (skip_or) { /* skip this instruction */
1922 if ((cmd->len & F_OR) == 0)
1923 skip_or = 0; /* next one is good */
1926 match = 0; /* set to 1 if we succeed */
1928 switch (cmd->opcode) {
1930 * The first set of opcodes compares the packet's
1931 * fields with some pattern, setting 'match' if a
1932 * match is found. At the end of the loop there is
1933 * logic to deal with F_NOT and F_OR flags associated
1941 kprintf("ipfw: opcode %d unimplemented\n",
1948 * We only check offset == 0 && proto != 0,
1949 * as this ensures that we have an IPv4
1950 * packet with the ports info.
1955 match = ipfw_match_uid(&args->f_id, oif,
1957 (uid_t)((ipfw_insn_u32 *)cmd)->d[0],
1964 match = iface_match(m->m_pkthdr.rcvif,
1965 (ipfw_insn_if *)cmd);
1969 match = iface_match(oif, (ipfw_insn_if *)cmd);
1973 match = iface_match(oif ? oif :
1974 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
1978 if (args->eh != NULL) { /* have MAC header */
1979 uint32_t *want = (uint32_t *)
1980 ((ipfw_insn_mac *)cmd)->addr;
1981 uint32_t *mask = (uint32_t *)
1982 ((ipfw_insn_mac *)cmd)->mask;
1983 uint32_t *hdr = (uint32_t *)args->eh;
1986 (want[0] == (hdr[0] & mask[0]) &&
1987 want[1] == (hdr[1] & mask[1]) &&
1988 want[2] == (hdr[2] & mask[2]));
1993 if (args->eh != NULL) {
1995 ntohs(args->eh->ether_type);
1997 ((ipfw_insn_u16 *)cmd)->ports;
2000 /* Special vlan handling */
2001 if (m->m_flags & M_VLANTAG)
2004 for (i = cmdlen - 1; !match && i > 0;
2007 (t >= p[0] && t <= p[1]);
2013 match = (hlen > 0 && offset != 0);
2016 case O_IN: /* "out" is "not in" */
2017 match = (oif == NULL);
2021 match = (args->eh != NULL);
2026 * We do not allow an arg of 0, so the
2027 * check of "proto" alone suffices.
2029 match = (proto == cmd->arg1);
2033 match = (hlen > 0 &&
2034 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2039 match = (hlen > 0 &&
2040 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2042 ((ipfw_insn_ip *)cmd)->mask.s_addr));
2049 tif = INADDR_TO_IFP(&src_ip);
2050 match = (tif != NULL);
2057 uint32_t *d = (uint32_t *)(cmd + 1);
2059 cmd->opcode == O_IP_DST_SET ?
2065 addr -= d[0]; /* subtract base */
2067 (addr < cmd->arg1) &&
2068 (d[1 + (addr >> 5)] &
2069 (1 << (addr & 0x1f)));
2074 match = (hlen > 0 &&
2075 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2080 match = (hlen > 0) &&
2081 (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2083 ((ipfw_insn_ip *)cmd)->mask.s_addr));
2090 tif = INADDR_TO_IFP(&dst_ip);
2091 match = (tif != NULL);
2098 * offset == 0 && proto != 0 is enough
2099 * to guarantee that we have an IPv4
2100 * packet with port info.
2102 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
2105 (cmd->opcode == O_IP_SRCPORT) ?
2106 src_port : dst_port ;
2108 ((ipfw_insn_u16 *)cmd)->ports;
2111 for (i = cmdlen - 1; !match && i > 0;
2114 (x >= p[0] && x <= p[1]);
2120 match = (offset == 0 && proto==IPPROTO_ICMP &&
2121 icmptype_match(ip, (ipfw_insn_u32 *)cmd));
2125 match = (hlen > 0 && ipopts_match(ip, cmd));
2129 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
2133 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
2137 match = (hlen > 0 &&
2138 cmd->arg1 == ntohs(ip->ip_id));
2142 match = (hlen > 0 && cmd->arg1 == ip_len);
2145 case O_IPPRECEDENCE:
2146 match = (hlen > 0 &&
2147 (cmd->arg1 == (ip->ip_tos & 0xe0)));
2151 match = (hlen > 0 &&
2152 flags_match(cmd, ip->ip_tos));
2156 match = (proto == IPPROTO_TCP && offset == 0 &&
2158 L3HDR(struct tcphdr,ip)->th_flags));
2162 match = (proto == IPPROTO_TCP && offset == 0 &&
2163 tcpopts_match(ip, cmd));
2167 match = (proto == IPPROTO_TCP && offset == 0 &&
2168 ((ipfw_insn_u32 *)cmd)->d[0] ==
2169 L3HDR(struct tcphdr,ip)->th_seq);
2173 match = (proto == IPPROTO_TCP && offset == 0 &&
2174 ((ipfw_insn_u32 *)cmd)->d[0] ==
2175 L3HDR(struct tcphdr,ip)->th_ack);
2179 match = (proto == IPPROTO_TCP && offset == 0 &&
2181 L3HDR(struct tcphdr,ip)->th_win);
2185 /* reject packets which have SYN only */
2186 /* XXX should i also check for TH_ACK ? */
2187 match = (proto == IPPROTO_TCP && offset == 0 &&
2188 (L3HDR(struct tcphdr,ip)->th_flags &
2189 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2194 ipfw_log(f, hlen, args->eh, m, oif);
2199 match = (krandom() <
2200 ((ipfw_insn_u32 *)cmd)->d[0]);
2204 * The second set of opcodes represents 'actions',
2205 * i.e. the terminal part of a rule once the packet
2206 * matches all previous patterns.
2207 * Typically there is only one action for each rule,
2208 * and the opcode is stored at the end of the rule
2209 * (but there are exceptions -- see below).
2211 * In general, here we set retval and terminate the
2212 * outer loop (would be a 'break 3' in some language,
2213 * but we need to do a 'goto done').
2216 * O_COUNT and O_SKIPTO actions:
2217 * instead of terminating, we jump to the next rule
2218 * ('goto next_rule', equivalent to a 'break 2'),
2219 * or to the SKIPTO target ('goto again' after
2220 * having set f, cmd and l), respectively.
2222 * O_LIMIT and O_KEEP_STATE: these opcodes are
2223 * not real 'actions', and are stored right
2224 * before the 'action' part of the rule.
2225 * These opcodes try to install an entry in the
2226 * state tables; if successful, we continue with
2227 * the next opcode (match=1; break;), otherwise
2228 * the packet must be dropped ('goto done' after
2229 * setting retval). If static rules are changed
2230 * during the state installation, the packet will
2231 * be dropped and the rule's stats will not be updated
2232 * ('return IP_FW_DENY').
2234 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2235 * cause a lookup of the state table, and a jump
2236 * to the 'action' part of the parent rule
2237 * ('goto check_body') if an entry is found, or
2238 * (CHECK_STATE only) a jump to the next rule if
2239 * the entry is not found ('goto next_rule').
2240 * The result of the lookup is cached so that
2241 * further instances of these opcodes become
2242 * effectively NOPs. If static rules are changed
2243 * during the state lookup, the packet will
2244 * be dropped and rule's stats will not be updated
2245 * ('return IP_FW_DENY').
2249 if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
2250 kprintf("%s rule (%d) is not ready "
2252 cmd->opcode == O_LIMIT ?
2253 "limit" : "keep state",
2254 f->rulenum, f->cpuid);
2257 if (install_state(f,
2258 (ipfw_insn_limit *)cmd, args, &deny)) {
2262 retval = IP_FW_DENY;
2263 goto done; /* error/limit violation */
2273 * dynamic rules are checked at the first
2274 * keep-state or check-state occurrence,
2275 * with the result being stored in dyn_dir.
2276 * The compiler introduces a PROBE_STATE
2277 * instruction for us when we have a
2278 * KEEP_STATE (because PROBE_STATE needs
2281 if (dyn_dir == MATCH_UNKNOWN) {
2282 dyn_f = lookup_rule(&args->f_id,
2284 proto == IPPROTO_TCP ?
2285 L3HDR(struct tcphdr, ip) : NULL,
2289 if (dyn_f != NULL) {
2291 * Found a rule from a dynamic
2292 * entry; jump to the 'action'
2296 cmd = ACTION_PTR(f);
2297 l = f->cmd_len - f->act_ofs;
2302 * Dynamic entry not found. If CHECK_STATE,
2303 * skip to next rule, if PROBE_STATE just
2304 * ignore and continue with next opcode.
2306 if (cmd->opcode == O_CHECK_STATE)
2308 else if (!(f->rule_flags & IPFW_RULE_F_STATE))
2309 goto next_rule; /* not ready yet */
2314 retval = IP_FW_PASS; /* accept */
2319 args->rule = f; /* report matching rule */
2320 args->cookie = cmd->arg1;
2321 retval = IP_FW_DUMMYNET;
2326 if (args->eh) /* not on layer 2 */
2329 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
2330 sizeof(*divinfo), MB_DONTWAIT);
2332 retval = IP_FW_DENY;
2335 divinfo = m_tag_data(mtag);
2337 divinfo->skipto = f->rulenum;
2338 divinfo->port = cmd->arg1;
2339 divinfo->tee = (cmd->opcode == O_TEE);
2340 m_tag_prepend(m, mtag);
2342 args->cookie = cmd->arg1;
2343 retval = (cmd->opcode == O_DIVERT) ?
2344 IP_FW_DIVERT : IP_FW_TEE;
2349 f->pcnt++; /* update stats */
2351 f->timestamp = time_second;
2352 if (cmd->opcode == O_COUNT)
2355 if (f->next_rule == NULL)
2356 lookup_next_rule(f);
2362 * Drop the packet and send a reject notice
2363 * if the packet is not ICMP (or is an ICMP
2364 * query), and it is not multicast/broadcast.
2367 (proto != IPPROTO_ICMP ||
2368 is_icmp_query(ip)) &&
2369 !(m->m_flags & (M_BCAST|M_MCAST)) &&
2370 !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
2372 * Update statistics before the possible
2373 * blocking 'send_reject'
2377 f->timestamp = time_second;
2379 send_reject(args, cmd->arg1,
2384 * Return directly here, rule stats
2385 * have been updated above.
2391 retval = IP_FW_DENY;
2395 if (args->eh) /* not valid on layer2 pkts */
2397 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
2398 struct sockaddr_in *sin;
2400 mtag = m_tag_get(PACKET_TAG_IPFORWARD,
2401 sizeof(*sin), MB_DONTWAIT);
2403 retval = IP_FW_DENY;
2406 sin = m_tag_data(mtag);
2408 /* Structure copy */
2409 *sin = ((ipfw_insn_sa *)cmd)->sa;
2411 m_tag_prepend(m, mtag);
2412 m->m_pkthdr.fw_flags |=
2413 IPFORWARD_MBUF_TAGGED;
2414 m->m_pkthdr.fw_flags &=
2415 ~BRIDGE_MBUF_TAGGED;
2417 retval = IP_FW_PASS;
2421 panic("-- unknown opcode %d\n", cmd->opcode);
2422 } /* end of switch() on opcodes */
2424 if (cmd->len & F_NOT)
2428 if (cmd->len & F_OR)
2431 if (!(cmd->len & F_OR)) /* not an OR block, */
2432 break; /* try next rule */
2435 } /* end of inner for, scan opcodes */
2437 next_rule:; /* try next rule */
2439 } /* end of outer for, scan rules */
2440 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
2444 /* Update statistics */
2447 f->timestamp = time_second;
2452 kprintf("pullup failed\n");
2457 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
2462 const struct ipfw_flow_id *id;
2463 struct dn_flow_id *fid;
2467 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
2472 m_tag_prepend(m, mtag);
2474 pkt = m_tag_data(mtag);
2475 bzero(pkt, sizeof(*pkt));
2477 cmd = fwa->rule->cmd + fwa->rule->act_ofs;
2478 if (cmd->opcode == O_LOG)
2480 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
2481 ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));
2484 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
2485 pkt->ifp = fwa->oif;
2486 pkt->pipe_nr = pipe_nr;
2488 pkt->cpuid = mycpuid;
2489 pkt->msgport = cur_netport();
2493 fid->fid_dst_ip = id->dst_ip;
2494 fid->fid_src_ip = id->src_ip;
2495 fid->fid_dst_port = id->dst_port;
2496 fid->fid_src_port = id->src_port;
2497 fid->fid_proto = id->proto;
2498 fid->fid_flags = id->flags;
2500 ipfw_ref_rule(fwa->rule);
2501 pkt->dn_priv = fwa->rule;
2502 pkt->dn_unref_priv = ipfw_unref_rule;
2504 if (cmd->opcode == O_PIPE)
2505 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
2507 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
2511 * When a rule is added/deleted, clear the next_rule pointers in all rules.
2512 * These will be reconstructed on the fly as packets are matched.
2513 * Must be called at splimp().
2516 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
2520 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
2521 rule->next_rule = NULL;
2524 static __inline void
2525 ipfw_inc_static_count(struct ip_fw *rule)
2527 /* Static rule's counts are updated only on CPU0 */
2528 KKASSERT(mycpuid == 0);
2531 static_ioc_len += IOC_RULESIZE(rule);
2534 static __inline void
2535 ipfw_dec_static_count(struct ip_fw *rule)
2537 int l = IOC_RULESIZE(rule);
2539 /* Static rule's counts are updated only on CPU0 */
2540 KKASSERT(mycpuid == 0);
2542 KASSERT(static_count > 0, ("invalid static count %u\n", static_count));
2545 KASSERT(static_ioc_len >= l,
2546 ("invalid static len %u\n", static_ioc_len));
2547 static_ioc_len -= l;
2551 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
2553 if (fwmsg->sibling != NULL) {
2554 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
2555 fwmsg->sibling->sibling = rule;
2557 fwmsg->sibling = rule;
2560 static struct ip_fw *
2561 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
2565 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
2567 rule->act_ofs = ioc_rule->act_ofs;
2568 rule->cmd_len = ioc_rule->cmd_len;
2569 rule->rulenum = ioc_rule->rulenum;
2570 rule->set = ioc_rule->set;
2571 rule->usr_flags = ioc_rule->usr_flags;
2573 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
2576 rule->cpuid = mycpuid;
2580 stub->rule[mycpuid] = rule;
2586 ipfw_add_rule_dispatch(netmsg_t nmsg)
2588 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
2589 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2592 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);
2595 * Bump generation after ipfw_create_rule(),
2596 * since this function is blocking
2601 * Insert rule into the pre-determined position
2603 if (fwmsg->prev_rule != NULL) {
2604 struct ip_fw *prev, *next;
2606 prev = fwmsg->prev_rule;
2607 KKASSERT(prev->cpuid == mycpuid);
2609 next = fwmsg->next_rule;
2610 KKASSERT(next->cpuid == mycpuid);
2616 * Move to the position on the next CPU
2617 * before the msg is forwarded.
2619 fwmsg->prev_rule = prev->sibling;
2620 fwmsg->next_rule = next->sibling;
2622 KKASSERT(fwmsg->next_rule == NULL);
2623 rule->next = ctx->ipfw_layer3_chain;
2624 ctx->ipfw_layer3_chain = rule;
2627 /* Link rule CPU sibling */
2628 ipfw_link_sibling(fwmsg, rule);
2630 ipfw_flush_rule_ptrs(ctx);
2633 /* Statistics only need to be updated once */
2634 ipfw_inc_static_count(rule);
2636 /* Return the rule on CPU0 */
2637 nmsg->lmsg.u.ms_resultp = rule;
2640 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
2644 ipfw_enable_state_dispatch(netmsg_t nmsg)
2646 struct lwkt_msg *lmsg = &nmsg->lmsg;
2647 struct ip_fw *rule = lmsg->u.ms_resultp;
2648 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2652 KKASSERT(rule->cpuid == mycpuid);
2653 KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
2654 KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
2655 rule->rule_flags |= IPFW_RULE_F_STATE;
2656 lmsg->u.ms_resultp = rule->sibling;
2658 ifnet_forwardmsg(lmsg, mycpuid + 1);
2662 * Add a new rule to the list. Copy the rule into a malloc'ed area,
2663 * then possibly create a rule number and add the rule to the list.
2664 * Update the rule_number in the input struct so the caller knows
2668 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
2670 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2671 struct netmsg_ipfw fwmsg;
2672 struct netmsg_base *nmsg;
2673 struct ip_fw *f, *prev, *rule;
2674 struct ip_fw_stub *stub;
2676 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2679 * If rulenum is 0, find highest numbered rule before the
2680 * default rule, and add the auto-increment step to that number.
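 * For example, with autoinc_step 100 and existing rules numbered
 * 100 and 200 (plus the default rule), a rule submitted with
 * rulenum 0 is assigned number 300.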
2682 if (ioc_rule->rulenum == 0) {
2683 int step = autoinc_step;
2685 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
2686 step <= IPFW_AUTOINC_STEP_MAX);
2689 * Locate the highest numbered rule before default
2691 for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
2692 if (f->rulenum == IPFW_DEFAULT_RULE)
2694 ioc_rule->rulenum = f->rulenum;
2696 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
2697 ioc_rule->rulenum += step;
2699 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
2700 ioc_rule->rulenum != 0,
2701 ("invalid rule num %d\n", ioc_rule->rulenum));
2704 * Now find the right place for the new rule in the sorted list.
2706 for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
2707 prev = f, f = f->next) {
2708 if (f->rulenum > ioc_rule->rulenum) {
2709 /* Found the location */
2713 KASSERT(f != NULL, ("no default rule?!\n"));
2715 if (rule_flags & IPFW_RULE_F_STATE) {
2719 * If the new rule will create states, then allocate
2720 * a rule stub, which will be referenced by states
2723 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
2724 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
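/*
 * The stub ends with a per-CPU array of rule pointers (one element is
 * part of the structure itself, hence the 'ncpus - 1' extra above);
 * each CPU's duplicate later fills in its own stub->rule[mycpuid] slot.
 */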
2730 * Duplicate the rule onto each CPU.
2731 * The rule duplicated on CPU0 will be returned.
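 * The message is dispatched on CPU0 first and then forwarded CPU by
 * CPU by ipfw_add_rule_dispatch(); prev_rule/next_rule are advanced
 * through the sibling pointers at each hop, so every CPU inserts its
 * copy at the same position in its own chain.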
2733 bzero(&fwmsg, sizeof(fwmsg));
2735 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2736 0, ipfw_add_rule_dispatch);
2737 fwmsg.ioc_rule = ioc_rule;
2738 fwmsg.prev_rule = prev;
2739 fwmsg.next_rule = prev == NULL ? NULL : f;
2742 ifnet_domsg(&nmsg->lmsg, 0);
2743 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);
2745 rule = nmsg->lmsg.u.ms_resultp;
2746 KKASSERT(rule != NULL && rule->cpuid == mycpuid);
2748 if (rule_flags & IPFW_RULE_F_STATE) {
2750 * Turn on state flag, _after_ everything on all
2751 * CPUs has been set up.
2753 bzero(nmsg, sizeof(*nmsg));
2754 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2755 0, ipfw_enable_state_dispatch);
2756 nmsg->lmsg.u.ms_resultp = rule;
2758 ifnet_domsg(&nmsg->lmsg, 0);
2759 KKASSERT(nmsg->lmsg.u.ms_resultp == NULL);
2762 DPRINTF("++ installed rule %d, static count now %d\n",
2763 rule->rulenum, static_count);
2767 * Free storage associated with a static rule (including derived
2769 * The caller is in charge of clearing rule pointers to avoid
2770 * dangling pointers.
2771 * @return a pointer to the next entry.
2772 * Arguments are not checked, so they better be correct.
2773 * Must be called at splimp().
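 * Typical usage is a deletion loop of the form
 *	while (rule && rule->rulenum == rulenum)
 *		rule = ipfw_delete_rule(ctx, prev, rule);
 * with 'prev' left unchanged across iterations.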
2775 static struct ip_fw *
2776 ipfw_delete_rule(struct ipfw_context *ctx,
2777 struct ip_fw *prev, struct ip_fw *rule)
2780 struct ip_fw_stub *stub;
2784 /* STATE flag should have been cleared before we reach here */
2785 KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);
2790 ctx->ipfw_layer3_chain = n;
2794 /* Mark the rule as invalid */
2795 rule->rule_flags |= IPFW_RULE_F_INVALID;
2796 rule->next_rule = NULL;
2797 rule->sibling = NULL;
2800 /* Don't reset cpuid here; keep various assertion working */
2804 /* Statistics only need to be updated once */
2806 ipfw_dec_static_count(rule);
2808 /* Free 'stub' on the last CPU */
2809 if (stub != NULL && mycpuid == ncpus - 1)
2810 kfree(stub, M_IPFW);
2812 /* Try to free this rule */
2813 ipfw_free_rule(rule);
2815 /* Return the next rule */
2820 ipfw_flush_dispatch(netmsg_t nmsg)
2822 struct lwkt_msg *lmsg = &nmsg->lmsg;
2823 int kill_default = lmsg->u.ms_result;
2824 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2827 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */
2829 while ((rule = ctx->ipfw_layer3_chain) != NULL &&
2830 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
2831 ipfw_delete_rule(ctx, NULL, rule);
2833 ifnet_forwardmsg(lmsg, mycpuid + 1);
2837 ipfw_disable_rule_state_dispatch(netmsg_t nmsg)
2839 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2840 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2845 rule = dmsg->start_rule;
2847 KKASSERT(rule->cpuid == mycpuid);
2850 * Move to the position on the next CPU
2851 * before the msg is forwarded.
2853 dmsg->start_rule = rule->sibling;
2855 KKASSERT(dmsg->rulenum == 0);
2856 rule = ctx->ipfw_layer3_chain;
2859 while (rule != NULL) {
2860 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
2862 rule->rule_flags &= ~IPFW_RULE_F_STATE;
2866 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
2870 * Deletes all rules from a chain (including the default rule
2871 * if the second argument is set).
2872 * Must be called at splimp().
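 * The flush proceeds in stages: the STATE flag is first cleared on
 * all rules (so no new states are created), then all existing states
 * are removed under dyn_lock, and finally ipfw_flush_dispatch() walks
 * every CPU's chain deleting the rules themselves, keeping the
 * default rule unless 'kill_default' is set.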
2875 ipfw_flush(int kill_default)
2877 struct netmsg_del dmsg;
2878 struct netmsg_base nmsg;
2879 struct lwkt_msg *lmsg;
2881 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2883 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2886 * If 'kill_default' is set, the caller has already done the
2887 * necessary msgport syncing; no need to do it again.
2889 if (!kill_default) {
2891 * Let ipfw_chk() know the rules are going to
2892 * be flushed, so it could jump directly to
2896 netmsg_service_sync();
2900 * Clear STATE flag on rules, so no more states (dyn rules)
2903 bzero(&dmsg, sizeof(dmsg));
2904 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
2905 0, ipfw_disable_rule_state_dispatch);
2906 ifnet_domsg(&dmsg.base.lmsg, 0);
2909 * This actually nukes all states (dyn rules)
2911 lockmgr(&dyn_lock, LK_EXCLUSIVE);
2912 for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
2914 * Can't check IPFW_RULE_F_STATE here,
2915 * since it has been cleared previously.
2916 * Check 'stub' instead.
2918 if (rule->stub != NULL) {
2920 remove_dyn_rule_locked(rule, NULL);
2923 lockmgr(&dyn_lock, LK_RELEASE);
2926 * Press the 'flush' button
2928 bzero(&nmsg, sizeof(nmsg));
2929 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2930 0, ipfw_flush_dispatch);
2932 lmsg->u.ms_result = kill_default;
2933 ifnet_domsg(lmsg, 0);
2935 KASSERT(dyn_count == 0, ("%u dyn rules remain\n", dyn_count));
2938 if (ipfw_dyn_v != NULL) {
2940 * Free the dynamic rule (state) hash table
2942 kfree(ipfw_dyn_v, M_IPFW);
2946 KASSERT(static_count == 0,
2947 ("%u static rules remains\n", static_count));
2948 KASSERT(static_ioc_len == 0,
2949 ("%u bytes of static rules remains\n", static_ioc_len));
2951 KASSERT(static_count == 1,
2952 ("%u static rules remains\n", static_count));
2953 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
2954 ("%u bytes of static rules remains, should be %lu\n",
2956 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
2964 ipfw_alt_delete_rule_dispatch(netmsg_t nmsg)
2966 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2967 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2968 struct ip_fw *rule, *prev;
2970 rule = dmsg->start_rule;
2971 KKASSERT(rule->cpuid == mycpuid);
2972 dmsg->start_rule = rule->sibling;
2974 prev = dmsg->prev_rule;
2976 KKASSERT(prev->cpuid == mycpuid);
2979 * Move to the position on the next CPU
2980 * before the msg is forwarded.
2982 dmsg->prev_rule = prev->sibling;
2986 * flush pointers outside the loop, then delete all matching
2987 * rules. 'prev' remains the same throughout the cycle.
2989 ipfw_flush_rule_ptrs(ctx);
2990 while (rule && rule->rulenum == dmsg->rulenum)
2991 rule = ipfw_delete_rule(ctx, prev, rule);
2993 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
2997 ipfw_alt_delete_rule(uint16_t rulenum)
2999 struct ip_fw *prev, *rule, *f;
3000 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3001 struct netmsg_del dmsg;
3002 struct netmsg_base *nmsg;
3006 * Locate first rule to delete
3008 for (prev = NULL, rule = ctx->ipfw_layer3_chain;
3009 rule && rule->rulenum < rulenum;
3010 prev = rule, rule = rule->next)
3012 if (rule->rulenum != rulenum)
3016 * Check whether any rules with the given number will
3020 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
3021 if (f->rule_flags & IPFW_RULE_F_STATE) {
3029 * Clear the STATE flag, so no more states will be
3030 * created based on the rules numbered 'rulenum'.
3032 bzero(&dmsg, sizeof(dmsg));
3034 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3035 0, ipfw_disable_rule_state_dispatch);
3036 dmsg.start_rule = rule;
3037 dmsg.rulenum = rulenum;
3039 ifnet_domsg(&nmsg->lmsg, 0);
3040 KKASSERT(dmsg.start_rule == NULL);
3043 * Nuke all related states
3045 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3046 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
3048 * Can't check IPFW_RULE_F_STATE here,
3049 * since it has been cleared previously.
3050 * Check 'stub' instead.
3052 if (f->stub != NULL) {
3054 remove_dyn_rule_locked(f, NULL);
3057 lockmgr(&dyn_lock, LK_RELEASE);
3061 * Get rid of the rule duplications on all CPUs
3063 bzero(&dmsg, sizeof(dmsg));
3065 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3066 0, ipfw_alt_delete_rule_dispatch);
3067 dmsg.prev_rule = prev;
3068 dmsg.start_rule = rule;
3069 dmsg.rulenum = rulenum;
3071 ifnet_domsg(&nmsg->lmsg, 0);
3072 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
3077 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
3079 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3080 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3081 struct ip_fw *prev, *rule;
3086 ipfw_flush_rule_ptrs(ctx);
3089 rule = ctx->ipfw_layer3_chain;
3090 while (rule != NULL) {
3091 if (rule->set == dmsg->from_set) {
3092 rule = ipfw_delete_rule(ctx, prev, rule);
3101 KASSERT(del, ("no match set?!\n"));
3103 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3107 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
3109 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3110 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3118 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3119 if (rule->set == dmsg->from_set) {
3123 rule->rule_flags &= ~IPFW_RULE_F_STATE;
3126 KASSERT(cleared, ("no match set?!\n"));
3128 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3132 ipfw_alt_delete_ruleset(uint8_t set)
3134 struct netmsg_del dmsg;
3135 struct netmsg_base *nmsg;
3138 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3141 * Check whether the 'set' exists. If it exists,
3142 * then check whether any rules within the set will
3143 * try to create states.
3147 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3148 if (rule->set == set) {
3150 if (rule->rule_flags & IPFW_RULE_F_STATE) {
3157 return 0; /* XXX EINVAL? */
3161 * Clear the STATE flag, so no more states will be
3162 * created based on the rules in this set.
3164 bzero(&dmsg, sizeof(dmsg));
3166 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3167 0, ipfw_disable_ruleset_state_dispatch);
3168 dmsg.from_set = set;
3170 ifnet_domsg(&nmsg->lmsg, 0);
3173 * Nuke all related states
3175 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3176 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3177 if (rule->set != set)
3181 * Can't check IPFW_RULE_F_STATE here,
3182 * since it has been cleared previously.
3183 * Check 'stub' instead.
3185 if (rule->stub != NULL) {
3187 remove_dyn_rule_locked(rule, NULL);
3190 lockmgr(&dyn_lock, LK_RELEASE);
3196 bzero(&dmsg, sizeof(dmsg));
3198 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3199 0, ipfw_alt_delete_ruleset_dispatch);
3200 dmsg.from_set = set;
3202 ifnet_domsg(&nmsg->lmsg, 0);
3207 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
3209 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3212 rule = dmsg->start_rule;
3213 KKASSERT(rule->cpuid == mycpuid);
3216 * Move to the position on the next CPU
3217 * before the msg is forwarded.
3219 dmsg->start_rule = rule->sibling;
3221 while (rule && rule->rulenum <= dmsg->rulenum) {
3222 if (rule->rulenum == dmsg->rulenum)
3223 rule->set = dmsg->to_set;
3226 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3230 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
3232 struct netmsg_del dmsg;
3233 struct netmsg_base *nmsg;
3235 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3238 * Locate first rule to move
3240 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
3241 rule = rule->next) {
3242 if (rule->rulenum == rulenum && rule->set != set)
3245 if (rule == NULL || rule->rulenum > rulenum)
3246 return 0; /* XXX error? */
3248 bzero(&dmsg, sizeof(dmsg));
3250 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3251 0, ipfw_alt_move_rule_dispatch);
3252 dmsg.start_rule = rule;
3253 dmsg.rulenum = rulenum;
3256 ifnet_domsg(&nmsg->lmsg, 0);
3257 KKASSERT(dmsg.start_rule == NULL);
3262 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
3264 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3265 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3268 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3269 if (rule->set == dmsg->from_set)
3270 rule->set = dmsg->to_set;
3272 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3276 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
3278 struct netmsg_del dmsg;
3279 struct netmsg_base *nmsg;
3281 bzero(&dmsg, sizeof(dmsg));
3283 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3284 0, ipfw_alt_move_ruleset_dispatch);
3285 dmsg.from_set = from_set;
3286 dmsg.to_set = to_set;
3288 ifnet_domsg(&nmsg->lmsg, 0);
3293 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
3295 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3296 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3299 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3300 if (rule->set == dmsg->from_set)
3301 rule->set = dmsg->to_set;
3302 else if (rule->set == dmsg->to_set)
3303 rule->set = dmsg->from_set;
3305 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3309 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
3311 struct netmsg_del dmsg;
3312 struct netmsg_base *nmsg;
3314 bzero(&dmsg, sizeof(dmsg));
3316 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3317 0, ipfw_alt_swap_ruleset_dispatch);
3318 dmsg.from_set = set1;
3321 ifnet_domsg(&nmsg->lmsg, 0);
3326 * Remove all rules with given number, and also do set manipulation.
3328 * The argument is a uint32_t. The low 16 bits are the rule or set number,
3329 * the next 8 bits are the new set, the top 8 bits are the command:
3331 * 0 delete rules with given number
3332 * 1 delete rules with given set number
3333 * 2 move rules with given number to new set
3334 * 3 move rules with given set number to new set
3335 * 4 swap sets with given numbers
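 * For example, arg = (2 << 24) | (5 << 16) | 100 requests command 2:
 * move the rules numbered 100 into set 5.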
3338 ipfw_ctl_alter(uint32_t arg)
3341 uint8_t cmd, new_set;
3344 rulenum = arg & 0xffff;
3345 cmd = (arg >> 24) & 0xff;
3346 new_set = (arg >> 16) & 0xff;
3350 if (new_set >= IPFW_DEFAULT_SET)
3352 if (cmd == 0 || cmd == 2) {
3353 if (rulenum == IPFW_DEFAULT_RULE)
3356 if (rulenum >= IPFW_DEFAULT_SET)
3361 case 0: /* delete rules with given number */
3362 error = ipfw_alt_delete_rule(rulenum);
3365 case 1: /* delete all rules with given set number */
3366 error = ipfw_alt_delete_ruleset(rulenum);
3369 case 2: /* move rules with given number to new set */
3370 error = ipfw_alt_move_rule(rulenum, new_set);
3373 case 3: /* move rules with given set number to new set */
3374 error = ipfw_alt_move_ruleset(rulenum, new_set);
3377 case 4: /* swap two sets */
3378 error = ipfw_alt_swap_ruleset(rulenum, new_set);
3385 * Clear counters for a specific rule.
3388 clear_counters(struct ip_fw *rule, int log_only)
3390 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
3392 if (log_only == 0) {
3393 rule->bcnt = rule->pcnt = 0;
3394 rule->timestamp = 0;
3396 if (l->o.opcode == O_LOG)
3397 l->log_left = l->max_log;
3401 ipfw_zero_entry_dispatch(netmsg_t nmsg)
3403 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
3404 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3407 if (zmsg->rulenum == 0) {
3408 KKASSERT(zmsg->start_rule == NULL);
3410 ctx->ipfw_norule_counter = 0;
3411 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3412 clear_counters(rule, zmsg->log_only);
3414 struct ip_fw *start = zmsg->start_rule;
3416 KKASSERT(start->cpuid == mycpuid);
3417 KKASSERT(start->rulenum == zmsg->rulenum);
3420 * We can have multiple rules with the same number, so we
3421 * need to clear them all.
3423 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
3425 clear_counters(rule, zmsg->log_only);
3428 * Move to the position on the next CPU
3429 * before the msg is forwarded.
3431 zmsg->start_rule = start->sibling;
3433 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
3437 * Reset some or all counters on firewall rules.
3438 * @arg rulenum is 0 to clear all entries, or contains a specific
3440 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
3443 ipfw_ctl_zero_entry(int rulenum, int log_only)
3445 struct netmsg_zent zmsg;
3446 struct netmsg_base *nmsg;
3448 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3450 bzero(&zmsg, sizeof(zmsg));
3452 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3453 0, ipfw_zero_entry_dispatch);
3454 zmsg.log_only = log_only;
3457 msg = log_only ? "ipfw: All logging counts reset.\n"
3458 : "ipfw: Accounting cleared.\n";
3463 * Locate the first rule with 'rulenum'
3465 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3466 if (rule->rulenum == rulenum)
3469 if (rule == NULL) /* we did not find any matching rules */
3471 zmsg.start_rule = rule;
3472 zmsg.rulenum = rulenum;
3474 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
3475 : "ipfw: Entry %d cleared.\n";
3477 ifnet_domsg(&nmsg->lmsg, 0);
3478 KKASSERT(zmsg.start_rule == NULL);
3481 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
3486 * Check validity of the structure before insert.
3487 * Fortunately rules are simple, so this mostly needs to check rule sizes.
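 * Note that cmd_len and the per-instruction F_LEN() sizes are counted
 * in 32-bit words, which is why the instruction array is copied with
 * a '* 4' conversion to bytes elsewhere.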
3490 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
3493 int have_action = 0;
3498 /* Check for valid size */
3499 if (size < sizeof(*rule)) {
3500 kprintf("ipfw: rule too short\n");
3503 l = IOC_RULESIZE(rule);
3505 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
3509 /* Check rule number */
3510 if (rule->rulenum == IPFW_DEFAULT_RULE) {
3511 kprintf("ipfw: invalid rule number\n");
3516 * Now go for the individual checks. Very simple ones, basically only
3517 * instruction sizes.
3519 for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
3520 l -= cmdlen, cmd += cmdlen) {
3521 cmdlen = F_LEN(cmd);
3523 kprintf("ipfw: opcode %d size truncated\n",
3528 DPRINTF("ipfw: opcode %d\n", cmd->opcode);
3530 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
3531 /* This rule will create states */
3532 *rule_flags |= IPFW_RULE_F_STATE;
3535 switch (cmd->opcode) {
3549 case O_IPPRECEDENCE:
3556 if (cmdlen != F_INSN_SIZE(ipfw_insn))
3568 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
3573 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
3578 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
3581 ((ipfw_insn_log *)cmd)->log_left =
3582 ((ipfw_insn_log *)cmd)->max_log;
3588 if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
3590 if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
3591 kprintf("ipfw: opcode %d, useless rule\n",
3599 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
3600 kprintf("ipfw: invalid set size %d\n",
3604 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
3610 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
3616 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
3617 if (cmdlen < 2 || cmdlen > 31)
3624 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
3630 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
3635 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
3640 fwd_addr = ((ipfw_insn_sa *)cmd)->
3642 if (IN_MULTICAST(ntohl(fwd_addr))) {
3643 kprintf("ipfw: cannot forward to "
3644 "multicast address\n");
3650 case O_FORWARD_MAC: /* XXX not implemented yet */
3659 if (cmdlen != F_INSN_SIZE(ipfw_insn))
3663 kprintf("ipfw: opcode %d, multiple actions"
3670 kprintf("ipfw: opcode %d, action must be"
3677 kprintf("ipfw: opcode %d, unknown opcode\n",
3682 if (have_action == 0) {
3683 kprintf("ipfw: missing action\n");
3689 kprintf("ipfw: opcode %d size %d wrong\n",
3690 cmd->opcode, cmdlen);
3695 ipfw_ctl_add_rule(struct sockopt *sopt)
3697 struct ipfw_ioc_rule *ioc_rule;
3699 uint32_t rule_flags;
3702 size = sopt->sopt_valsize;
3703 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
3704 size < sizeof(*ioc_rule)) {
3707 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
3708 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
3709 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
3711 ioc_rule = sopt->sopt_val;
3713 error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
3717 ipfw_add_rule(ioc_rule, rule_flags);
3719 if (sopt->sopt_dir == SOPT_GET)
3720 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
3725 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
3727 const struct ip_fw *sibling;
3732 KKASSERT(rule->cpuid == IPFW_CFGCPUID);
3734 ioc_rule->act_ofs = rule->act_ofs;
3735 ioc_rule->cmd_len = rule->cmd_len;
3736 ioc_rule->rulenum = rule->rulenum;
3737 ioc_rule->set = rule->set;
3738 ioc_rule->usr_flags = rule->usr_flags;
3740 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
3741 ioc_rule->static_count = static_count;
3742 ioc_rule->static_len = static_ioc_len;
3745 * Visit (read-only) all of the rule's duplications to get
3746 * the necessary statistics
3753 ioc_rule->timestamp = 0;
3754 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
3755 ioc_rule->pcnt += sibling->pcnt;
3756 ioc_rule->bcnt += sibling->bcnt;
3757 if (sibling->timestamp > ioc_rule->timestamp)
3758 ioc_rule->timestamp = sibling->timestamp;
3763 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n"));
3765 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
3767 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
3771 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
3772 struct ipfw_ioc_state *ioc_state)
3774 const struct ipfw_flow_id *id;
3775 struct ipfw_ioc_flowid *ioc_id;
3777 ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
3778 0 : dyn_rule->expire - time_second;
3779 ioc_state->pcnt = dyn_rule->pcnt;
3780 ioc_state->bcnt = dyn_rule->bcnt;
3782 ioc_state->dyn_type = dyn_rule->dyn_type;
3783 ioc_state->count = dyn_rule->count;
3785 ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;
3788 ioc_id = &ioc_state->id;
3790 ioc_id->type = ETHERTYPE_IP;
3791 ioc_id->u.ip.dst_ip = id->dst_ip;
3792 ioc_id->u.ip.src_ip = id->src_ip;
3793 ioc_id->u.ip.dst_port = id->dst_port;
3794 ioc_id->u.ip.src_port = id->src_port;
3795 ioc_id->u.ip.proto = id->proto;
3799 ipfw_ctl_get_rules(struct sockopt *sopt)
3801 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3805 uint32_t dcount = 0;
3808 * pass up a copy of the current rules. Static rules
3809 * come first (the last of which has number IPFW_DEFAULT_RULE),
3810 * followed by a possibly empty list of dynamic rules.
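 * The reply buffer is thus sized as static_ioc_len bytes of
 * ipfw_ioc_rule entries followed by one ipfw_ioc_state per dynamic
 * rule ('dcount' of them at snapshot time).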
3813 size = static_ioc_len; /* size of static rules */
3814 if (ipfw_dyn_v) { /* add size of dyn.rules */
3816 size += dcount * sizeof(struct ipfw_ioc_state);
3819 if (sopt->sopt_valsize < size) {
3820 /* short length, no need to return incomplete rules */
3821 /* XXX: if superuser, no need to zero buffer */
3822 bzero(sopt->sopt_val, sopt->sopt_valsize);
3825 bp = sopt->sopt_val;
3827 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3828 bp = ipfw_copy_rule(rule, bp);
3830 if (ipfw_dyn_v && dcount != 0) {
3831 struct ipfw_ioc_state *ioc_state = bp;
3832 uint32_t dcount2 = 0;
3834 size_t old_size = size;
3838 lockmgr(&dyn_lock, LK_SHARED);
3840 /* Check 'ipfw_dyn_v' again with lock held */
3841 if (ipfw_dyn_v == NULL)
3844 for (i = 0; i < curr_dyn_buckets; i++) {
3848 * The # of dynamic rules may have grown after the
3849 * snapshot of 'dyn_count' was taken, so we will have
3850 * to check 'dcount' (snapshot of dyn_count) here to
3851 * make sure that we don't overflow the pre-allocated
3854 for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
3855 p = p->next, ioc_state++, dcount--, dcount2++)
3856 ipfw_copy_state(p, ioc_state);
3859 lockmgr(&dyn_lock, LK_RELEASE);
3862 * The # of dynamic rules may have shrunk after the
3863 * snapshot of 'dyn_count' was taken. To give the user a
3864 * correct dynamic rule count, we use the 'dcount2'
3865 * calculated above (with shared lockmgr lock held).
3867 size = static_ioc_len +
3868 (dcount2 * sizeof(struct ipfw_ioc_state));
3869 KKASSERT(size <= old_size);
3872 sopt->sopt_valsize = size;
3877 ipfw_set_disable_dispatch(netmsg_t nmsg)
3879 struct lwkt_msg *lmsg = &nmsg->lmsg;
3880 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3883 ctx->ipfw_set_disable = lmsg->u.ms_result32;
3885 ifnet_forwardmsg(lmsg, mycpuid + 1);
3889 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
3891 struct netmsg_base nmsg;
3892 struct lwkt_msg *lmsg;
3893 uint32_t set_disable;
3895 /* IPFW_DEFAULT_SET is always enabled */
3896 enable |= (1 << IPFW_DEFAULT_SET);
3897 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
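/*
 * Example: with sets 1 and 2 currently disabled (mask 0x6),
 * disable = 0x8 (set 3) and enable = 0x4 (set 2) yield
 * set_disable = (0x6 | 0x8) & ~(0x4 | (1 << IPFW_DEFAULT_SET)) = 0xa,
 * i.e. sets 1 and 3 disabled.
 */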
3899 bzero(&nmsg, sizeof(nmsg));
3900 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
3901 0, ipfw_set_disable_dispatch);
3903 lmsg->u.ms_result32 = set_disable;
3905 ifnet_domsg(lmsg, 0);
3909 * {set|get}sockopt parser.
3912 ipfw_ctl(struct sockopt *sopt)
3920 switch (sopt->sopt_name) {
3922 error = ipfw_ctl_get_rules(sopt);
3926 ipfw_flush(0 /* keep default rule */);
3930 error = ipfw_ctl_add_rule(sopt);
3935 * IP_FW_DEL is used for deleting single rules or sets,
3936 * and (ab)used to atomically manipulate sets.
3937 * Argument size is used to distinguish between the two:
3939 * delete single rule or set of rules,
3940 * or reassign rules (or sets) to a different set.
3941 * 2 * sizeof(uint32_t)
3942 * atomic disable/enable sets.
3943 * first uint32_t contains sets to be disabled,
3944 * second uint32_t contains sets to be enabled.
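 * E.g. a single uint32_t selects ipfw_ctl_alter() (delete or
 * reassign), while two uint32_t values {0x2, 0x4} request that
 * set 1 be disabled and set 2 be enabled.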
3946 masks = sopt->sopt_val;
3947 size = sopt->sopt_valsize;
3948 if (size == sizeof(*masks)) {
3950 * Delete or reassign static rule
3952 error = ipfw_ctl_alter(masks[0]);
3953 } else if (size == (2 * sizeof(*masks))) {
3955 * Set enable/disable
3957 ipfw_ctl_set_disable(masks[0], masks[1]);
3964 case IP_FW_RESETLOG: /* argument is an int, the rule number */
3967 if (sopt->sopt_val != 0) {
3968 error = soopt_to_kbuf(sopt, &rulenum,
3969 sizeof(int), sizeof(int));
3973 error = ipfw_ctl_zero_entry(rulenum,
3974 sopt->sopt_name == IP_FW_RESETLOG);
3978 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
3985 * This procedure is only used to handle keepalives. It is invoked
3986 * every dyn_keepalive_period
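 * The dispatch handler walks the dynamic rule buckets, expires dead
 * entries, and for established TCP states close to their expiry sends
 * a keepalive segment towards each endpoint (the two send_pkt() calls
 * below), releasing dyn_lock around the sends and rescanning if the
 * bucket array generation changed in the meantime.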
3989 ipfw_tick_dispatch(netmsg_t nmsg)
3995 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
3996 KKASSERT(IPFW_LOADED);
4000 lwkt_replymsg(&nmsg->lmsg, 0);
4003 if (ipfw_dyn_v == NULL || dyn_count == 0)
4006 keep_alive = time_second;
4008 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4010 if (ipfw_dyn_v == NULL || dyn_count == 0) {
4011 lockmgr(&dyn_lock, LK_RELEASE);
4014 gen = dyn_buckets_gen;
4016 for (i = 0; i < curr_dyn_buckets; i++) {
4017 ipfw_dyn_rule *q, *prev;
4019 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
4020 uint32_t ack_rev, ack_fwd;
4021 struct ipfw_flow_id id;
4023 if (q->dyn_type == O_LIMIT_PARENT)
4026 if (TIME_LEQ(q->expire, time_second)) {
4028 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
4033 * Keep alive processing
4038 if (q->id.proto != IPPROTO_TCP)
4040 if ((q->state & BOTH_SYN) != BOTH_SYN)
4042 if (TIME_LEQ(time_second + dyn_keepalive_interval,
4044 goto next; /* too early */
4045 if (q->keep_alive == keep_alive)
4046 goto next; /* already done */
4049 * Save the necessary information, so that it can
4050 * survive possible blocking in send_pkt()
4053 ack_rev = q->ack_rev;
4054 ack_fwd = q->ack_fwd;
4056 /* Sending has been started */
4057 q->keep_alive = keep_alive;
4059 /* Release the lock to avoid a possible deadlock */
4060 lockmgr(&dyn_lock, LK_RELEASE);
4061 send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
4062 send_pkt(&id, ack_fwd - 1, ack_rev, 0);
4063 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4065 if (gen != dyn_buckets_gen) {
4067 * Dyn bucket array has been changed during
4068 * the above two sends; reiterate.
4077 lockmgr(&dyn_lock, LK_RELEASE);
4079 callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
4084 * This procedure is only used to handle keepalives. It is invoked
4085 * every dyn_keepalive_period
4088 ipfw_tick(void *dummy __unused)
4090 struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.lmsg;
4092 KKASSERT(mycpuid == IPFW_CFGCPUID);
4096 KKASSERT(lmsg->ms_flags & MSGF_DONE);
4098 lwkt_sendmsg(IPFW_CFGPORT, lmsg);
4099 /* ipfw_timeout_netmsg's handler reset this callout */
4106 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
4108 struct ip_fw_args args;
4109 struct mbuf *m = *m0;
4111 int tee = 0, error = 0, ret;
4113 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
4114 /* Extract info from dummynet tag */
4115 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
4116 KKASSERT(mtag != NULL);
4117 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
4118 KKASSERT(args.rule != NULL);
4120 m_tag_delete(m, mtag);
4121 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
4129 ret = ipfw_chk(&args);
4147 case IP_FW_DUMMYNET:
4148 /* Send packet to the appropriate pipe */
4149 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
4158 * Must clear bridge tag when changing
4160 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
4161 if (ip_divert_p != NULL) {
4162 m = ip_divert_p(m, tee, 1);
4166 /* not sure this is the right error msg */
4172 panic("unknown ipfw return value: %d\n", ret);
4180 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
4182 struct ip_fw_args args;
4183 struct mbuf *m = *m0;
4185 int tee = 0, error = 0, ret;
4187 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
4188 /* Extract info from dummynet tag */
4189 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
4190 KKASSERT(mtag != NULL);
4191 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
4192 KKASSERT(args.rule != NULL);
4194 m_tag_delete(m, mtag);
4195 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
4203 ret = ipfw_chk(&args);
4221 case IP_FW_DUMMYNET:
4222 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
4230 if (ip_divert_p != NULL) {
4231 m = ip_divert_p(m, tee, 0);
4235 /* not sure this is the right error msg */
4241 panic("unknown ipfw return value: %d\n", ret);
4251 struct pfil_head *pfh;
4253 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
4255 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4259 pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
4260 pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
4266 struct pfil_head *pfh;
4268 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
4270 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4274 pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
4275 pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
4279 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
4281 struct lwkt_msg *lmsg = &nmsg->lmsg;
4282 int enable = lmsg->u.ms_result;
4284 if (fw_enable == enable)
4293 lwkt_replymsg(lmsg, 0);
4297 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
4299 struct netmsg_base nmsg;
4300 struct lwkt_msg *lmsg;
4304 error = sysctl_handle_int(oidp, &enable, 0, req);
4305 if (error || req->newptr == NULL)
4308 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
4309 0, ipfw_sysctl_enable_dispatch);
4311 lmsg->u.ms_result = enable;
4313 return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
4317 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
4319 return sysctl_int_range(oidp, arg1, arg2, req,
4320 IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
4324 ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
4328 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4330 value = dyn_buckets;
4331 error = sysctl_handle_int(oidp, &value, 0, req);
4332 if (error || !req->newptr)
4336 * Make sure we have a power of 2 and
4337 * do not allow more than 64k entries.
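 * For example 4096 is accepted, while 4100 is rejected because
 * (4100 & 4099) != 0.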
4340 if (value <= 1 || value > 65536)
4342 if ((value & (value - 1)) != 0)
4346 dyn_buckets = value;
4348 lockmgr(&dyn_lock, LK_RELEASE);
4353 ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
4355 return sysctl_int_range(oidp, arg1, arg2, req,
4356 1, dyn_keepalive_period - 1);
4360 ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
4362 return sysctl_int_range(oidp, arg1, arg2, req,
4363 1, dyn_keepalive_period - 1);
4367 ipfw_ctx_init_dispatch(netmsg_t nmsg)
4369 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
4370 struct ipfw_context *ctx;
4371 struct ip_fw *def_rule;
4373 ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
4374 ipfw_ctx[mycpuid] = ctx;
4376 def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);
4378 def_rule->act_ofs = 0;
4379 def_rule->rulenum = IPFW_DEFAULT_RULE;
4380 def_rule->cmd_len = 1;
4381 def_rule->set = IPFW_DEFAULT_SET;
4383 def_rule->cmd[0].len = 1;
4384 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
4385 def_rule->cmd[0].opcode = O_ACCEPT;
4387 def_rule->cmd[0].opcode = O_DENY;
4390 def_rule->refcnt = 1;
4391 def_rule->cpuid = mycpuid;
4393 /* Install the default rule */
4394 ctx->ipfw_default_rule = def_rule;
4395 ctx->ipfw_layer3_chain = def_rule;
4397 /* Link rule CPU sibling */
4398 ipfw_link_sibling(fwmsg, def_rule);
4400 /* Statistics only need to be updated once */
4402 ipfw_inc_static_count(def_rule);
4404 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
4408 ipfw_init_dispatch(netmsg_t nmsg)
4410 struct netmsg_ipfw fwmsg;
4414 kprintf("IP firewall already loaded\n");
4419 bzero(&fwmsg, sizeof(fwmsg));
4420 netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
4421 0, ipfw_ctx_init_dispatch);
4422 ifnet_domsg(&fwmsg.base.lmsg, 0);
4424 ip_fw_chk_ptr = ipfw_chk;
4425 ip_fw_ctl_ptr = ipfw_ctl;
4426 ip_fw_dn_io_ptr = ipfw_dummynet_io;
4428 kprintf("ipfw2 initialized, default to %s, logging ",
4429 ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
4430 O_ACCEPT ? "accept" : "deny");
4432 #ifdef IPFIREWALL_VERBOSE
4435 #ifdef IPFIREWALL_VERBOSE_LIMIT
4436 verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
4438 if (fw_verbose == 0) {
4439 kprintf("disabled\n");
4440 } else if (verbose_limit == 0) {
4441 kprintf("unlimited\n");
4443 kprintf("limited to %d packets/entry by default\n",
4447 callout_init_mp(&ipfw_timeout_h);
4448 netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport,
4449 MSGF_DROPABLE | MSGF_PRIORITY,
4450 ipfw_tick_dispatch);
4451 lockinit(&dyn_lock, "ipfw_dyn", 0, 0);
4454 callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);
4459 lwkt_replymsg(&nmsg->lmsg, error);
4465 struct netmsg_base smsg;
4467 netmsg_init(&smsg, NULL, &curthread->td_msgport,
4468 0, ipfw_init_dispatch);
4469 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
4475 ipfw_fini_dispatch(netmsg_t nmsg)
4479 if (ipfw_refcnt != 0) {
4487 callout_stop(&ipfw_timeout_h);
4489 netmsg_service_sync();
4492 if ((ipfw_timeout_netmsg.lmsg.ms_flags & MSGF_DONE) == 0) {
4494 * Callout message is pending; drop it
4496 lwkt_dropmsg(&ipfw_timeout_netmsg.lmsg);
4500 ip_fw_chk_ptr = NULL;
4501 ip_fw_ctl_ptr = NULL;
4502 ip_fw_dn_io_ptr = NULL;
4503 ipfw_flush(1 /* kill default rule */);
4505 /* Free pre-cpu context */
4506 for (cpu = 0; cpu < ncpus; ++cpu)
4507 kfree(ipfw_ctx[cpu], M_IPFW);
4509 kprintf("IP firewall unloaded\n");
4511 lwkt_replymsg(&nmsg->lmsg, error);
4517 struct netmsg_base smsg;
4519 netmsg_init(&smsg, NULL, &curthread->td_msgport,
4520 0, ipfw_fini_dispatch);
4521 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
4524 #endif /* KLD_MODULE */
4527 ipfw_modevent(module_t mod, int type, void *unused)
4538 kprintf("ipfw statically compiled, cannot unload\n");
4550 static moduledata_t ipfwmod = {
4555 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
4556 MODULE_VERSION(ipfw, 1);