2 * Copyright (c) 1993 Daniel Boulet
3 * Copyright (c) 1994 Ugen J.S.Antsilevich
4 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
5 * Copyright (c) 2015 The DragonFly Project. All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Bill Yuan <bycn82@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 #error IPFIREWALL3 requires INET.
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
49 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56 #include <sys/in_cksum.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_timer.h>
70 #include <netinet/tcp_var.h>
71 #include <netinet/tcpip.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/ip_divert.h>
75 #include <netinet/if_ether.h>
78 #include <net/radix.h>
79 #include <net/route.h>
81 #include <net/netmsg2.h>
83 #include <net/ipfw3/ip_fw.h>
84 #include <net/ipfw3/ip_fw3_log.h>
85 #include <net/ipfw3/ip_fw3_table.h>
86 #include <net/ipfw3_basic/ip_fw3_basic.h>
87 #include <net/ipfw3_nat/ip_fw3_nat.h>
88 #include <net/dummynet3/ip_dummynet3.h>
/* Allocation tag used for all ipfw3 rule/context allocations in this file. */
90 MALLOC_DEFINE(M_IPFW3, "IPFW3", "ip_fw3 default module");
/*
 * Debug printf: compiled to a real kprintf() only when IPFIREWALL_DEBUG
 * is defined; otherwise it expands to a no-op.
 * NOTE(review): this excerpt is missing interleaved lines (the embedded
 * original line numbering skips), so the #else/#endif structure of this
 * conditional is not fully visible here.
 */
92 #ifdef IPFIREWALL_DEBUG
93 #define DPRINTF(fmt, ...) \
96 kprintf(fmt, __VA_ARGS__); \
99 #define DPRINTF(fmt, ...) ((void)0)
/* Capacity of the module registry and of each module's opcode table. */
102 #define MAX_MODULE 10
103 #define MAX_OPCODE_PER_MODULE 100
/* Bounds and default for the rule-number auto-increment step
 * (used when a rule is added with rulenum == 0). */
105 #define IPFW_AUTOINC_STEP_MIN 1
106 #define IPFW_AUTOINC_STEP_MAX 1000
107 #define IPFW_AUTOINC_STEP_DEF 100
/*
 * Message payload for adding a rule: carries the ioc_rule plus per-CPU
 * chaining pointers as the message is forwarded CPU to CPU.
 * NOTE(review): the struct headers/footers are missing from this excerpt
 * (original line numbering skips); field lists shown are partial.
 */
111 struct netmsg_base base;
112 const struct ipfw_ioc_rule *ioc_rule;
114 struct ip_fw *next_rule;
115 struct ip_fw *prev_rule;
116 struct ip_fw *sibling; /* sibling of this rule on the previous CPU */
/* Message payload for delete/move/zero operations on rules and states. */
120 struct netmsg_base base;
122 struct ip_fw *start_rule;
123 struct ip_fw *prev_rule;
124 struct ipfw_ioc_state *ioc_state;
/* Message payload for zeroing counters (netmsg_zent). */
131 struct netmsg_base base;
132 struct ip_fw *start_rule;
/* Hooks filled in by the ipfw3_nat module when it is loaded. */
137 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
138 ipfw_nat_cfg_t *ipfw_nat_del_ptr;
139 ipfw_nat_cfg_t *ipfw_nat_flush_ptr;
140 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
141 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
143 /* handlers which implemented in ipfw_basic module */
144 ipfw_basic_delete_state_t *ipfw_basic_flush_state_prt = NULL;
145 ipfw_basic_append_state_t *ipfw_basic_append_state_prt = NULL;
147 static struct ipfw_nat_context *ipfw_nat_ctx;
149 extern int ip_fw_loaded;
150 static uint32_t static_count; /* # of static rules */
151 static uint32_t static_ioc_len; /* bytes of static rules */
/* Nonzero while a flush is in progress; ipfw_chk() short-circuits then. */
152 static int ipfw_flushing;
153 static int fw_verbose = 0; /* log matches via ipfw_log() when set */
155 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
/* Sysctl handlers (validate and apply net.inet.ip.fw3.* writes). */
157 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
158 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
/* net.inet.ip.fw3 sysctl tree. */
160 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw3, CTLFLAG_RW, 0, "Firewall");
161 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
162 &fw3_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
163 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
164 &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
165 "Rule number autincrement step");
166 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO,one_pass,CTLFLAG_RW,
168 "Only do a single pass through ipfw when using dummynet(4)");
169 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, debug, CTLFLAG_RW,
170 &fw_debug, 0, "Enable printing of debug ip_fw statements");
171 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose, CTLFLAG_RW,
172 &fw_verbose, 0, "Log matches to ipfw rules");
173 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, static_count, CTLFLAG_RD,
174 &static_count, 0, "Number of static rules");
/* Dispatch table: [module id][opcode] -> filter function. */
176 filter_func filter_funcs[MAX_MODULE][MAX_OPCODE_PER_MODULE];
177 struct ipfw_module ipfw_modules[MAX_MODULE];
/* Per-CPU firewall contexts (replicated rule chains). */
178 struct ipfw_context *ipfw_ctx[MAXCPU];
179 static int ipfw_ctl(struct sockopt *sopt);
/* Built-in accept/deny opcode handlers (module id 0). */
183 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
184 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
186 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
187 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
188 void init_module(void);
/*
 * Register an ipfw3 extension module: claim the first free slot
 * (type == 0) in ipfw_modules[] and record its name.
 * NOTE(review): lines are missing from this excerpt (original numbering
 * skips), so slot bookkeeping and the return value are not fully visible.
 */
192 register_ipfw_module(int module_id,char *module_name)
194 struct ipfw_module *tmp;
198 for (i=0; i < MAX_MODULE; i++) {
199 if (tmp->type == 0) {
/* NOTE(review): strncpy() with strlen(module_name) copies no NUL
 * terminator; relies on tmp->name being pre-zeroed — verify upstream. */
202 strncpy(tmp->name, module_name, strlen(module_name));
207 kprintf("ipfw3 module %s loaded ", module_name);
/*
 * Unregister an ipfw3 extension module.  Refuses (presumably — the
 * refusal path is not visible in this excerpt) while any rule on this
 * CPU still references an opcode belonging to the module, then clears
 * the module's registry slot and its filter_funcs[] row.
 */
211 unregister_ipfw_module(int module_id)
213 struct ipfw_module *tmp;
216 int i, len, cmdlen, found;
220 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
/* Scan every cmd of every rule for a reference to this module. */
221 fw = ctx->ipfw_rule_chain;
222 for (; fw; fw = fw->next) {
223 for (len = fw->cmd_len, cmd = fw->cmd; len > 0;
225 cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
227 if (cmd->module == 0 &&
228 (cmd->opcode == 0 || cmd->opcode == 1)) {
229 //action accept or deny
230 } else if (cmd->module == module_id) {
/* Release the registry slot for this module. */
240 for (i = 0; i < MAX_MODULE; i++) {
241 if (tmp->type == 1 && tmp->id == module_id) {
243 kprintf("ipfw3 module %s unloaded ", tmp->name);
/* Clear the dispatch row; keep the built-in accept/deny handlers
 * (module 0, opcodes 0 and 1) intact. */
249 for (i = 0; i < MAX_OPCODE_PER_MODULE; i++) {
250 if (module_id == 0) {
251 if (i ==0 || i == 1) {
255 filter_funcs[module_id][i] = NULL;
/* Install a filter handler into the [module][opcode] dispatch table. */
262 register_ipfw_filter_funcs(int module, int opcode, filter_func func)
264 filter_funcs[module][opcode] = func;
/*
 * Built-in handler for the basic "accept" action: terminate rule
 * evaluation (IP_FW_CTL_DONE) with verdict IP_FW_PASS, logging the
 * packet when verbose logging is on (the fw_verbose guard around the
 * ipfw_log() call is not visible in this excerpt).
 */
268 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
269 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
271 *cmd_val = IP_FW_PASS;
272 *cmd_ctl = IP_FW_CTL_DONE;
274 ipfw_log((*args)->m, (*args)->eh, cmd->arg1);
/*
 * Built-in handler for the basic "deny" action: terminate rule
 * evaluation (IP_FW_CTL_DONE) with verdict IP_FW_DENY, logging the
 * packet when verbose logging is on (guard not visible in this excerpt).
 */
279 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
280 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
282 *cmd_val = IP_FW_DENY;
283 *cmd_ctl = IP_FW_CTL_DONE;
285 ipfw_log((*args)->m, (*args)->eh, cmd->arg1);
/*
 * init_module() body (the function header is not visible in this
 * excerpt; it is declared above as `void init_module(void)`): zero the
 * module registry and dispatch table, then install the built-in
 * accept/deny handlers under module id 0.
 */
292 memset(ipfw_modules, 0, sizeof(struct ipfw_module) * MAX_MODULE);
293 memset(filter_funcs, 0, sizeof(filter_func) *
294 MAX_OPCODE_PER_MODULE * MAX_MODULE);
295 register_ipfw_filter_funcs(0, O_BASIC_ACCEPT,
296 (filter_func)check_accept);
297 register_ipfw_filter_funcs(0, O_BASIC_DENY, (filter_func)check_deny);
/* Release the storage of a single rule (allocated from M_IPFW3). */
301 ipfw_free_rule(struct ip_fw *rule)
303 kfree(rule, M_IPFW3);
/*
 * Resolve and cache the rule at which evaluation continues after `me`.
 * For a skipto action this is the first rule with rulenum >= arg1;
 * otherwise (or if no such rule exists — fallback not fully visible in
 * this excerpt) it is simply me->next.  The result is memoized in
 * me->next_rule.
 */
308 static struct ip_fw *
309 lookup_next_rule(struct ip_fw *me)
311 struct ip_fw *rule = NULL;
314 /* look for action, in case it is a skipto */
315 cmd = ACTION_PTR(me);
316 if ((int)cmd->module == MODULE_BASIC_ID &&
317 (int)cmd->opcode == O_BASIC_SKIPTO) {
318 for (rule = me->next; rule; rule = rule->next) {
319 if (rule->rulenum >= cmd->arg1)
323 if (rule == NULL) { /* failure or not a skipto */
326 me->next_rule = rule;
331 * Rules are stored in ctx->ipfw_rule_chain, and each rule is a
332 * combination of multiple cmds (ipfw_insn).  Within a rule the filter
333 * cmds come first, followed by the action cmds.  The outer loop walks
334 * the rules and the inner loop walks each rule's cmds; for every cmd
335 * the function registered for its module id and opcode id is invoked,
336 * and processing continues according to the returned control value.
/*
 * Main packet-filter entry point: classify the packet (protocol, ports,
 * addresses), locate the starting rule (chain head, divert/skipto
 * restart point, or the default rule while flushing), then evaluate each
 * rule's microinstructions through filter_funcs[][], honoring F_NOT/F_OR
 * modifiers, until an action returns a verdict.
 *
 * NOTE(review): a large number of lines are missing from this excerpt
 * (the embedded original line numbering skips heavily); the switch on
 * cmd_ctl, several gotos, and most closing braces are not visible.  The
 * comments below annotate only what is shown.
 */
339 ipfw_chk(struct ip_fw_args *args)
341 struct mbuf *m = args->m;
342 struct ip *ip = mtod(m, struct ip *);
343 struct ip_fw *f = NULL; /* matching rule */
344 int cmd_val = IP_FW_PASS;
346 struct divert_info *divinfo;
349 * hlen The length of the IPv4 header.
350 * hlen >0 means we have an IPv4 packet.
352 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
355 * offset The offset of a fragment. offset != 0 means that
356 * we have a fragment at this offset of an IPv4 packet.
357 * offset == 0 means that (if this is an IPv4 packet)
358 * this is the first or only fragment.
363 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
364 struct in_addr src_ip, dst_ip; /* NOTE: network format */
366 uint8_t prev_module = -1, prev_opcode = -1; /* previous module & opcode */
367 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
/* Packets we generated ourselves (e.g. keepalives) bypass the filter. */
369 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
370 return IP_FW_PASS; /* accept */
372 if (args->eh == NULL || /* layer 3 packet */
373 (m->m_pkthdr.len >= sizeof(struct ip) &&
374 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
375 hlen = ip->ip_hl << 2;
378 * Collect parameters into local variables for faster matching.
380 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
381 proto = args->f_id.proto = 0; /* mark f_id invalid */
382 goto after_ip_checks;
385 proto = args->f_id.proto = ip->ip_p;
/* Layer-2 packets still carry network-order ip_off/ip_len. */
388 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
389 offset = ntohs(ip->ip_off) & IP_OFFMASK;
390 ip_len = ntohs(ip->ip_len);
392 offset = ip->ip_off & IP_OFFMASK;
/* Ensure `len` contiguous bytes are in the first mbuf, refreshing
 * the `ip` pointer after a pullup. */
396 #define PULLUP_TO(len) \
398 if (m->m_len < (len)) { \
399 args->m = m = m_pullup(m, (len)); \
401 goto pullup_failed; \
402 ip = mtod(m, struct ip *); \
/* Extract TCP ports and flags (only for non-fragments, presumably —
 * the offset check is not visible in this excerpt). */
412 PULLUP_TO(hlen + sizeof(struct tcphdr));
413 tcp = L3HDR(struct tcphdr, ip);
414 dst_port = tcp->th_dport;
415 src_port = tcp->th_sport;
416 args->f_id.flags = tcp->th_flags;
/* Extract UDP ports. */
424 PULLUP_TO(hlen + sizeof(struct udphdr));
425 udp = L3HDR(struct udphdr, ip);
426 dst_port = udp->uh_dport;
427 src_port = udp->uh_sport;
/* For ICMP the type is stored where flags normally go. */
434 L3HDR(struct icmp, ip)->icmp_type;
/* Normalize the flow id into host byte order. */
444 args->f_id.src_ip = ntohl(src_ip.s_addr);
445 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
446 args->f_id.src_port = src_port = ntohs(src_port);
447 args->f_id.dst_port = dst_port = ntohs(dst_port);
452 * Packet has already been tagged. Look for the next rule
453 * to restart processing.
455 * If fw3_one_pass != 0 then just accept it.
456 * XXX should not happen here, but optimized out in
462 /* This rule is being/has been flushed */
466 f = args->rule->next_rule;
468 f = lookup_next_rule(args->rule);
471 * Find the starting rule. It can be either the first
472 * one, or the one after divert_rule if asked so.
476 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
478 divinfo = m_tag_data(mtag);
479 skipto = divinfo->skipto;
484 f = ctx->ipfw_rule_chain;
485 if (args->eh == NULL && skipto != 0) {
486 /* No skipto during rule flushing */
490 if (skipto >= IPFW_DEFAULT_RULE) {
491 return IP_FW_DENY; /* invalid */
/* Advance past all rules numbered <= skipto. */
493 while (f && f->rulenum <= skipto) {
496 if (f == NULL) { /* drop packet */
499 } else if (ipfw_flushing) {
500 /* Rules are being flushed; skip to default rule */
501 f = ctx->ipfw_default_rule;
/* The divert tag has served its purpose; remove it. */
504 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) {
505 m_tag_delete(m, mtag);
509 * Now scan the rules, and parse microinstructions for each rule.
511 int prev_val; /* previous result of 'or' filter */
515 /* foreach rule in chain */
516 for (; f; f = f->next) {
517 again: /* check the rule again*/
/* Rules in a disabled set are skipped entirely. */
518 if (ctx->ipfw_set_disable & (1 << f->set)) {
523 /* foreach cmd in rule */
524 for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen,
525 cmd = (ipfw_insn *)((uint32_t *)cmd+ cmdlen)) {
528 /* skip 'or' filter when already match */
529 if (cmd->len & F_OR &&
530 cmd->module == prev_module &&
531 cmd->opcode == prev_opcode &&
536 check_body: /* check the body of the rule again.*/
/* Dispatch to the module/opcode handler registered for this cmd. */
537 (filter_funcs[cmd->module][cmd->opcode])
538 (&cmd_ctl, &cmd_val, &args, &f, cmd, ip_len);
541 if (prev_val == 0) /* but 'or' failed */
544 case IP_FW_CTL_AGAIN:
551 case IP_FW_CTL_CHK_STATE:
552 /* update the cmd and l */
/* Jump to the rule's action portion. */
554 l = f->cmd_len - f->act_ofs;
557 if (cmd->len & F_NOT)
560 if (cmd->len & F_OR) { /* has 'or' */
561 if (!cmd_val) { /* not matched */
562 if(prev_val == -1){ /* first 'or' */
564 prev_module = cmd->module;
565 prev_opcode = cmd->opcode;
566 } else if (prev_module == cmd->module &&
567 prev_opcode == cmd->opcode) {
568 /* continuous 'or' filter */
569 } else if (prev_module != cmd->module ||
570 prev_opcode != cmd->opcode) {
571 /* 'or' filter changed */
576 prev_module = cmd->module;
577 prev_opcode = cmd->opcode;
580 } else { /* has 'or' and matched */
582 prev_module = cmd->module;
583 prev_opcode = cmd->opcode;
586 if (!cmd_val) { /* not matched */
590 /* previous 'or' not matched */
598 } /* end of inner for, scan opcodes */
599 next_rule:; /* try next rule */
600 } /* end of outer for, scan rules */
/* Fell off the end of the chain — should be impossible because of
 * the default rule; deny defensively. */
601 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
605 /* Update statistics */
608 f->timestamp = time_second;
613 kprintf("pullup failed\n");
/*
 * Hand a packet to dummynet(4): attach a PACKET_TAG_DUMMYNET tag
 * describing the pipe/queue number, direction, flow id, originating
 * rule and the CPU/netisr port to resume on, then mark the mbuf as
 * dummynet-tagged.  (Tag-allocation failure handling and the final
 * hand-off are not visible in this excerpt.)
 */
618 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
623 const struct ipfw_flow_id *id;
624 struct dn_flow_id *fid;
628 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
633 m_tag_prepend(m, mtag);
635 pkt = m_tag_data(mtag);
636 bzero(pkt, sizeof(*pkt));
/* The rule's action must be a dummynet pipe or queue. */
638 cmd = (ipfw_insn *)((uint32_t *)fwa->rule->cmd + fwa->rule->act_ofs);
639 KASSERT(cmd->opcode == O_DUMMYNET_PIPE ||
640 cmd->opcode == O_DUMMYNET_QUEUE,
641 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
644 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
646 pkt->pipe_nr = pipe_nr;
/* Remember where to re-inject the packet after shaping. */
648 pkt->cpuid = mycpuid;
649 pkt->msgport = netisr_curport();
/* Copy the flow id so dummynet can classify into queues. */
653 fid->fid_dst_ip = id->dst_ip;
654 fid->fid_src_ip = id->src_ip;
655 fid->fid_dst_port = id->dst_port;
656 fid->fid_src_port = id->src_port;
657 fid->fid_proto = id->proto;
658 fid->fid_flags = id->flags;
660 pkt->dn_priv = fwa->rule;
662 if ((int)cmd->opcode == O_DUMMYNET_PIPE)
663 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
665 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
/* Account a newly added static rule (count and ioctl-visible bytes);
 * only CPU0 maintains these global statistics. */
669 ipfw_inc_static_count(struct ip_fw *rule)
671 /* Static rule's counts are updated only on CPU0 */
672 KKASSERT(mycpuid == 0);
675 static_ioc_len += IOC_RULESIZE(rule);
/* Un-account a removed static rule, asserting the counters never
 * underflow; only CPU0 maintains these global statistics. */
679 ipfw_dec_static_count(struct ip_fw *rule)
681 int l = IOC_RULESIZE(rule);
683 /* Static rule's counts are updated only on CPU0 */
684 KKASSERT(mycpuid == 0);
686 KASSERT(static_count > 0, ("invalid static count %u", static_count));
689 KASSERT(static_ioc_len >= l,
690 ("invalid static len %u", static_ioc_len));
/*
 * Per-CPU half of rule insertion: build an ip_fw from the ioc_rule,
 * insert it at its number-ordered position in this CPU's chain, link it
 * to its sibling on the previous CPU, account it on CPU0 only, and
 * forward the message to the next CPU.
 */
695 ipfw_add_rule_dispatch(netmsg_t nmsg)
697 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
698 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
699 struct ip_fw *rule, *prev,*next;
700 const struct ipfw_ioc_rule *ioc_rule;
702 ioc_rule = fwmsg->ioc_rule;
703 // create rule by ioc_rule
704 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW3, M_WAITOK | M_ZERO);
705 rule->act_ofs = ioc_rule->act_ofs;
706 rule->cmd_len = ioc_rule->cmd_len;
707 rule->rulenum = ioc_rule->rulenum;
708 rule->set = ioc_rule->set;
/* cmd_len is in 32-bit words, hence * 4 bytes. */
709 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4);
/* Find the insertion point: first rule numbered above ours. */
711 for (prev = NULL, next = ctx->ipfw_rule_chain;
712 next; prev = next, next = next->next) {
713 if (next->rulenum > ioc_rule->rulenum) {
/* The default rule always exists, so `next` can never be NULL. */
717 KASSERT(next != NULL, ("no default rule?!"));
720 * Insert rule into the pre-determined position
726 rule->next = ctx->ipfw_rule_chain;
727 ctx->ipfw_rule_chain = rule;
731 * If a sibling on the previous CPU exists,
732 * its sibling pointer should reference the current rule.
734 if (fwmsg->sibling != NULL) {
735 fwmsg->sibling->sibling = rule;
737 /* prepare for next CPU */
738 fwmsg->sibling = rule;
741 /* Statistics only need to be updated once */
742 ipfw_inc_static_count(rule);
744 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
748 * Confirm the rule number (auto-assigning one when rulenum == 0),
749 * call the dispatch function to add the rule into every CPU's list,
750 * and update the statistics.
753 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule)
755 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
756 struct netmsg_ipfw fwmsg;
757 struct netmsg_base *nmsg;
/* Must run on the configuration port/CPU. */
760 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
763 * If rulenum is 0, find highest numbered rule before the
764 * default rule, and add rule number incremental step.
766 if (ioc_rule->rulenum == 0) {
767 int step = autoinc_step;
769 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
770 step <= IPFW_AUTOINC_STEP_MAX);
773 * Locate the highest numbered rule before default
775 for (f = ctx->ipfw_rule_chain; f; f = f->next) {
776 if (f->rulenum == IPFW_DEFAULT_RULE)
778 ioc_rule->rulenum = f->rulenum;
/* Only bump by `step` if it keeps us below the default rule. */
780 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
781 ioc_rule->rulenum += step;
783 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
784 ioc_rule->rulenum != 0,
785 ("invalid rule num %d", ioc_rule->rulenum));
/* Broadcast the insertion to all CPUs, starting at CPU0. */
787 bzero(&fwmsg, sizeof(fwmsg));
789 netmsg_init(nmsg, NULL, &curthread->td_msgport,
790 0, ipfw_add_rule_dispatch);
791 fwmsg.ioc_rule = ioc_rule;
793 ifnet_domsg(&nmsg->lmsg, 0);
795 DPRINTF("++ installed rule %d, static count now %d\n",
796 ioc_rule->rulenum, static_count);
800 * Free storage associated with a static rule (including derived
802 * state, per the surrounding design). The caller is in charge of
803 * clearing rule pointers to avoid dangling references.
804 * @return a pointer to the next entry.
805 * Arguments are not checked, so they better be correct.
806 * Must be called at splimp().
808 static struct ip_fw *
809 ipfw_delete_rule(struct ipfw_context *ctx,
810 struct ip_fw *prev, struct ip_fw *rule)
/* Unlink: head removal when prev == NULL, mid-chain otherwise. */
813 ctx->ipfw_rule_chain = rule->next;
815 prev->next = rule->next;
/* Global statistics are kept on the config CPU only. */
817 if (mycpuid == IPFW_CFGCPUID)
818 ipfw_dec_static_count(rule);
820 kfree(rule, M_IPFW3);
/*
 * Per-CPU half of a flush: walk this CPU's chain freeing every rule;
 * the default rule is preserved unless lmsg->u.ms_result requests that
 * it be killed too.  Forwards the message to the next CPU when done.
 */
826 ipfw_flush_rule_dispatch(netmsg_t nmsg)
828 struct lwkt_msg *lmsg = &nmsg->lmsg;
829 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
830 struct ip_fw *rule, *the_rule;
831 int kill_default = lmsg->u.ms_result;
833 rule = ctx->ipfw_rule_chain;
834 while (rule != NULL) {
/* Keep the default rule as the (sole) remaining chain head. */
835 if (rule->rulenum == IPFW_DEFAULT_RULE && kill_default == 0) {
836 ctx->ipfw_rule_chain = rule;
/* Statistics are maintained on the config CPU only. */
841 if (mycpuid == IPFW_CFGCPUID)
842 ipfw_dec_static_count(the_rule);
844 kfree(the_rule, M_IPFW3);
847 ifnet_forwardmsg(lmsg, mycpuid + 1);
/* Per-CPU handler: append a state entry via the ipfw3_basic module's
 * registered hook, then forward to the next CPU. */
851 ipfw_append_state_dispatch(netmsg_t nmsg)
853 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
854 struct ipfw_ioc_state *ioc_state = dmsg->ioc_state;
855 (*ipfw_basic_append_state_prt)(ioc_state);
856 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Per-CPU handler: find the rule matching dmsg->rulenum on this CPU and
 * flush the states anchored to it via the ipfw3_basic hook, then
 * forward to the next CPU.
 * NOTE(review): the loop advance and the no-match case are not visible
 * in this excerpt; as shown, the hook appears to be called with
 * whatever `rule` ends up being — verify against upstream.
 */
860 ipfw_delete_state_dispatch(netmsg_t nmsg)
862 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
863 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
864 struct ip_fw *rule = ctx->ipfw_rule_chain;
865 while (rule != NULL) {
866 if (rule->rulenum == dmsg->rulenum) {
872 (*ipfw_basic_flush_state_prt)(rule);
873 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
877 * Deletes all rules from a chain (including the default rule
878 * if the second argument is set).
879 * Must be called at splimp().
882 ipfw_ctl_flush_rule(int kill_default)
884 struct netmsg_del dmsg;
885 struct netmsg_base nmsg;
886 struct lwkt_msg *lmsg;
888 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
891 * If 'kill_default' then caller has done the necessary
892 * msgport syncing; unnecessary to do it again.
896 * Let ipfw_chk() know the rules are going to
897 * be flushed, so it could jump directly to
/* (the default rule — via the ipfw_flushing flag set near here) */
901 netmsg_service_sync();
905 * If the ipfw3_basic flush hook is registered,
906 * flush all states on all CPUs first.
908 if (ipfw_basic_flush_state_prt != NULL) {
909 bzero(&dmsg, sizeof(dmsg));
910 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
911 0, ipfw_delete_state_dispatch);
912 ifnet_domsg(&dmsg.base.lmsg, 0);
915 * Press the 'flush' button
917 bzero(&nmsg, sizeof(nmsg));
918 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
919 0, ipfw_flush_rule_dispatch);
921 lmsg->u.ms_result = kill_default;
922 ifnet_domsg(lmsg, 0);
/* After a full flush the global accounting must be empty. */
925 KASSERT(static_count == 0,
926 ("%u static rules remain", static_count));
927 KASSERT(static_ioc_len == 0,
928 ("%u bytes of static rules remain", static_ioc_len));
/*
 * Per-CPU handler: delete the rule numbered dmsg->rulenum from this
 * CPU's chain (the loop advance of `prev`/`rule` is not visible in this
 * excerpt), then forward to the next CPU.
 */
936 ipfw_delete_rule_dispatch(netmsg_t nmsg)
938 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
939 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
940 struct ip_fw *rule, *prev = NULL;
942 rule = ctx->ipfw_rule_chain;
944 if (rule->rulenum == dmsg->rulenum) {
945 ipfw_delete_rule(ctx, prev, rule);
952 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Delete one rule by number on all CPUs: first flush the states that
 * are anchored to that rule, then remove the rule replicas themselves.
 */
956 ipfw_alt_delete_rule(uint16_t rulenum)
958 struct netmsg_del dmsg;
959 struct netmsg_base *nmsg;
962 * Delete the states whose stub is the rule with this rulenum,
963 * on every CPU.
965 bzero(&dmsg, sizeof(dmsg));
967 netmsg_init(nmsg, NULL, &curthread->td_msgport,
968 0, ipfw_delete_state_dispatch);
969 dmsg.rulenum = rulenum;
970 ifnet_domsg(&nmsg->lmsg, 0);
973 * Get rid of the rule duplications on all CPUs
975 bzero(&dmsg, sizeof(dmsg));
977 netmsg_init(nmsg, NULL, &curthread->td_msgport,
978 0, ipfw_delete_rule_dispatch);
979 dmsg.rulenum = rulenum;
980 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * Per-CPU handler: delete every rule belonging to dmsg->from_set on
 * this CPU, asserting at least one rule matched, then forward to the
 * next CPU.  (The else-branch advancing `prev`/`rule` is not visible
 * in this excerpt.)
 */
985 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
987 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
988 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
989 struct ip_fw *prev, *rule;
995 rule = ctx->ipfw_rule_chain;
996 while (rule != NULL) {
997 if (rule->set == dmsg->from_set) {
/* ipfw_delete_rule returns the next entry, keeping the walk valid. */
998 rule = ipfw_delete_rule(ctx, prev, rule);
1007 KASSERT(del, ("no match set?!"));
1009 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Per-CPU handler: clear the state-creation capability (presumably a
 * flag bit — the clearing statement is not visible in this excerpt)
 * on every rule in dmsg->from_set, then forward to the next CPU.
 */
1013 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
1015 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1016 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1022 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1023 if (rule->set == dmsg->from_set) {
1029 KASSERT(cleared, ("no match set?!"));
1031 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Delete an entire rule set on all CPUs: verify the set exists, disable
 * state creation for its rules, (flush its states — not visible in this
 * excerpt), then delete the rules themselves.
 */
1035 ipfw_alt_delete_ruleset(uint8_t set)
1037 struct netmsg_del dmsg;
1038 struct netmsg_base *nmsg;
1041 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1044 * Check whether the 'set' exists. If it exists,
1045 * then check whether any rules within the set will
1046 * try to create states.
1050 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1051 if (rule->set == set) {
1056 return 0; /* XXX EINVAL? */
1060 * Clear the STATE flag, so no more states will be
1061 * created based the rules in this set.
1063 bzero(&dmsg, sizeof(dmsg));
1065 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1066 0, ipfw_disable_ruleset_state_dispatch);
1067 dmsg.from_set = set;
1069 ifnet_domsg(&nmsg->lmsg, 0);
/* Finally remove the rules of this set on every CPU. */
1075 bzero(&dmsg, sizeof(dmsg));
1077 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1078 0, ipfw_alt_delete_ruleset_dispatch);
1079 dmsg.from_set = set;
1081 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * Per-CPU handler: starting from dmsg->start_rule, reassign every rule
 * numbered dmsg->rulenum to set dmsg->to_set; the start pointer is
 * advanced to this rule's sibling for the next CPU before forwarding.
 */
1086 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
1088 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1091 rule = dmsg->start_rule;
1094 * Move to the position on the next CPU
1095 * before the msg is forwarded.
/* Rule numbers may repeat; touch every rule with the target number. */
1098 while (rule && rule->rulenum <= dmsg->rulenum) {
1099 if (rule->rulenum == dmsg->rulenum)
1100 rule->set = dmsg->to_set;
1103 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Move the rule(s) with the given number into another set, on all CPUs.
 * A quick scan on the current CPU avoids broadcasting when there is
 * nothing to move.
 */
1107 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
1109 struct netmsg_del dmsg;
1110 struct netmsg_base *nmsg;
1112 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1115 * Locate first rule to move
1117 for (rule = ctx->ipfw_rule_chain;
1118 rule && rule->rulenum <= rulenum; rule = rule->next) {
1119 if (rule->rulenum == rulenum && rule->set != set)
1122 if (rule == NULL || rule->rulenum > rulenum)
1123 return 0; /* XXX error? */
1125 bzero(&dmsg, sizeof(dmsg));
1127 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1128 0, ipfw_alt_move_rule_dispatch);
1129 dmsg.start_rule = rule;
1130 dmsg.rulenum = rulenum;
1133 ifnet_domsg(&nmsg->lmsg, 0);
/* After the last CPU the start pointer must have been consumed. */
1134 KKASSERT(dmsg.start_rule == NULL);
/* Per-CPU handler: retag every rule in from_set with to_set, then
 * forward to the next CPU. */
1139 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
1141 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1142 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1145 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1146 if (rule->set == dmsg->from_set)
1147 rule->set = dmsg->to_set;
1149 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/* Move every rule from one set to another, on all CPUs. */
1153 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
1155 struct netmsg_del dmsg;
1156 struct netmsg_base *nmsg;
1158 bzero(&dmsg, sizeof(dmsg));
1160 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1161 0, ipfw_alt_move_ruleset_dispatch);
1162 dmsg.from_set = from_set;
1163 dmsg.to_set = to_set;
1165 ifnet_domsg(&nmsg->lmsg, 0);
/* Per-CPU handler: exchange the set tags from_set <-> to_set on every
 * rule, then forward to the next CPU. */
1170 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
1172 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1173 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1176 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1177 if (rule->set == dmsg->from_set)
1178 rule->set = dmsg->to_set;
1179 else if (rule->set == dmsg->to_set)
1180 rule->set = dmsg->from_set;
1182 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/* Swap two rule sets on all CPUs.  (The to_set assignment is not
 * visible in this excerpt — verify dmsg.to_set = set2 upstream.) */
1186 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
1188 struct netmsg_del dmsg;
1189 struct netmsg_base *nmsg;
1191 bzero(&dmsg, sizeof(dmsg));
1193 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1194 0, ipfw_alt_swap_ruleset_dispatch);
1195 dmsg.from_set = set1;
1198 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * IP_FW_DEL-style set manipulation.  The 32-bit arg packs:
 *   bits  0..15  rulenum (or a set number, depending on cmd)
 *   bits 16..23  new_set
 *   bits 24..31  cmd (0=del rule, 1=del set, 2=move rule,
 *                3=move set, 4=swap sets)
 */
1204 ipfw_ctl_alter(uint32_t arg)
1207 uint8_t cmd, new_set;
1210 rulenum = arg & 0xffff;
1211 cmd = (arg >> 24) & 0xff;
1212 new_set = (arg >> 16) & 0xff;
/* Validate ranges: target set must not be the default set, rule
 * numbers must not name the default rule, set numbers must be valid. */
1216 if (new_set >= IPFW_DEFAULT_SET)
1218 if (cmd == 0 || cmd == 2) {
1219 if (rulenum == IPFW_DEFAULT_RULE)
1222 if (rulenum >= IPFW_DEFAULT_SET)
1227 case 0: /* delete rules with given number */
1228 error = ipfw_alt_delete_rule(rulenum);
1231 case 1: /* delete all rules with given set number */
1232 error = ipfw_alt_delete_ruleset(rulenum);
1235 case 2: /* move rules with given number to new set */
1236 error = ipfw_alt_move_rule(rulenum, new_set);
1239 case 3: /* move rules with given set number to new set */
1240 error = ipfw_alt_move_ruleset(rulenum, new_set);
1243 case 4: /* swap two sets */
1244 error = ipfw_alt_swap_ruleset(rulenum, new_set);
1251 * Clear counters (byte/packet counts and last-match timestamp) for a
1252 * specific rule.
1254 clear_counters(struct ip_fw *rule)
1256 rule->bcnt = rule->pcnt = 0;
1257 rule->timestamp = 0;
/*
 * Per-CPU handler: zero the counters of every rule (rulenum == 0) or
 * only of rules with a matching number, then forward to the next CPU.
 */
1261 ipfw_zero_entry_dispatch(netmsg_t nmsg)
1263 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
1264 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1267 if (zmsg->rulenum == 0) {
1268 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1269 clear_counters(rule);
1272 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1273 if (rule->rulenum == zmsg->rulenum) {
1274 clear_counters(rule);
1278 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1282 * Reset some or all counters on firewall rules.
1283 * @arg rulenum is 0 to clear all entries, or names a specific rule.
1285 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1288 ipfw_ctl_zero_entry(int rulenum, int log_only)
1290 struct netmsg_zent zmsg;
1291 struct netmsg_base *nmsg;
1293 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1295 bzero(&zmsg, sizeof(zmsg));
1297 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1298 0, ipfw_zero_entry_dispatch);
1299 zmsg.log_only = log_only;
/* rulenum == 0: clear everything. */
1302 msg = log_only ? "ipfw: All logging counts reset.\n"
1303 : "ipfw: Accounting cleared.\n";
1308 * Locate the first rule with 'rulenum'
1310 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1311 if (rule->rulenum == rulenum)
1314 if (rule == NULL) /* we did not find any matching rules */
1316 zmsg.start_rule = rule;
1317 zmsg.rulenum = rulenum;
1319 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
1320 : "ipfw: Entry %d cleared.\n";
1322 ifnet_domsg(&nmsg->lmsg, 0);
1323 KKASSERT(zmsg.start_rule == NULL);
/* Record the administrative action in the security log. */
1326 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
/*
 * Sockopt handler: install a state entry on every CPU via the
 * ipfw3_basic append hook (no-op if the basic module is not loaded).
 */
1331 ipfw_ctl_add_state(struct sockopt *sopt)
1333 struct ipfw_ioc_state *ioc_state;
1334 ioc_state = sopt->sopt_val;
1335 if (ipfw_basic_append_state_prt != NULL) {
1336 struct netmsg_del dmsg;
1337 bzero(&dmsg, sizeof(dmsg));
1338 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
1339 0, ipfw_append_state_dispatch);
1340 (&dmsg)->ioc_state = ioc_state;
1341 ifnet_domsg(&dmsg.base.lmsg, 0);
/*
 * Sockopt handler: delete the states anchored to a given rule number.
 * The rulenum is read from the sockopt buffer, verified to name an
 * existing rule on this CPU, and then the delete is broadcast.
 */
1347 ipfw_ctl_delete_state(struct sockopt *sopt)
1349 int rulenum = 0, error;
1350 if (sopt->sopt_valsize != 0) {
1351 error = soopt_to_kbuf(sopt, &rulenum, sizeof(int), sizeof(int));
1356 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1357 struct ip_fw *rule = ctx->ipfw_rule_chain;
/* Confirm the rule exists before broadcasting the state delete. */
1359 while (rule!=NULL) {
1360 if (rule->rulenum == rulenum) {
1369 struct netmsg_del dmsg;
1370 struct netmsg_base *nmsg;
1372 * Delete the states whose stub is the rule with this
1373 * rulenum, on every CPU.
1375 bzero(&dmsg, sizeof(dmsg));
1377 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1378 0, ipfw_delete_state_dispatch);
1379 dmsg.rulenum = rulenum;
1380 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * Sockopt handler: flush all states on all CPUs.  Reuses the delete
 * dispatch with rulenum left at zero (the zeroed dmsg), which matches
 * every state.
 */
1385 ipfw_ctl_flush_state(struct sockopt *sopt)
1387 struct netmsg_del dmsg;
1388 struct netmsg_base *nmsg;
1390 * Delete the states on every CPU; no specific rulenum is set,
1391 * so all states are affected.
1393 bzero(&dmsg, sizeof(dmsg));
1395 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1396 0, ipfw_delete_state_dispatch);
1398 ifnet_domsg(&nmsg->lmsg, 0);
1403 * Get the ioc_rule from the sopt,
1404 * call ipfw_add_rule to add the rule.
1407 ipfw_ctl_add_rule(struct sockopt *sopt)
1409 struct ipfw_ioc_rule *ioc_rule;
1412 size = sopt->sopt_valsize;
/* Reject buffers that are larger than the maximum rule or smaller
 * than the fixed ioc_rule header. */
1413 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
1414 size < sizeof(*ioc_rule)) {
/* Grow the sockopt buffer to the maximum rule size so later stages
 * can rely on a fixed capacity. */
1417 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
1418 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
1419 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
1421 ioc_rule = sopt->sopt_val;
1423 ipfw_add_rule(ioc_rule);
/*
 * Serialize one kernel state entry into the userland ioc_state layout;
 * returns the advanced output cursor (one past the written record).
 */
1428 ipfw_copy_state(struct ip_fw_state *state, struct ipfw_ioc_state *ioc_state, int cpuid)
1430 ioc_state->pcnt = state->pcnt;
1431 ioc_state->bcnt = state->bcnt;
1432 ioc_state->lifetime = state->lifetime;
1433 ioc_state->timestamp = state->timestamp;
1434 ioc_state->cpuid = cpuid;
1435 ioc_state->expiry = state->expiry;
/* The owning rule's number comes from the state's stub rule. */
1436 ioc_state->rulenum = state->stub->rulenum;
1438 bcopy(&state->flow_id, &ioc_state->flow_id, sizeof(struct ipfw_flow_id));
1439 return ioc_state + 1;
/*
 * Serialize one static rule into the userland ioc_rule layout,
 * aggregating packet/byte counters and the most recent timestamp
 * across the rule's per-CPU siblings; returns the advanced output
 * cursor.
 */
1443 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
1445 const struct ip_fw *sibling;
1450 ioc_rule->act_ofs = rule->act_ofs;
1451 ioc_rule->cmd_len = rule->cmd_len;
1452 ioc_rule->rulenum = rule->rulenum;
1453 ioc_rule->set = rule->set;
1455 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
1456 ioc_rule->static_count = static_count;
1457 ioc_rule->static_len = static_ioc_len;
1461 ioc_rule->timestamp = 0;
/* Sum counters over every CPU's replica of this rule; report the
 * latest timestamp seen on any CPU. */
1468 ioc_rule->timestamp = 0;
1469 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
1470 ioc_rule->pcnt += sibling->pcnt;
1471 ioc_rule->bcnt += sibling->bcnt;
1472 if (sibling->timestamp > ioc_rule->timestamp)
1473 ioc_rule->timestamp = sibling->timestamp;
/* Static rules must be replicated on every CPU. */
1479 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
/* cmd_len is in 32-bit words, hence * 4 bytes. */
1481 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
1483 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
/*
 * Sockopt handler: return a comma-separated list of the names of all
 * registered ipfw3 modules.
 * NOTE(review): strcat into a fixed 1024-byte buffer is unbounded if
 * many long module names are registered — capacity is MAX_MODULE (10)
 * names, so this is bounded in practice, but verify against name size.
 */
1487 ipfw_ctl_get_modules(struct sockopt *sopt)
1490 struct ipfw_module *mod;
1491 char module_str[1024];
1492 memset(module_str,0,1024);
1493 for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
1494 if (mod->type != 0) {
1496 strcat(module_str,",");
1497 strcat(module_str,mod->name);
1500 bzero(sopt->sopt_val, sopt->sopt_valsize);
1501 bcopy(module_str, sopt->sopt_val, strlen(module_str));
1502 sopt->sopt_valsize = strlen(module_str);
1507 * Copy all static rules and states on all CPU
/*
 * ipfw_ctl_get_rules - export every static rule plus every dynamic
 * state (gathered from all cpus) into the caller's sockopt buffer.
 */
1510 ipfw_ctl_get_rules(struct sockopt *sopt)
1512 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1513 struct ipfw_state_context *state_ctx;
1515 struct ip_fw_state *state;
1518 int i, j, state_count = 0;
/* start with the space needed for all static rules */
1520 size = static_ioc_len;
/*
 * Count the states on every cpu.
 * NOTE(review): the inner loop bound uses the *current* cpu's
 * state_hash_size while indexing ipfw_ctx[i] -- this assumes every
 * cpu uses an identical hash size; confirm against the init path.
 */
1521 for (i = 0; i < ncpus; i++) {
1522 for (j = 0; j < ctx->state_hash_size; j++) {
1523 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1524 state_count += state_ctx->count;
1527 if (state_count > 0) {
1528 size += state_count * sizeof(struct ipfw_ioc_state);
1531 if (sopt->sopt_valsize < size) {
1532 /* XXX TODO sopt_val is not big enough */
1533 bzero(sopt->sopt_val, sopt->sopt_valsize);
1537 sopt->sopt_valsize = size;
1538 bp = sopt->sopt_val;
/* static rules: the local cpu's chain is walked (rules are
 * duplicated per cpu; counters are aggregated by ipfw_copy_rule) */
1540 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1541 bp = ipfw_copy_rule(rule, bp);
/* then append every state from every cpu's hash buckets */
1543 if (state_count > 0 ) {
1544 for (i = 0; i < ncpus; i++) {
1545 for (j = 0; j < ctx->state_hash_size; j++) {
1546 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1547 state = state_ctx->state;
1548 while (state != NULL) {
1549 bp = ipfw_copy_state(state, bp, i);
1550 state = state->next;
/*
 * ipfw_set_disable_dispatch - per-cpu netmsg handler: install the new
 * set-disable mask on this cpu's context, then forward the same
 * message to the next cpu in the chain.
 */
1559 ipfw_set_disable_dispatch(netmsg_t nmsg)
1561 struct lwkt_msg *lmsg = &nmsg->lmsg;
1562 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
/* the mask was stashed in the message by ipfw_ctl_set_disable() */
1564 ctx->ipfw_set_disable = lmsg->u.ms_result32;
1566 ifnet_forwardmsg(lmsg, mycpuid + 1);
/*
 * ipfw_ctl_set_disable - compute the new set-disable mask from the
 * caller's disable/enable bitmasks and push it to every cpu
 * synchronously via the ifnet message chain.
 */
1570 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
1572 struct netmsg_base nmsg;
1573 struct lwkt_msg *lmsg;
1574 uint32_t set_disable;
1576 /* IPFW_DEFAULT_SET is always enabled */
1577 enable |= (1 << IPFW_DEFAULT_SET);
/* enable bits override disable bits in the merged mask */
1578 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
1580 bzero(&nmsg, sizeof(nmsg));
1581 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1582 0, ipfw_set_disable_dispatch);
1584 lmsg->u.ms_result32 = set_disable;
/* start on cpu0; the dispatch forwards itself to the remaining cpus */
1586 ifnet_domsg(lmsg, 0);
1591 * ipfw_ctl_x - extended version of ipfw_ctl
1592 * remove the x_header, and adjust the sopt_name,sopt_val and sopt_valsize.
1595 ipfw_ctl_x(struct sockopt *sopt)
1597 ip_fw_x_header *x_header;
/* the real opcode travels inside the extension header */
1598 x_header = (ip_fw_x_header *)(sopt->sopt_val);
1599 sopt->sopt_name = x_header->opcode;
1600 sopt->sopt_valsize -= sizeof(ip_fw_x_header);
/* slide the payload down over the header; bcopy tolerates overlap */
1601 bcopy(++x_header, sopt->sopt_val, sopt->sopt_valsize);
/* re-enter the normal parser with the unwrapped request */
1602 return ipfw_ctl(sopt);
1607 * {set|get}sockopt parser.
/*
 * ipfw_ctl - central {set,get}sockopt dispatcher for the ipfw3
 * control interface; routes each opcode to its handler.
 */
1610 ipfw_ctl(struct sockopt *sopt)
1617 switch (sopt->sopt_name) {
1622 error = ipfw_ctl_get_rules(sopt);
1625 error = ipfw_ctl_get_modules(sopt);
/* flush all rules but keep the default rule (arg 0) */
1629 ipfw_ctl_flush_rule(0);
1633 error = ipfw_ctl_add_rule(sopt);
1638 * IP_FW_DEL is used for deleting single rules or sets,
1639 * and (ab)used to atomically manipulate sets.
1640 * Argument size is used to distinguish between the two:
1642 * delete single rule or set of rules,
1643 * or reassign rules (or sets) to a different set.
1644 * 2 * sizeof(uint32_t)
1645 * atomic disable/enable sets.
1646 * first uint32_t contains sets to be disabled,
1647 * second uint32_t contains sets to be enabled.
1649 masks = sopt->sopt_val;
1650 size = sopt->sopt_valsize;
1651 if (size == sizeof(*masks)) {
1653 * Delete or reassign static rule
1655 error = ipfw_ctl_alter(masks[0]);
1656 } else if (size == (2 * sizeof(*masks))) {
1658 * Set enable/disable
1660 ipfw_ctl_set_disable(masks[0], masks[1]);
1666 case IP_FW_RESETLOG: /* argument is an int, the rule number */
1668 if (sopt->sopt_valsize != 0) {
1669 error = soopt_to_kbuf(sopt, &rulenum,
1670 sizeof(int), sizeof(int));
/* second arg distinguishes RESETLOG from plain ZERO */
1675 error = ipfw_ctl_zero_entry(rulenum,
1676 sopt->sopt_name == IP_FW_RESETLOG);
/* NAT operations are delegated through registered function pointers */
1679 error = ipfw_nat_cfg_ptr(sopt);
1682 error = ipfw_nat_del_ptr(sopt);
1684 case IP_FW_NAT_FLUSH:
1685 error = ipfw_nat_flush_ptr(sopt);
1688 error = ipfw_nat_get_cfg_ptr(sopt);
1691 error = ipfw_nat_get_log_ptr(sopt);
/* dummynet opcodes are handled wholesale by the dummynet module */
1693 case IP_DUMMYNET_GET:
1694 case IP_DUMMYNET_CONFIGURE:
1695 case IP_DUMMYNET_DEL:
1696 case IP_DUMMYNET_FLUSH:
1697 error = ip_dn_sockopt(sopt);
1699 case IP_FW_STATE_ADD:
1700 error = ipfw_ctl_add_state(sopt);
1702 case IP_FW_STATE_DEL:
1703 error = ipfw_ctl_delete_state(sopt);
1705 case IP_FW_STATE_FLUSH:
1706 error = ipfw_ctl_flush_state(sopt);
/* all table manipulation shares one sub-dispatcher */
1708 case IP_FW_TABLE_CREATE:
1709 case IP_FW_TABLE_DELETE:
1710 case IP_FW_TABLE_APPEND:
1711 case IP_FW_TABLE_REMOVE:
1712 case IP_FW_TABLE_LIST:
1713 case IP_FW_TABLE_FLUSH:
1714 case IP_FW_TABLE_SHOW:
1715 case IP_FW_TABLE_TEST:
1716 case IP_FW_TABLE_RENAME:
1717 error = ipfw_ctl_table_sockopt(sopt);
1720 kprintf("ipfw_ctl invalid option %d\n",
/*
 * ipfw_check_in - pfil(9) input hook: run the inbound packet through
 * ipfw_chk() and act on the verdict (pass, dummynet, divert, ...).
 */
1728 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1730 struct ip_fw_args args;
1731 struct mbuf *m = *m0;
1733 int tee = 0, error = 0, ret;
/* a packet re-injected by dummynet carries its matching rule in a
 * tag, so classification can resume where it left off */
1735 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1736 /* Extract info from dummynet tag */
1737 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1738 KKASSERT(mtag != NULL);
1739 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1740 KKASSERT(args.rule != NULL);
1742 m_tag_delete(m, mtag);
1743 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1751 ret = ipfw_chk(&args);
1768 case IP_FW_DUMMYNET:
1769 /* Send packet to the appropriate pipe */
1770 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
1779 * Must clear bridge tag when changing
1781 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
/* divert the mbuf; last arg 1 presumably marks the inbound
 * direction (the output hook passes 0) -- verify in ip_divert */
1782 if (ip_divert_p != NULL) {
1783 m = ip_divert_p(m, tee, 1);
1787 /* not sure this is the right error msg */
/* any verdict not handled above indicates a logic error */
1797 panic("unknown ipfw return value: %d", ret);
/*
 * ipfw_check_out - pfil(9) output hook: run the outbound packet
 * through ipfw_chk() and act on the verdict; mirrors ipfw_check_in.
 */
1805 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1807 struct ip_fw_args args;
1808 struct mbuf *m = *m0;
1810 int tee = 0, error = 0, ret;
/* a packet re-injected by dummynet carries its matching rule in a tag */
1812 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1813 /* Extract info from dummynet tag */
1814 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1815 KKASSERT(mtag != NULL);
1816 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1817 KKASSERT(args.rule != NULL);
1819 m_tag_delete(m, mtag);
1820 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1828 ret = ipfw_chk(&args);
1846 case IP_FW_DUMMYNET:
/* queue to the matching dummynet pipe on the output side */
1847 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
/* divert; last arg 0 presumably marks the outbound direction
 * (the input hook passes 1) -- verify in ip_divert */
1855 if (ip_divert_p != NULL) {
1856 m = ip_divert_p(m, tee, 0);
1860 /* not sure this is the right error msg */
1870 panic("unknown ipfw return value: %d", ret);
/*
 * Register the ipfw input/output hooks on the AF_INET pfil head.
 * Must run on the configuration port (asserted below).
 * NOTE(review): the function signature is not visible here; this
 * appears to be the hook-attach routine.
 */
1880 struct pfil_head *pfh;
1881 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1883 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1887 pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
1888 pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
/*
 * Unregister the ipfw hooks from the AF_INET pfil head.  Must run on
 * the configuration port.
 * NOTE(review): the function signature is not visible here; this
 * appears to be the hook-detach routine.
 */
1894 struct pfil_head *pfh;
1896 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1898 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1902 pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
1903 pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
/*
 * ipfw_sysctl_enable_dispatch - apply a new fw3_enable value on the
 * configuration port; no-op (early reply) when the value is unchanged.
 */
1907 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
1909 struct lwkt_msg *lmsg = &nmsg->lmsg;
1910 int enable = lmsg->u.ms_result;
/* nothing to do if the state already matches the request */
1912 if (fw3_enable == enable)
1915 fw3_enable = enable;
1922 lwkt_replymsg(lmsg, 0);
/*
 * ipfw_sysctl_enable - sysctl handler for the firewall enable knob;
 * forwards the new value to the config port so it is applied
 * serialized with other configuration changes.
 */
1926 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
1928 struct netmsg_base nmsg;
1929 struct lwkt_msg *lmsg;
1932 enable = fw3_enable;
1933 error = sysctl_handle_int(oidp, &enable, 0, req);
/* read-only access or copy error: nothing further to do */
1934 if (error || req->newptr == NULL)
1937 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1938 0, ipfw_sysctl_enable_dispatch);
1940 lmsg->u.ms_result = enable;
/* synchronous: returns the dispatch handler's reply code */
1942 return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
/*
 * ipfw_sysctl_autoinc_step - sysctl handler clamping the rule-number
 * auto-increment step to [IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX].
 */
1946 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
1948 return sysctl_int_range(oidp, arg1, arg2, req,
1949 IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
/*
 * ipfw_ctx_init_dispatch - per-cpu initialization: allocate this cpu's
 * ipfw context and install the catch-all default rule, chaining each
 * cpu's copy of the default rule to the previous cpu's copy via the
 * sibling pointer.  Forwards itself to the next cpu when done.
 */
1954 ipfw_ctx_init_dispatch(netmsg_t nmsg)
1956 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
1957 struct ipfw_context *ctx;
1958 struct ip_fw *def_rule;
/* the NAT context is global, so only cpu0 allocates it */
1960 if (mycpuid == 0 ) {
1961 ipfw_nat_ctx = kmalloc(sizeof(struct ipfw_nat_context),
1962 M_IPFW3, M_WAITOK | M_ZERO);
1965 ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW3, M_WAITOK | M_ZERO);
1966 ipfw_ctx[mycpuid] = ctx;
/* build the default rule: one basic-module accept or deny action */
1968 def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW3, M_WAITOK | M_ZERO);
1969 def_rule->act_ofs = 0;
1970 def_rule->rulenum = IPFW_DEFAULT_RULE;
1971 def_rule->cmd_len = 2;
1972 def_rule->set = IPFW_DEFAULT_SET;
1974 def_rule->cmd[0].len = LEN_OF_IPFWINSN;
1975 def_rule->cmd[0].module = MODULE_BASIC_ID;
/* default policy: compile-time accept, else the runtime tunable */
1976 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1977 def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1979 if (filters_default_to_accept)
1980 def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1982 def_rule->cmd[0].opcode = O_BASIC_DENY;
1985 /* Install the default rule */
1986 ctx->ipfw_default_rule = def_rule;
1987 ctx->ipfw_rule_chain = def_rule;
1990 * if sibiling in last CPU is exists,
1991 * then it's sibling should be current rule
1993 if (fwmsg->sibling != NULL) {
1994 fwmsg->sibling->sibling = def_rule;
1996 /* prepare for next CPU */
1997 fwmsg->sibling = def_rule;
1999 /* Statistics only need to be updated once */
2001 ipfw_inc_static_count(def_rule);
/* continue the same initialization on the next cpu */
2003 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_init_dispatch - one-shot initialization on the configuration
 * port: build per-cpu contexts, publish the packet-check / control /
 * dummynet entry points, and report the default policy on the console.
 */
2007 ipfw_init_dispatch(netmsg_t nmsg)
2009 struct netmsg_ipfw fwmsg;
/* guard against double-load */
2012 kprintf("IP firewall already loaded\n")
2017 bzero(&fwmsg, sizeof(fwmsg));
2018 netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
2019 0, ipfw_ctx_init_dispatch);
/* walk every cpu; each hop allocates that cpu's context */
2020 ifnet_domsg(&fwmsg.base.lmsg, 0);
/* publish the global hook pointers used by the IP stack */
2022 ip_fw_chk_ptr = ipfw_chk;
2023 ip_fw_ctl_x_ptr = ipfw_ctl_x;
2024 ip_fw_dn_io_ptr = ipfw_dummynet_io;
2026 kprintf("ipfw3 initialized, default to %s, logging ",
2027 (int)(ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode) ==
2028 O_BASIC_ACCEPT ? "accept" : "deny");
2030 #ifdef IPFIREWALL_VERBOSE
2033 if (fw_verbose == 0) {
2034 kprintf("disabled ");
2041 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * Module load path: log the event, then run ipfw initialization and
 * table-subsystem initialization synchronously on the config port.
 * NOTE(review): the error from the first lwkt_domsg() is overwritten
 * by the second -- a failure of ipfw_init_dispatch would be silently
 * lost; consider checking it before proceeding.
 */
2047 struct netmsg_base smsg;
2050 ipfw3_log_modevent(MOD_LOAD);
2052 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2053 0, ipfw_init_dispatch);
2054 error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2055 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2056 0, table_init_dispatch);
2057 error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
/*
 * ipfw_fini_dispatch - teardown on the configuration port: unpublish
 * the hook pointers, flush all rules including the default rule, and
 * free every per-cpu context plus the global NAT context.
 */
2064 ipfw_fini_dispatch(netmsg_t nmsg)
/* quiesce all cpus so nobody is still inside the old hook pointers */
2071 netmsg_service_sync();
2072 ip_fw_chk_ptr = NULL;
2073 ip_fw_ctl_x_ptr = NULL;
2074 ip_fw_dn_io_ptr = NULL;
2075 ipfw_ctl_flush_rule(1 /* kill default rule */);
2077 /* Free pre-cpu context */
2078 for (cpu = 0; cpu < ncpus; ++cpu) {
2079 if (ipfw_ctx[cpu] != NULL) {
2080 kfree(ipfw_ctx[cpu], M_IPFW3);
2081 ipfw_ctx[cpu] = NULL;
2084 kfree(ipfw_nat_ctx,M_IPFW3);
2085 ipfw_nat_ctx = NULL;
2086 kprintf("IP firewall unloaded\n");
2088 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * Module unload path: log the event and run the teardown synchronously
 * on the configuration port, returning its result to the caller.
 */
2094 struct netmsg_base smsg;
2096 ipfw3_log_modevent(MOD_UNLOAD);
2097 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2098 0, ipfw_fini_dispatch);
2099 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2102 #endif /* KLD_MODULE */
/*
 * ipfw3_modevent - module(9) event handler; statically compiled
 * builds refuse to unload.
 */
2105 ipfw3_modevent(module_t mod, int type, void *unused)
2117 kprintf("ipfw statically compiled, cannot unload\n");
/* registered at SI_SUB_PROTO_END so protocol init runs first */
2129 static moduledata_t ipfw3mod = {
2134 DECLARE_MODULE(ipfw3, ipfw3mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2135 MODULE_VERSION(ipfw3, 1);