2 * Copyright (c) 2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Bill Yuan <bycn82@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #error IPFIREWALL3 requires INET.
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
46 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <sys/ucred.h>
53 #include <sys/in_cksum.h>
55 #include <sys/thread2.h>
56 #include <sys/mplock2.h>
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in_var.h>
61 #include <netinet/in_pcb.h>
62 #include <netinet/ip.h>
63 #include <netinet/ip_var.h>
64 #include <netinet/ip_icmp.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_timer.h>
67 #include <netinet/tcp_var.h>
68 #include <netinet/tcpip.h>
69 #include <netinet/udp.h>
70 #include <netinet/udp_var.h>
71 #include <netinet/ip_divert.h>
72 #include <netinet/if_ether.h>
75 #include <net/route.h>
77 #include <net/netmsg2.h>
79 #include <net/ipfw3/ip_fw.h>
80 #include <net/ipfw3_basic/ip_fw3_basic.h>
81 #include <net/ipfw3_nat/ip_fw3_nat.h>
82 #include <net/dummynet3/ip_dummynet3.h>
84 MALLOC_DEFINE(M_IPFW3, "IPFW3", "ip_fw3 default module");
86 #ifdef IPFIREWALL_DEBUG
87 #define DPRINTF(fmt, ...) \
90 kprintf(fmt, __VA_ARGS__); \
93 #define DPRINTF(fmt, ...) ((void)0)
97 #define MAX_OPCODE_PER_MODULE 100
99 #define IPFW_AUTOINC_STEP_MIN 1
100 #define IPFW_AUTOINC_STEP_MAX 1000
101 #define IPFW_AUTOINC_STEP_DEF 100
105 struct netmsg_base base;
106 const struct ipfw_ioc_rule *ioc_rule;
108 struct ip_fw *next_rule;
109 struct ip_fw *prev_rule;
110 struct ip_fw *sibling; /* sibling in prevous CPU */
114 struct netmsg_base base;
116 struct ip_fw *start_rule;
117 struct ip_fw *prev_rule;
118 struct ipfw_ioc_state *ioc_state;
125 struct netmsg_base base;
126 struct ip_fw *start_rule;
131 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
132 ipfw_nat_cfg_t *ipfw_nat_del_ptr;
133 ipfw_nat_cfg_t *ipfw_nat_flush_ptr;
134 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
135 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
137 /* handlers which implemented in ipfw_basic module */
138 ipfw_basic_delete_state_t *ipfw_basic_flush_state_prt = NULL;
139 ipfw_basic_append_state_t *ipfw_basic_append_state_prt = NULL;
141 static struct ipfw_context *ipfw_ctx[MAXCPU];
142 static struct ipfw_nat_context *ipfw_nat_ctx;
144 extern int ip_fw_loaded;
145 static uint32_t static_count; /* # of static rules */
146 static uint32_t static_ioc_len; /* bytes of static rules */
147 static int ipfw_flushing;
148 static int fw_verbose;
149 static int verbose_limit;
151 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
153 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
154 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
156 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw3, CTLFLAG_RW, 0, "Firewall");
157 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
158 &fw3_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
159 SYSCTL_PROC(_net_inet_ip_fw3, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
160 &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
161 "Rule number autincrement step");
162 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO,one_pass,CTLFLAG_RW,
164 "Only do a single pass through ipfw when using dummynet(4)");
165 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, debug, CTLFLAG_RW,
166 &fw_debug, 0, "Enable printing of debug ip_fw statements");
167 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose, CTLFLAG_RW,
168 &fw_verbose, 0, "Log matches to ipfw rules");
169 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, verbose_limit, CTLFLAG_RW,
170 &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
171 SYSCTL_INT(_net_inet_ip_fw3, OID_AUTO, static_count, CTLFLAG_RD,
172 &static_count, 0, "Number of static rules");
174 filter_func filter_funcs[MAX_MODULE][MAX_OPCODE_PER_MODULE];
175 struct ipfw_module ipfw_modules[MAX_MODULE];
176 static int ipfw_ctl(struct sockopt *sopt);
180 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
181 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
183 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
184 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
185 void init_module(void);
/*
 * Register a loadable ipfw3 filter module under `module_id` with the
 * given human-readable name.
 * NOTE(review): this extraction is missing interior lines (locals,
 * braces, slot bookkeeping); comments below are hedged accordingly.
 */
register_ipfw_module(int module_id,char *module_name)
struct ipfw_module *tmp;	/* cursor into the ipfw_modules[] table */
/* scan the module table — presumably looking for a free slot; verify */
for (i=0; i < MAX_MODULE; i++) {
if (tmp->type == 0) {
/* XXX strncpy with strlen(src) copies no NUL terminator; this relies
 * on the table entry having been zeroed beforehand (init_module). */
strncpy(tmp->name, module_name, strlen(module_name));
kprintf("ipfw3 module %s loaded ", module_name);
/*
 * Unregister the ipfw3 module identified by `module_id`.
 * Walks the per-CPU rule chain first to refuse unloading while any
 * installed rule still references an opcode of this module, then
 * clears the module-table entry and its filter functions.
 * NOTE(review): interior lines (cmdlen update, refusal path, braces)
 * are missing from this extraction — comments are hedged.
 */
unregister_ipfw_module(int module_id)
struct ipfw_module *tmp;
int i, len, cmdlen, found;
struct ipfw_context *ctx = ipfw_ctx[mycpuid];
fw = ctx->ipfw_rule_chain;
/* outer loop: every installed rule; inner loop: every cmd of the rule */
for (; fw; fw = fw->next) {
for (len = fw->cmd_len, cmd = fw->cmd; len > 0;
cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
/* module 0 opcodes 0/1 are the built-in accept/deny actions —
 * they never block an unload */
if (cmd->module == 0 &&
(cmd->opcode == 0 || cmd->opcode == 1)) {
//action accept or deny
} else if (cmd->module == module_id) {
/* locate the in-use (type == 1) table entry for this id */
for (i = 0; i < MAX_MODULE; i++) {
if (tmp->type == 1 && tmp->id == module_id) {
kprintf("ipfw3 module %s unloaded ", tmp->name);
/* drop every filter function the module registered; module 0
 * keeps opcodes 0 and 1 (built-in accept/deny) */
for (i = 0; i < MAX_OPCODE_PER_MODULE; i++) {
if (module_id == 0) {
if (i ==0 || i == 1) {
filter_funcs[module_id][i] = NULL;
259 register_ipfw_filter_funcs(int module, int opcode, filter_func func)
261 filter_funcs[module][opcode] = func;
265 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
266 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
268 *cmd_val = IP_FW_PASS;
269 *cmd_ctl = IP_FW_CTL_DONE;
273 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
274 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
276 *cmd_val = IP_FW_DENY;
277 *cmd_ctl = IP_FW_CTL_DONE;
283 memset(ipfw_modules, 0, sizeof(struct ipfw_module) * MAX_MODULE);
284 memset(filter_funcs, 0, sizeof(filter_func) *
285 MAX_OPCODE_PER_MODULE * MAX_MODULE);
286 register_ipfw_filter_funcs(0, O_BASIC_ACCEPT,
287 (filter_func)check_accept);
288 register_ipfw_filter_funcs(0, O_BASIC_DENY, (filter_func)check_deny);
292 ipfw_free_rule(struct ip_fw *rule)
294 kfree(rule, M_IPFW3);
/*
 * Resolve and cache `me->next_rule`: if the rule's action is a SKIPTO,
 * find the first following rule whose number is >= the skipto target;
 * otherwise fall back (per the inline comment) to the plain successor.
 * NOTE(review): the loop exit (`break`) and the non-skipto fallback
 * assignment are missing from this extraction — hedged.
 */
static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
struct ip_fw *rule = NULL;
/* look for action, in case it is a skipto */
cmd = ACTION_PTR(me);
if ((int)cmd->module == MODULE_BASIC_ID &&
(int)cmd->opcode == O_BASIC_SKIPTO) {
/* scan forward for the skipto target rule number (cmd->arg1) */
for (rule = me->next; rule; rule = rule->next) {
if (rule->rulenum >= cmd->arg1)
if (rule == NULL) { /* failure or not a skipto */
/* memoize so subsequent packets skip this lookup */
me->next_rule = rule;
* Rules are stored in ctx->ipfw_rule_chain, and each rule is a
* combination of multiple cmds (ipfw_insn). Within a rule, the
* filter cmds come first and the action cmds come last. The outer
* loop iterates over the rules and the inner loop over each rule's
* cmds; for every cmd, the function registered for its module id
* and opcode id is invoked, and processing continues according to
* the returned value.
330 ipfw_chk(struct ip_fw_args *args)
332 struct mbuf *m = args->m;
333 struct ip *ip = mtod(m, struct ip *);
334 struct ip_fw *f = NULL; /* matching rule */
335 int cmd_val = IP_FW_PASS;
337 struct divert_info *divinfo;
340 * hlen The length of the IPv4 header.
341 * hlen >0 means we have an IPv4 packet.
343 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
346 * offset The offset of a fragment. offset != 0 means that
347 * we have a fragment at this offset of an IPv4 packet.
348 * offset == 0 means that (if this is an IPv4 packet)
349 * this is the first or only fragment.
354 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
355 struct in_addr src_ip, dst_ip; /* NOTE: network format */
358 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
360 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
361 return IP_FW_PASS; /* accept */
363 if (args->eh == NULL || /* layer 3 packet */
364 (m->m_pkthdr.len >= sizeof(struct ip) &&
365 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
366 hlen = ip->ip_hl << 2;
369 * Collect parameters into local variables for faster matching.
371 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
372 proto = args->f_id.proto = 0; /* mark f_id invalid */
373 goto after_ip_checks;
376 proto = args->f_id.proto = ip->ip_p;
379 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
380 offset = ntohs(ip->ip_off) & IP_OFFMASK;
381 ip_len = ntohs(ip->ip_len);
383 offset = ip->ip_off & IP_OFFMASK;
387 #define PULLUP_TO(len) \
389 if (m->m_len < (len)) { \
390 args->m = m = m_pullup(m, (len)); \
392 goto pullup_failed; \
393 ip = mtod(m, struct ip *); \
403 PULLUP_TO(hlen + sizeof(struct tcphdr));
404 tcp = L3HDR(struct tcphdr, ip);
405 dst_port = tcp->th_dport;
406 src_port = tcp->th_sport;
407 args->f_id.flags = tcp->th_flags;
415 PULLUP_TO(hlen + sizeof(struct udphdr));
416 udp = L3HDR(struct udphdr, ip);
417 dst_port = udp->uh_dport;
418 src_port = udp->uh_sport;
425 L3HDR(struct icmp, ip)->icmp_type;
435 args->f_id.src_ip = ntohl(src_ip.s_addr);
436 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
437 args->f_id.src_port = src_port = ntohs(src_port);
438 args->f_id.dst_port = dst_port = ntohs(dst_port);
443 * Packet has already been tagged. Look for the next rule
444 * to restart processing.
446 * If fw3_one_pass != 0 then just accept it.
447 * XXX should not happen here, but optimized out in
453 /* This rule is being/has been flushed */
457 f = args->rule->next_rule;
459 f = lookup_next_rule(args->rule);
462 * Find the starting rule. It can be either the first
463 * one, or the one after divert_rule if asked so.
467 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
469 divinfo = m_tag_data(mtag);
470 skipto = divinfo->skipto;
475 f = ctx->ipfw_rule_chain;
476 if (args->eh == NULL && skipto != 0) {
477 /* No skipto during rule flushing */
481 if (skipto >= IPFW_DEFAULT_RULE) {
482 return IP_FW_DENY; /* invalid */
484 while (f && f->rulenum <= skipto) {
487 if (f == NULL) { /* drop packet */
490 } else if (ipfw_flushing) {
491 /* Rules are being flushed; skip to default rule */
492 f = ctx->ipfw_default_rule;
495 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) {
496 m_tag_delete(m, mtag);
500 * Now scan the rules, and parse microinstructions for each rule.
502 for (; f; f = f->next) {
506 again: /* check the rule again*/
507 if (ctx->ipfw_set_disable & (1 << f->set)) {
511 for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen,
512 cmd=(ipfw_insn *)((uint32_t *)cmd+ cmdlen)) {
514 check_body: /* check the body of the rule again.*/
516 (filter_funcs[cmd->module][cmd->opcode])
517 (&cmd_ctl, &cmd_val, &args, &f, cmd, ip_len);
521 case IP_FW_CTL_AGAIN:
528 case IP_FW_CTL_CHK_STATE:
529 /* update the cmd and l */
531 l = f->cmd_len - f->act_ofs;
534 if (cmd->len & F_NOT)
538 } /* end of inner for, scan opcodes */
540 next_rule:; /* try next rule */
542 } /* end of outer for, scan rules */
543 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
547 /* Update statistics */
550 f->timestamp = time_second;
555 kprintf("pullup failed\n");
560 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
565 const struct ipfw_flow_id *id;
566 struct dn_flow_id *fid;
570 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
575 m_tag_prepend(m, mtag);
577 pkt = m_tag_data(mtag);
578 bzero(pkt, sizeof(*pkt));
580 cmd = fwa->rule->cmd + fwa->rule->act_ofs;
581 KASSERT(cmd->opcode == O_DUMMYNET_PIPE ||
582 cmd->opcode == O_DUMMYNET_QUEUE,
583 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
586 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
588 pkt->pipe_nr = pipe_nr;
590 pkt->msgport = netisr_curport();
594 fid->fid_dst_ip = id->dst_ip;
595 fid->fid_src_ip = id->src_ip;
596 fid->fid_dst_port = id->dst_port;
597 fid->fid_src_port = id->src_port;
598 fid->fid_proto = id->proto;
599 fid->fid_flags = id->flags;
601 pkt->dn_priv = fwa->rule;
603 if ((int)cmd->opcode == O_DUMMYNET_PIPE)
604 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
606 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
610 ipfw_inc_static_count(struct ip_fw *rule)
612 /* Static rule's counts are updated only on CPU0 */
613 KKASSERT(mycpuid == 0);
616 static_ioc_len += IOC_RULESIZE(rule);
620 ipfw_dec_static_count(struct ip_fw *rule)
622 int l = IOC_RULESIZE(rule);
624 /* Static rule's counts are updated only on CPU0 */
625 KKASSERT(mycpuid == 0);
627 KASSERT(static_count > 0, ("invalid static count %u", static_count));
630 KASSERT(static_ioc_len >= l,
631 ("invalid static len %u", static_ioc_len));
636 ipfw_add_rule_dispatch(netmsg_t nmsg)
638 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
639 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
640 struct ip_fw *rule, *prev,*next;
641 const struct ipfw_ioc_rule *ioc_rule;
643 ioc_rule = fwmsg->ioc_rule;
644 // create rule by ioc_rule
645 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW3, M_WAITOK | M_ZERO);
646 rule->act_ofs = ioc_rule->act_ofs;
647 rule->cmd_len = ioc_rule->cmd_len;
648 rule->rulenum = ioc_rule->rulenum;
649 rule->set = ioc_rule->set;
650 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4);
652 for (prev = NULL, next = ctx->ipfw_rule_chain;
653 next; prev = next, next = next->next) {
654 if (next->rulenum > ioc_rule->rulenum) {
658 KASSERT(next != NULL, ("no default rule?!"));
661 * Insert rule into the pre-determined position
667 rule->next = ctx->ipfw_rule_chain;
668 ctx->ipfw_rule_chain = rule;
* If a sibling rule exists on the previous CPU,
* its sibling pointer should reference the current rule.
675 if (fwmsg->sibling != NULL) {
676 fwmsg->sibling->sibling = rule;
678 /* prepare for next CPU */
679 fwmsg->sibling = rule;
682 /* Statistics only need to be updated once */
683 ipfw_inc_static_count(rule);
685 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
689 * confirm the rulenumber
690 * call dispatch function to add rule into the list
691 * Update the statistic
694 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule)
696 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
697 struct netmsg_ipfw fwmsg;
698 struct netmsg_base *nmsg;
701 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
704 * If rulenum is 0, find highest numbered rule before the
705 * default rule, and add rule number incremental step.
707 if (ioc_rule->rulenum == 0) {
708 int step = autoinc_step;
710 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
711 step <= IPFW_AUTOINC_STEP_MAX);
714 * Locate the highest numbered rule before default
716 for (f = ctx->ipfw_rule_chain; f; f = f->next) {
717 if (f->rulenum == IPFW_DEFAULT_RULE)
719 ioc_rule->rulenum = f->rulenum;
721 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
722 ioc_rule->rulenum += step;
724 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
725 ioc_rule->rulenum != 0,
726 ("invalid rule num %d", ioc_rule->rulenum));
728 bzero(&fwmsg, sizeof(fwmsg));
730 netmsg_init(nmsg, NULL, &curthread->td_msgport,
731 0, ipfw_add_rule_dispatch);
732 fwmsg.ioc_rule = ioc_rule;
734 ifnet_domsg(&nmsg->lmsg, 0);
736 DPRINTF("++ installed rule %d, static count now %d\n",
737 rule->rulenum, static_count);
741 * Free storage associated with a static rule (including derived
743 * The caller is in charge of clearing rule pointers to avoid
745 * @return a pointer to the next entry.
746 * Arguments are not checked, so they better be correct.
747 * Must be called at splimp().
749 static struct ip_fw *
750 ipfw_delete_rule(struct ipfw_context *ctx,
751 struct ip_fw *prev, struct ip_fw *rule)
754 ctx->ipfw_rule_chain = rule->next;
756 prev->next = rule->next;
758 if (mycpuid == IPFW_CFGCPUID)
759 ipfw_dec_static_count(rule);
761 kfree(rule, M_IPFW3);
767 ipfw_flush_rule_dispatch(netmsg_t nmsg)
769 struct lwkt_msg *lmsg = &nmsg->lmsg;
770 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
771 struct ip_fw *rule, *the_rule;
772 int kill_default = lmsg->u.ms_result;
774 rule = ctx->ipfw_rule_chain;
775 while (rule != NULL) {
776 if (rule->rulenum == IPFW_DEFAULT_RULE && kill_default == 0) {
777 ctx->ipfw_rule_chain = rule;
782 if (mycpuid == IPFW_CFGCPUID)
783 ipfw_dec_static_count(the_rule);
785 kfree(the_rule, M_IPFW3);
788 ifnet_forwardmsg(lmsg, mycpuid + 1);
792 ipfw_append_state_dispatch(netmsg_t nmsg)
794 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
795 struct ipfw_ioc_state *ioc_state = dmsg->ioc_state;
796 (*ipfw_basic_append_state_prt)(ioc_state);
797 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Per-CPU dispatch: locate the rule matching dmsg->rulenum on this
 * CPU, flush the states hanging off it via the basic module's
 * flush-state handler, then forward the netmsg to the next CPU.
 * NOTE(review): the loop's break/advance lines are missing from this
 * extraction; whether a NULL `rule` (no match, e.g. rulenum 0 for
 * "flush all") is passed through to the handler cannot be confirmed
 * here — verify against the full source.
 */
ipfw_delete_state_dispatch(netmsg_t nmsg)
struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
struct ipfw_context *ctx = ipfw_ctx[mycpuid];
struct ip_fw *rule = ctx->ipfw_rule_chain;
/* find the first rule with the requested rule number */
while (rule != NULL) {
if (rule->rulenum == dmsg->rulenum) {
(*ipfw_basic_flush_state_prt)(rule);
ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
818 * Deletes all rules from a chain (including the default rule
819 * if the second argument is set).
820 * Must be called at splimp().
823 ipfw_ctl_flush_rule(int kill_default)
825 struct netmsg_del dmsg;
826 struct netmsg_base nmsg;
827 struct lwkt_msg *lmsg;
829 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
832 * If 'kill_default' then caller has done the necessary
833 * msgport syncing; unnecessary to do it again.
837 * Let ipfw_chk() know the rules are going to
838 * be flushed, so it could jump directly to
842 netmsg_service_sync();
846 * if ipfw_basic_flush_state_prt
847 * flush all states in all CPU
849 if (ipfw_basic_flush_state_prt != NULL) {
850 bzero(&dmsg, sizeof(dmsg));
851 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
852 0, ipfw_delete_state_dispatch);
853 ifnet_domsg(&dmsg.base.lmsg, 0);
856 * Press the 'flush' button
858 bzero(&nmsg, sizeof(nmsg));
859 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
860 0, ipfw_flush_rule_dispatch);
862 lmsg->u.ms_result = kill_default;
863 ifnet_domsg(lmsg, 0);
866 KASSERT(static_count == 0,
867 ("%u static rules remain", static_count));
868 KASSERT(static_ioc_len == 0,
869 ("%u bytes of static rules remain", static_ioc_len));
877 ipfw_delete_rule_dispatch(netmsg_t nmsg)
879 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
880 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
881 struct ip_fw *rule, *prev = NULL;
883 rule = ctx->ipfw_rule_chain;
885 if (rule->rulenum == dmsg->rulenum) {
886 ipfw_delete_rule(ctx, prev, rule);
893 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
897 ipfw_alt_delete_rule(uint16_t rulenum)
899 struct netmsg_del dmsg;
900 struct netmsg_base *nmsg;
903 * delete the state which stub is the rule
904 * which belongs to the CPU and the rulenum
906 bzero(&dmsg, sizeof(dmsg));
908 netmsg_init(nmsg, NULL, &curthread->td_msgport,
909 0, ipfw_delete_state_dispatch);
910 dmsg.rulenum = rulenum;
911 ifnet_domsg(&nmsg->lmsg, 0);
914 * Get rid of the rule duplications on all CPUs
916 bzero(&dmsg, sizeof(dmsg));
918 netmsg_init(nmsg, NULL, &curthread->td_msgport,
919 0, ipfw_delete_rule_dispatch);
920 dmsg.rulenum = rulenum;
921 ifnet_domsg(&nmsg->lmsg, 0);
926 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
928 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
929 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
930 struct ip_fw *prev, *rule;
936 rule = ctx->ipfw_rule_chain;
937 while (rule != NULL) {
938 if (rule->set == dmsg->from_set) {
939 rule = ipfw_delete_rule(ctx, prev, rule);
948 KASSERT(del, ("no match set?!"));
950 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
954 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
956 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
957 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
963 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
964 if (rule->set == dmsg->from_set) {
970 KASSERT(cleared, ("no match set?!"));
972 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
976 ipfw_alt_delete_ruleset(uint8_t set)
978 struct netmsg_del dmsg;
979 struct netmsg_base *nmsg;
982 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
985 * Check whether the 'set' exists. If it exists,
986 * then check whether any rules within the set will
987 * try to create states.
991 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
992 if (rule->set == set) {
997 return 0; /* XXX EINVAL? */
1001 * Clear the STATE flag, so no more states will be
1002 * created based the rules in this set.
1004 bzero(&dmsg, sizeof(dmsg));
1006 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1007 0, ipfw_disable_ruleset_state_dispatch);
1008 dmsg.from_set = set;
1010 ifnet_domsg(&nmsg->lmsg, 0);
1016 bzero(&dmsg, sizeof(dmsg));
1018 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1019 0, ipfw_alt_delete_ruleset_dispatch);
1020 dmsg.from_set = set;
1022 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * Per-CPU dispatch: starting at this CPU's copy of the first matching
 * rule (dmsg->start_rule), reassign every rule numbered
 * dmsg->rulenum to set dmsg->to_set, then forward to the next CPU.
 * NOTE(review): the locals, the advance of `rule`, and the update of
 * dmsg->start_rule to the sibling rule on the next CPU are missing
 * from this extraction — hedged.
 */
ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
rule = dmsg->start_rule;
/*
 * Move to the position on the next CPU
 * before the msg is forwarded.
 */
/* rules are number-sorted, so stop past dmsg->rulenum */
while (rule && rule->rulenum <= dmsg->rulenum) {
if (rule->rulenum == dmsg->rulenum)
rule->set = dmsg->to_set;
ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1048 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
1050 struct netmsg_del dmsg;
1051 struct netmsg_base *nmsg;
1053 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1056 * Locate first rule to move
1058 for (rule = ctx->ipfw_rule_chain;
1059 rule && rule->rulenum <= rulenum; rule = rule->next) {
1060 if (rule->rulenum == rulenum && rule->set != set)
1063 if (rule == NULL || rule->rulenum > rulenum)
1064 return 0; /* XXX error? */
1066 bzero(&dmsg, sizeof(dmsg));
1068 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1069 0, ipfw_alt_move_rule_dispatch);
1070 dmsg.start_rule = rule;
1071 dmsg.rulenum = rulenum;
1074 ifnet_domsg(&nmsg->lmsg, 0);
1075 KKASSERT(dmsg.start_rule == NULL);
1080 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
1082 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1083 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1086 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1087 if (rule->set == dmsg->from_set)
1088 rule->set = dmsg->to_set;
1090 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1094 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
1096 struct netmsg_del dmsg;
1097 struct netmsg_base *nmsg;
1099 bzero(&dmsg, sizeof(dmsg));
1101 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1102 0, ipfw_alt_move_ruleset_dispatch);
1103 dmsg.from_set = from_set;
1104 dmsg.to_set = to_set;
1106 ifnet_domsg(&nmsg->lmsg, 0);
1111 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
1113 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1114 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1117 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1118 if (rule->set == dmsg->from_set)
1119 rule->set = dmsg->to_set;
1120 else if (rule->set == dmsg->to_set)
1121 rule->set = dmsg->from_set;
1123 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1127 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
1129 struct netmsg_del dmsg;
1130 struct netmsg_base *nmsg;
1132 bzero(&dmsg, sizeof(dmsg));
1134 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1135 0, ipfw_alt_swap_ruleset_dispatch);
1136 dmsg.from_set = set1;
1139 ifnet_domsg(&nmsg->lmsg, 0);
1145 ipfw_ctl_alter(uint32_t arg)
1148 uint8_t cmd, new_set;
1151 rulenum = arg & 0xffff;
1152 cmd = (arg >> 24) & 0xff;
1153 new_set = (arg >> 16) & 0xff;
1157 if (new_set >= IPFW_DEFAULT_SET)
1159 if (cmd == 0 || cmd == 2) {
1160 if (rulenum == IPFW_DEFAULT_RULE)
1163 if (rulenum >= IPFW_DEFAULT_SET)
1168 case 0: /* delete rules with given number */
1169 error = ipfw_alt_delete_rule(rulenum);
1172 case 1: /* delete all rules with given set number */
1173 error = ipfw_alt_delete_ruleset(rulenum);
1176 case 2: /* move rules with given number to new set */
1177 error = ipfw_alt_move_rule(rulenum, new_set);
1180 case 3: /* move rules with given set number to new set */
1181 error = ipfw_alt_move_ruleset(rulenum, new_set);
1184 case 4: /* swap two sets */
1185 error = ipfw_alt_swap_ruleset(rulenum, new_set);
1192 * Clear counters for a specific rule.
1195 clear_counters(struct ip_fw *rule)
1197 rule->bcnt = rule->pcnt = 0;
1198 rule->timestamp = 0;
/*
 * Per-CPU dispatch for counter zeroing: rulenum 0 clears every rule's
 * counters on this CPU; otherwise only the rule(s) matching
 * zmsg->rulenum are cleared. The netmsg is then forwarded to the
 * next CPU.
 * NOTE(review): braces, the `rule` declaration and the likely `break`
 * after a match are missing from this extraction — hedged. Whether
 * zmsg->log_only is honored here cannot be confirmed from these lines.
 */
ipfw_zero_entry_dispatch(netmsg_t nmsg)
struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
struct ipfw_context *ctx = ipfw_ctx[mycpuid];
if (zmsg->rulenum == 0) {
/* zero every rule on this CPU */
for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
clear_counters(rule);
/* zero only the rule(s) with the requested number */
for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
if (rule->rulenum == zmsg->rulenum) {
clear_counters(rule);
ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
1223 * Reset some or all counters on firewall rules.
1224 * @arg frwl is null to clear all entries, or contains a specific
1226 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1229 ipfw_ctl_zero_entry(int rulenum, int log_only)
1231 struct netmsg_zent zmsg;
1232 struct netmsg_base *nmsg;
1234 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1236 bzero(&zmsg, sizeof(zmsg));
1238 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1239 0, ipfw_zero_entry_dispatch);
1240 zmsg.log_only = log_only;
1243 msg = log_only ? "ipfw: All logging counts reset.\n"
1244 : "ipfw: Accounting cleared.\n";
1249 * Locate the first rule with 'rulenum'
1251 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1252 if (rule->rulenum == rulenum)
1255 if (rule == NULL) /* we did not find any matching rules */
1257 zmsg.start_rule = rule;
1258 zmsg.rulenum = rulenum;
1260 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
1261 : "ipfw: Entry %d cleared.\n";
1263 ifnet_domsg(&nmsg->lmsg, 0);
1264 KKASSERT(zmsg.start_rule == NULL);
1267 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
1272 ipfw_ctl_add_state(struct sockopt *sopt)
1274 struct ipfw_ioc_state *ioc_state;
1275 ioc_state = sopt->sopt_val;
1276 if (ipfw_basic_append_state_prt != NULL) {
1277 struct netmsg_del dmsg;
1278 bzero(&dmsg, sizeof(dmsg));
1279 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
1280 0, ipfw_append_state_dispatch);
1281 (&dmsg)->ioc_state = ioc_state;
1282 ifnet_domsg(&dmsg.base.lmsg, 0);
1288 ipfw_ctl_delete_state(struct sockopt *sopt)
1290 int rulenum = 0, error;
1291 if (sopt->sopt_valsize != 0) {
1292 error = soopt_to_kbuf(sopt, &rulenum, sizeof(int), sizeof(int));
1297 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1298 struct ip_fw *rule = ctx->ipfw_rule_chain;
1300 while (rule!=NULL) {
1301 if (rule->rulenum == rulenum) {
1310 struct netmsg_del dmsg;
1311 struct netmsg_base *nmsg;
1313 * delete the state which stub is the rule
1314 * which belongs to the CPU and the rulenum
1316 bzero(&dmsg, sizeof(dmsg));
1318 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1319 0, ipfw_delete_state_dispatch);
1320 dmsg.rulenum = rulenum;
1321 ifnet_domsg(&nmsg->lmsg, 0);
1326 ipfw_ctl_flush_state(struct sockopt *sopt)
1328 struct netmsg_del dmsg;
1329 struct netmsg_base *nmsg;
1331 * delete the state which stub is the rule
1332 * which belongs to the CPU and the rulenum
1334 bzero(&dmsg, sizeof(dmsg));
1336 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1337 0, ipfw_delete_state_dispatch);
1339 ifnet_domsg(&nmsg->lmsg, 0);
1344 * Get the ioc_rule from the sopt
1345 * call ipfw_add_rule to add the rule
1348 ipfw_ctl_add_rule(struct sockopt *sopt)
1350 struct ipfw_ioc_rule *ioc_rule;
1353 size = sopt->sopt_valsize;
1354 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
1355 size < sizeof(*ioc_rule)) {
1358 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
1359 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
1360 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
1362 ioc_rule = sopt->sopt_val;
1364 ipfw_add_rule(ioc_rule);
1369 ipfw_copy_state(struct ip_fw_state *state, struct ipfw_ioc_state *ioc_state, int cpuid)
1371 ioc_state->pcnt = state->pcnt;
1372 ioc_state->bcnt = state->bcnt;
1373 ioc_state->lifetime = state->lifetime;
1374 ioc_state->timestamp = state->timestamp;
1375 ioc_state->cpuid = cpuid;
1376 ioc_state->expiry = state->expiry;
1377 ioc_state->rulenum = state->stub->rulenum;
1379 bcopy(&state->flow_id, &ioc_state->flow_id, sizeof(struct ipfw_flow_id));
1380 return ioc_state + 1;
1384 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
1386 const struct ip_fw *sibling;
1391 ioc_rule->act_ofs = rule->act_ofs;
1392 ioc_rule->cmd_len = rule->cmd_len;
1393 ioc_rule->rulenum = rule->rulenum;
1394 ioc_rule->set = rule->set;
1396 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
1397 ioc_rule->static_count = static_count;
1398 ioc_rule->static_len = static_ioc_len;
1402 ioc_rule->timestamp = 0;
1409 ioc_rule->timestamp = 0;
1410 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
1411 ioc_rule->pcnt += sibling->pcnt;
1412 ioc_rule->bcnt += sibling->bcnt;
1413 if (sibling->timestamp > ioc_rule->timestamp)
1414 ioc_rule->timestamp = sibling->timestamp;
1420 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
1422 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
1424 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
1428 ipfw_ctl_get_modules(struct sockopt *sopt)
1431 struct ipfw_module *mod;
1432 char module_str[1024];
1433 memset(module_str,0,1024);
1434 for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
1435 if (mod->type != 0) {
1437 strcat(module_str,",");
1438 strcat(module_str,mod->name);
1441 bzero(sopt->sopt_val, sopt->sopt_valsize);
1442 bcopy(module_str, sopt->sopt_val, strlen(module_str));
1443 sopt->sopt_valsize = strlen(module_str);
1448 * Copy all static rules and states on all CPU
1451 ipfw_ctl_get_rules(struct sockopt *sopt)
1453 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1454 struct ipfw_state_context *state_ctx;
1456 struct ip_fw_state *state;
1459 int i, j, state_count = 0;
1461 size = static_ioc_len;
1462 for (i = 0; i < ncpus; i++) {
1463 for (j = 0; j < ctx->state_hash_size; j++) {
1464 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1465 state_count += state_ctx->count;
1468 if (state_count > 0) {
1469 size += state_count * sizeof(struct ipfw_ioc_state);
1472 if (sopt->sopt_valsize < size) {
1473 /* XXX TODO sopt_val is not big enough */
1474 bzero(sopt->sopt_val, sopt->sopt_valsize);
1478 sopt->sopt_valsize = size;
1479 bp = sopt->sopt_val;
1481 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1482 bp = ipfw_copy_rule(rule, bp);
1484 if (state_count > 0 ) {
1485 for (i = 0; i < ncpus; i++) {
1486 for (j = 0; j < ctx->state_hash_size; j++) {
1487 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1488 state = state_ctx->state;
1489 while (state != NULL) {
1490 bp = ipfw_copy_state(state, bp, i);
1491 state = state->next;
/*
 * ipfw_set_disable_dispatch - per-CPU handler: install the new set-disable
 * mask into this CPU's context, then forward the message to the next CPU.
 */
1500 ipfw_set_disable_dispatch(netmsg_t nmsg)
1502 struct lwkt_msg *lmsg = &nmsg->lmsg;
1503 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1505 ctx->ipfw_set_disable = lmsg->u.ms_result32;
1507 ifnet_forwardmsg(lmsg, mycpuid + 1);
/*
 * ipfw_ctl_set_disable - compute the new rule-set disable mask and push it
 * to every CPU synchronously via a forwarded netmsg.
 */
1511 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
1513 struct netmsg_base nmsg;
1514 struct lwkt_msg *lmsg;
1515 uint32_t set_disable;
1517 /* IPFW_DEFAULT_SET is always enabled */
1518 enable |= (1 << IPFW_DEFAULT_SET);
/* Merge the requested disables into the current mask; enables win. */
1519 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
1521 bzero(&nmsg, sizeof(nmsg));
1522 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1523 0, ipfw_set_disable_dispatch);
1525 lmsg->u.ms_result32 = set_disable;
/* Start the per-CPU round-trip on CPU 0; returns when all CPUs are done. */
1527 ifnet_domsg(lmsg, 0);
1532 * ipfw_ctl_x - extended version of ipfw_ctl
1533 * Strip the x_header and adjust sopt_name, sopt_val and sopt_valsize accordingly.
1536 ipfw_ctl_x(struct sockopt *sopt)
1538 ip_fw_x_header *x_header;
1539 x_header = (ip_fw_x_header *)(sopt->sopt_val);
/* Replace sopt_name with the real opcode carried inside the x_header. */
1540 sopt->sopt_name = x_header->opcode;
1541 sopt->sopt_valsize -= sizeof(ip_fw_x_header);
/* Shift the payload down over the header (bcopy tolerates the overlap). */
1542 bcopy(++x_header, sopt->sopt_val, sopt->sopt_valsize);
1543 return ipfw_ctl(sopt);
1548 * {set|get}sockopt parser.
/*
 * ipfw_ctl - {set|get}sockopt dispatcher for all IP_FW3 control operations
 * (rules, states, NAT, dummynet, logging).
 */
1551 ipfw_ctl(struct sockopt *sopt)
1558 switch (sopt->sopt_name) {
1563 error = ipfw_ctl_get_rules(sopt);
1566 error = ipfw_ctl_get_modules(sopt);
/* Flush all rules but keep the default rule (argument 0). */
1570 ipfw_ctl_flush_rule(0);
1574 error = ipfw_ctl_add_rule(sopt);
1579 * IP_FW_DEL is used for deleting single rules or sets,
1580 * and (ab)used to atomically manipulate sets.
1581 * Argument size is used to distinguish between the two:
1583 * delete single rule or set of rules,
1584 * or reassign rules (or sets) to a different set.
1585 * 2 * sizeof(uint32_t)
1586 * atomic disable/enable sets.
1587 * first uint32_t contains sets to be disabled,
1588 * second uint32_t contains sets to be enabled.
1590 masks = sopt->sopt_val;
1591 size = sopt->sopt_valsize;
1592 if (size == sizeof(*masks)) {
1594 * Delete or reassign static rule
1596 error = ipfw_ctl_alter(masks[0]);
1597 } else if (size == (2 * sizeof(*masks))) {
1599 * Set enable/disable
1601 ipfw_ctl_set_disable(masks[0], masks[1]);
1607 case IP_FW_RESETLOG: /* argument is an int, the rule number */
1609 if (sopt->sopt_valsize != 0) {
1610 error = soopt_to_kbuf(sopt, &rulenum,
1611 sizeof(int), sizeof(int));
/* ZERO and RESETLOG share one helper; the flag selects the behavior. */
1616 error = ipfw_ctl_zero_entry(rulenum,
1617 sopt->sopt_name == IP_FW_RESETLOG);
1620 error = ipfw_nat_cfg_ptr(sopt);
1623 error = ipfw_nat_del_ptr(sopt);
1625 case IP_FW_NAT_FLUSH:
1626 error = ipfw_nat_flush_ptr(sopt);
1629 error = ipfw_nat_get_cfg_ptr(sopt);
1632 error = ipfw_nat_get_log_ptr(sopt);
/* Dummynet traffic-shaper options are forwarded wholesale. */
1634 case IP_DUMMYNET_GET:
1635 case IP_DUMMYNET_CONFIGURE:
1636 case IP_DUMMYNET_DEL:
1637 case IP_DUMMYNET_FLUSH:
1638 error = ip_dn_sockopt(sopt);
1640 case IP_FW_STATE_ADD:
1641 error = ipfw_ctl_add_state(sopt);
1643 case IP_FW_STATE_DEL:
1644 error = ipfw_ctl_delete_state(sopt);
1646 case IP_FW_STATE_FLUSH:
1647 error = ipfw_ctl_flush_state(sopt);
1650 kprintf("ipfw_ctl invalid option %d\n",
/*
 * ipfw_check_in - pfil(9) input hook: run the inbound packet through the
 * firewall and act on the verdict (pass, dummynet pipe, divert, drop).
 */
1658 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1660 struct ip_fw_args args;
1661 struct mbuf *m = *m0;
1663 int tee = 0, error = 0, ret;
/* Packet re-injected by dummynet: resume at the rule saved in the tag. */
1665 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1666 /* Extract info from dummynet tag */
1667 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1668 KKASSERT(mtag != NULL);
1669 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1670 KKASSERT(args.rule != NULL);
1672 m_tag_delete(m, mtag);
1673 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1681 ret = ipfw_chk(&args);
1698 case IP_FW_DUMMYNET:
1699 /* Send packet to the appropriate pipe */
1700 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
1709 * Must clear bridge tag when changing
1711 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
/* Divert case: hand the packet to the divert socket (1 == incoming). */
1712 if (ip_divert_p != NULL) {
1713 m = ip_divert_p(m, tee, 1);
1717 /* not sure this is the right error msg */
1727 panic("unknown ipfw return value: %d", ret);
/*
 * ipfw_check_out - pfil(9) output hook: run the outbound packet through
 * the firewall and act on the verdict (pass, dummynet pipe, divert, drop).
 */
1735 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1737 struct ip_fw_args args;
1738 struct mbuf *m = *m0;
1740 int tee = 0, error = 0, ret;
/* Packet re-injected by dummynet: resume at the rule saved in the tag. */
1742 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1743 /* Extract info from dummynet tag */
1744 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1745 KKASSERT(mtag != NULL);
1746 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1747 KKASSERT(args.rule != NULL);
1749 m_tag_delete(m, mtag);
1750 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1758 ret = ipfw_chk(&args);
1776 case IP_FW_DUMMYNET:
1777 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
/* Divert case: hand the packet to the divert socket (0 == outgoing). */
1785 if (ip_divert_p != NULL) {
1786 m = ip_divert_p(m, tee, 0);
1790 /* not sure this is the right error msg */
1800 panic("unknown ipfw return value: %d", ret);
/*
 * Attach the firewall's in/out hooks to the AF_INET pfil head.
 * Must run on the config port (asserted below); hooks are MPSAFE.
 * NOTE(review): the function header is elided from this view --
 * presumably ipfw_hook; confirm against the full file.
 */
1810 struct pfil_head *pfh;
1811 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1813 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1817 pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
1818 pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
/*
 * Detach the firewall's in/out hooks from the AF_INET pfil head.
 * Must run on the config port (asserted below).
 * NOTE(review): the function header is elided from this view --
 * presumably ipfw_dehook; confirm against the full file.
 */
1824 struct pfil_head *pfh;
1826 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1828 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1832 pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
1833 pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
/*
 * ipfw_sysctl_enable_dispatch - config-port handler: apply a new value of
 * the fw3_enable sysctl (hook/dehook logic is in the elided lines).
 */
1837 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
1839 struct lwkt_msg *lmsg = &nmsg->lmsg;
1840 int enable = lmsg->u.ms_result;
/* No-op when the requested state already matches (reply path elided). */
1842 if (fw3_enable == enable)
1845 fw3_enable = enable;
1852 lwkt_replymsg(lmsg, 0);
/*
 * ipfw_sysctl_enable - sysctl handler for fw3_enable; forwards the new
 * value to the config port so the change is applied in a serialized
 * context.
 */
1856 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
1858 struct netmsg_base nmsg;
1859 struct lwkt_msg *lmsg;
1862 enable = fw3_enable;
1863 error = sysctl_handle_int(oidp, &enable, 0, req);
/* Read-only access or handler error: bail out (return path elided). */
1864 if (error || req->newptr == NULL)
1867 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1868 0, ipfw_sysctl_enable_dispatch);
1870 lmsg->u.ms_result = enable;
/* Synchronous dispatch; returns the dispatch handler's reply code. */
1872 return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
/* Sysctl handler: clamp the rule-number autoinc step into [MIN, MAX]. */
1876 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
1878 return sysctl_int_range(oidp, arg1, arg2, req,
1879 IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
/*
 * ipfw_ctx_init_dispatch - runs once per CPU (message forwarded CPU by
 * CPU): allocate this CPU's ipfw_context, build and install the default
 * rule, and chain it to the previous CPU's copy via the sibling pointer.
 */
1884 ipfw_ctx_init_dispatch(netmsg_t nmsg)
1886 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
1887 struct ipfw_context *ctx;
1888 struct ip_fw *def_rule;
/* The NAT context is global, so allocate it only on the first CPU. */
1890 if (mycpuid == 0 ) {
1891 ipfw_nat_ctx = kmalloc(sizeof(struct ipfw_nat_context),
1892 M_IPFW3, M_WAITOK | M_ZERO);
1895 ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW3, M_WAITOK | M_ZERO);
1896 ipfw_ctx[mycpuid] = ctx;
1898 def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW3, M_WAITOK | M_ZERO);
1899 def_rule->act_ofs = 0;
1900 def_rule->rulenum = IPFW_DEFAULT_RULE;
1901 def_rule->cmd_len = 2;
1902 def_rule->set = IPFW_DEFAULT_SET;
1904 def_rule->cmd[0].len = LEN_OF_IPFWINSN;
1905 def_rule->cmd[0].module = MODULE_BASIC_ID;
/* Default verdict: compile-time option wins, else the sysctl tunable. */
1906 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1907 def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1909 if (filters_default_to_accept)
1910 def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1912 def_rule->cmd[0].opcode = O_BASIC_DENY;
1915 /* Install the default rule */
1916 ctx->ipfw_default_rule = def_rule;
1917 ctx->ipfw_rule_chain = def_rule;
1920 * If the previous CPU already created its copy of the default rule,
1921 * link that copy's sibling pointer to this CPU's new rule.
1923 if (fwmsg->sibling != NULL) {
1924 fwmsg->sibling->sibling = def_rule;
1926 /* prepare for next CPU */
1927 fwmsg->sibling = def_rule;
1929 /* Statistics only need to be updated once */
1931 ipfw_inc_static_count(def_rule);
/* Pass the message on so the next CPU initializes its context. */
1933 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_init_dispatch - runs on the config port: build the per-CPU contexts
 * and default rules, publish the firewall entry points, and log the
 * effective default policy and logging configuration.
 */
1937 ipfw_init_dispatch(netmsg_t nmsg)
1939 struct netmsg_ipfw fwmsg;
1942 kprintf("IP firewall already loaded\n")
1947 bzero(&fwmsg, sizeof(fwmsg));
1948 netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
1949 0, ipfw_ctx_init_dispatch);
/* Circulate the context-init message through every CPU, starting at 0. */
1950 ifnet_domsg(&fwmsg.base.lmsg, 0);
/* Publish entry points used by the IP input/output paths and dummynet. */
1952 ip_fw_chk_ptr = ipfw_chk;
1953 ip_fw_ctl_x_ptr = ipfw_ctl_x;
1954 ip_fw_dn_io_ptr = ipfw_dummynet_io;
1956 kprintf("ipfw3 initialized, default to %s, logging ",
1957 (int)(ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode) ==
1958 O_BASIC_ACCEPT ? "accept" : "deny");
1960 #ifdef IPFIREWALL_VERBOSE
1963 #ifdef IPFIREWALL_VERBOSE_LIMIT
1964 verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
1966 if (fw_verbose == 0) {
1967 kprintf("disabled ");
1968 } else if (verbose_limit == 0) {
1969 kprintf("unlimited ");
1971 kprintf("limited to %d packets/entry by default ",
1979 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * ipfw_init - module load entry: run ipfw_init_dispatch synchronously on
 * the config port. (Function header elided from this view.)
 */
1985 struct netmsg_base smsg;
1987 netmsg_init(&smsg, NULL, &curthread->td_msgport,
1988 0, ipfw_init_dispatch);
1989 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
/*
 * ipfw_fini_dispatch - teardown on the config port: quiesce other CPUs,
 * clear the published entry points, flush all rules (including the
 * default rule), and free per-CPU and NAT contexts.
 */
1995 ipfw_fini_dispatch(netmsg_t nmsg)
/* Drain all netisr CPUs so nobody is still executing inside ipfw_chk. */
2002 netmsg_service_sync();
2003 ip_fw_chk_ptr = NULL;
2004 ip_fw_ctl_x_ptr = NULL;
2005 ip_fw_dn_io_ptr = NULL;
2006 ipfw_ctl_flush_rule(1 /* kill default rule */);
2007 /* Free per-CPU contexts */
2008 for (cpu = 0; cpu < ncpus; ++cpu) {
2009 if (ipfw_ctx[cpu] != NULL) {
2010 kfree(ipfw_ctx[cpu], M_IPFW3);
2011 ipfw_ctx[cpu] = NULL;
2014 kfree(ipfw_nat_ctx,M_IPFW3);
2015 ipfw_nat_ctx = NULL;
2016 kprintf("IP firewall unloaded\n");
2018 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * ipfw_fini - module unload entry: run ipfw_fini_dispatch synchronously
 * on the config port. (Function header elided from this view.)
 */
2024 struct netmsg_base smsg;
2025 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2026 0, ipfw_fini_dispatch);
2027 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2030 #endif /* KLD_MODULE */
/*
 * ipfw3_modevent - kernel module event handler (MOD_LOAD / MOD_UNLOAD);
 * unloading is refused when compiled statically. (Body largely elided.)
 */
2033 ipfw3_modevent(module_t mod, int type, void *unused)
2045 kprintf("ipfw statically compiled, cannot unload\n");
/* Module registration: hook ipfw3 into the boot order at SI_SUB_PROTO_END. */
2057 static moduledata_t ipfw3mod = {
2062 DECLARE_MODULE(ipfw3, ipfw3mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2063 MODULE_VERSION(ipfw3, 1);