2 * Copyright (c) 2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Bill Yuan <bycn82@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #error IPFIREWALL requires INET.
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
46 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <sys/ucred.h>
53 #include <sys/in_cksum.h>
55 #include <sys/thread2.h>
56 #include <sys/mplock2.h>
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in_var.h>
61 #include <netinet/in_pcb.h>
62 #include <netinet/ip.h>
63 #include <netinet/ip_var.h>
64 #include <netinet/ip_icmp.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_timer.h>
67 #include <netinet/tcp_var.h>
68 #include <netinet/tcpip.h>
69 #include <netinet/udp.h>
70 #include <netinet/udp_var.h>
71 #include <netinet/ip_divert.h>
72 #include <netinet/if_ether.h>
75 #include <net/route.h>
77 #include <net/netmsg2.h>
79 #include <net/ipfw2/ip_fw.h>
80 #include <net/ipfw2_basic/ip_fw2_basic.h>
81 #include <net/ipfw2_nat/ip_fw2_nat.h>
82 #include <net/dummynet2/ip_dummynet2.h>
/* Kernel malloc type used for all ipfw2 rule allocations in this file. */
84 MALLOC_DEFINE(M_IPFW2, "IPFW2", "ip_fw2 default module");
/*
 * DPRINTF() logs via kprintf() only when built with IPFIREWALL_DEBUG;
 * otherwise it is a no-op.  NOTE(review): the #else/#endif framing lines
 * are not visible in this listing.
 */
86 #ifdef IPFIREWALL_DEBUG
87 #define DPRINTF(fmt, ...) \
90 kprintf(fmt, __VA_ARGS__); \
93 #define DPRINTF(fmt, ...) ((void)0)
/* Upper bound on opcodes one ipfw2 module may register (filter_funcs cols). */
97 #define MAX_OPCODE_PER_MODULE 100
/* Bounds and default for the rule-number auto-increment step (sysctl-tunable). */
99 #define IPFW_AUTOINC_STEP_MIN 1
100 #define IPFW_AUTOINC_STEP_MAX 1000
101 #define IPFW_AUTOINC_STEP_DEF 100
/*
 * Per-message payloads carried over the lwkt netmsg ports when rule/state
 * operations are replicated across CPUs.
 * NOTE(review): the "struct netmsg_ipfw { ... }" / "struct netmsg_del { ... }"
 * / "struct netmsg_zent { ... }" header and closing-brace lines are absent
 * from this listing; only the member lines below survived.  Presumed layout:
 * members 105-110 belong to netmsg_ipfw (add-rule), 114-118 to netmsg_del
 * (delete/move/state ops), 125-126 to netmsg_zent (zero-counters) -- verify
 * against the upstream source.
 */
105 struct netmsg_base base;
106 const struct ipfw_ioc_rule *ioc_rule;
108 struct ip_fw *next_rule;
109 struct ip_fw *prev_rule;
110 struct ip_fw *sibling; /* sibling in prevous CPU */
114 struct netmsg_base base;
116 struct ip_fw *start_rule;
117 struct ip_fw *prev_rule;
118 struct ipfw_ioc_state *ioc_state;
125 struct netmsg_base base;
126 struct ip_fw *start_rule;
/*
 * Hook pointers filled in by the optional ipfw2_nat module; NULL until the
 * module registers its handlers.
 */
131 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
132 ipfw_nat_cfg_t *ipfw_nat_del_ptr;
133 ipfw_nat_cfg_t *ipfw_nat_flush_ptr;
134 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
135 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
137 /* handlers which implemented in ipfw_basic module */
138 ipfw_basic_delete_state_t *ipfw_basic_flush_state_prt = NULL;
139 ipfw_basic_append_state_t *ipfw_basic_append_state_prt = NULL;
/* Per-CPU firewall contexts; ipfw_ctx[mycpuid] is used throughout. */
141 static struct ipfw_context *ipfw_ctx[MAXCPU];
142 static struct ipfw_nat_context *ipfw_nat_ctx;
144 extern int ip_fw_loaded;
/* Static-rule bookkeeping: maintained only on CPU0 (see ipfw_inc_static_count). */
145 static uint32_t static_count; /* # of static rules */
146 static uint32_t static_ioc_len; /* bytes of static rules */
/* Nonzero while a flush is in progress; ipfw_chk() short-circuits on it. */
147 static int ipfw_flushing;
148 static int fw_verbose;
149 static int verbose_limit;
151 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
/* sysctl handlers and the net.inet.ip.fw2 sysctl tree. */
153 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
154 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
156 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw2, CTLFLAG_RW, 0, "Firewall");
157 SYSCTL_PROC(_net_inet_ip_fw2, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
158 &fw2_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
159 SYSCTL_PROC(_net_inet_ip_fw2, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
160 &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
161 "Rule number autincrement step");
162 SYSCTL_INT(_net_inet_ip_fw2, OID_AUTO,one_pass,CTLFLAG_RW,
164 "Only do a single pass through ipfw when using dummynet(4)");
165 SYSCTL_INT(_net_inet_ip_fw2, OID_AUTO, debug, CTLFLAG_RW,
166 &fw_debug, 0, "Enable printing of debug ip_fw statements");
167 SYSCTL_INT(_net_inet_ip_fw2, OID_AUTO, verbose, CTLFLAG_RW,
168 &fw_verbose, 0, "Log matches to ipfw rules");
169 SYSCTL_INT(_net_inet_ip_fw2, OID_AUTO, verbose_limit, CTLFLAG_RW,
170 &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
171 SYSCTL_INT(_net_inet_ip_fw2, OID_AUTO, static_count, CTLFLAG_RD,
172 &static_count, 0, "Number of static rules");
/*
 * Dispatch table: filter_funcs[module][opcode] is the handler ipfw_chk()
 * invokes for each rule micro-instruction.  ipfw_modules[] records which
 * module ids are loaded.
 */
174 filter_func filter_funcs[MAX_MODULE][MAX_OPCODE_PER_MODULE];
175 struct ipfw_module ipfw_modules[MAX_MODULE];
176 static int ipfw_ctl(struct sockopt *sopt);
/* Built-in module-0 handlers for the basic accept/deny actions. */
180 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
181 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
183 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
184 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
185 void init_module(void);
/*
 * register_ipfw_module() - record a loadable ipfw2 module in the global
 * ipfw_modules[] table: find a free slot (type == 0) and copy the name in.
 * NOTE(review): strncpy() with strlen(module_name) copies no terminating
 * NUL; tmp->name is only NUL-terminated because init_module() pre-zeroes
 * the table -- and a name as long as the field would still overflow.
 * Prefer a sizeof-bounded copy; confirm field size upstream.
 */
189 register_ipfw_module(int module_id,char *module_name)
191 struct ipfw_module *tmp;
195 for (i=0; i < MAX_MODULE; i++) {
196 if (tmp->type == 0) {
199 strncpy(tmp->name, module_name, strlen(module_name));
204 kprintf("ipfw2 module %s loaded ", module_name);
/*
 * unregister_ipfw_module() - detach a module.
 * First walks the current CPU's rule chain instruction-by-instruction to
 * check whether any rule still references this module id (module 0,
 * opcode 0/1 -- the built-in accept/deny -- is exempt); a module that is
 * still in use must not be unloaded.  Then clears the module's slot in
 * ipfw_modules[] and its column in filter_funcs[], keeping opcodes 0 and 1
 * alive for module 0.
 * NOTE(review): 'tmp' is read before any visible initialization; the
 * assignment lines are absent from this listing -- verify upstream.
 */
208 unregister_ipfw_module(int module_id)
210 struct ipfw_module *tmp;
213 int i, len, cmdlen, found;
217 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
218 fw = ctx->ipfw_rule_chain;
219 for (; fw; fw = fw->next) {
220 for (len = fw->cmd_len, cmd = fw->cmd; len > 0;
222 cmd = (ipfw_insn *)((uint32_t *)cmd + cmdlen)) {
224 if (cmd->module == 0 &&
225 (cmd->opcode == 0 || cmd->opcode == 1)) {
226 //action accept or deny
227 } else if (cmd->module == module_id) {
237 for (i = 0; i < MAX_MODULE; i++) {
238 if (tmp->type == 1 && tmp->id == module_id) {
240 kprintf("ipfw2 module %s unloaded ", tmp->name);
246 for (i = 0; i < MAX_OPCODE_PER_MODULE; i++) {
/* module 0, opcodes 0/1 are the built-in accept/deny: never cleared */
247 if (module_id == 0) {
248 if (i ==0 || i == 1) {
252 filter_funcs[module_id][i] = NULL;
/*
 * register_ipfw_filter_funcs() - install the handler for one
 * (module, opcode) pair in the global dispatch table used by ipfw_chk().
 */
259 register_ipfw_filter_funcs(int module, int opcode, filter_func func)
261 filter_funcs[module][opcode] = func;
/*
 * check_accept() - built-in handler for the basic "accept" action:
 * set the verdict to IP_FW_PASS and stop instruction processing.
 */
265 check_accept(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
266 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
268 *cmd_val = IP_FW_PASS;
269 *cmd_ctl = IP_FW_CTL_DONE;
/*
 * check_deny() - built-in handler for the basic "deny" action:
 * set the verdict to IP_FW_DENY and stop instruction processing.
 */
273 check_deny(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
274 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
276 *cmd_val = IP_FW_DENY;
277 *cmd_ctl = IP_FW_CTL_DONE;
/*
 * init_module() body - zero the module table and the dispatch table, then
 * wire up the built-in module-0 accept/deny handlers.
 * NOTE(review): the function header line is absent from this listing;
 * the prototype above declares it as void init_module(void).
 */
283 memset(ipfw_modules, 0, sizeof(struct ipfw_module) * MAX_MODULE);
284 memset(filter_funcs, 0, sizeof(filter_func) *
285 MAX_OPCODE_PER_MODULE * MAX_MODULE);
286 register_ipfw_filter_funcs(0, O_BASIC_ACCEPT,
287 (filter_func)check_accept);
288 register_ipfw_filter_funcs(0, O_BASIC_DENY, (filter_func)check_deny);
/*
 * ipfw_free_rule() - release a rule allocated from the M_IPFW2 malloc type.
 * Caller is responsible for having unlinked it from any chain first.
 */
292 ipfw_free_rule(struct ip_fw *rule)
294 kfree(rule, M_IPFW2);
/*
 * lookup_next_rule() - resolve and cache the rule that follows 'me'.
 * If me's action is a basic skipto, scan forward for the first rule whose
 * number is >= the skipto target (cmd->arg1); otherwise (or when the scan
 * finds nothing) the plain successor is used.  The result is cached in
 * me->next_rule so ipfw_chk() can restart quickly.
 */
299 static struct ip_fw *
300 lookup_next_rule(struct ip_fw *me)
302 struct ip_fw *rule = NULL;
305 /* look for action, in case it is a skipto */
306 cmd = ACTION_PTR(me);
307 if ((int)cmd->module == MODULE_BASIC_ID &&
308 (int)cmd->opcode == O_BASIC_SKIPTO) {
309 for (rule = me->next; rule; rule = rule->next) {
310 if (rule->rulenum >= cmd->arg1)
314 if (rule == NULL) { /* failure or not a skipto */
317 me->next_rule = rule;
/*
 * ipfw_chk() - main per-packet inspection routine.
 *
 * Rules are stored in ctx->ipfw_rule_chain; each rule is a sequence of
 * micro-instructions (ipfw_insn), filter opcodes first and action opcodes
 * last.  The outer loop walks the rules and the inner loop walks each
 * rule's instructions, dispatching through filter_funcs[module][opcode]
 * and then acting on the returned control (*cmd_ctl) and verdict
 * (*cmd_val) values.
 */
330 ipfw_chk(struct ip_fw_args *args)
332 struct mbuf *m = args->m;
333 struct ip *ip = mtod(m, struct ip *);
334 struct ip_fw *f = NULL; /* matching rule */
335 int cmd_val = IP_FW_PASS;
337 struct divert_info *divinfo;
340 * hlen The length of the IPv4 header.
341 * hlen >0 means we have an IPv4 packet.
343 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
346 * offset The offset of a fragment. offset != 0 means that
347 * we have a fragment at this offset of an IPv4 packet.
348 * offset == 0 means that (if this is an IPv4 packet)
349 * this is the first or only fragment.
354 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
355 struct in_addr src_ip, dst_ip; /* NOTE: network format */
358 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
/* Packets generated by the firewall itself are never re-filtered. */
360 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
361 return IP_FW_PASS; /* accept */
363 if (args->eh == NULL || /* layer 3 packet */
364 (m->m_pkthdr.len >= sizeof(struct ip) &&
365 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
366 hlen = ip->ip_hl << 2;
369 * Collect parameters into local variables for faster matching.
371 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
372 proto = args->f_id.proto = 0; /* mark f_id invalid */
373 goto after_ip_checks;
376 proto = args->f_id.proto = ip->ip_p;
/* Layer-2 path sees wire (network) byte order; layer-3 path is host order. */
379 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
380 offset = ntohs(ip->ip_off) & IP_OFFMASK;
381 ip_len = ntohs(ip->ip_len);
383 offset = ip->ip_off & IP_OFFMASK;
/* Ensure 'len' contiguous bytes; refreshes 'ip' since m_pullup may realloc. */
387 #define PULLUP_TO(len) \
389 if (m->m_len < (len)) { \
390 args->m = m = m_pullup(m, (len)); \
392 goto pullup_failed; \
393 ip = mtod(m, struct ip *); \
/* TCP: extract ports and flags into the flow id. */
403 PULLUP_TO(hlen + sizeof(struct tcphdr));
404 tcp = L3HDR(struct tcphdr, ip);
405 dst_port = tcp->th_dport;
406 src_port = tcp->th_sport;
407 args->f_id.flags = tcp->th_flags;
/* UDP: extract ports. */
415 PULLUP_TO(hlen + sizeof(struct udphdr));
416 udp = L3HDR(struct udphdr, ip);
417 dst_port = udp->uh_dport;
418 src_port = udp->uh_sport;
/* ICMP: the type is stored in place of a port. */
425 L3HDR(struct icmp, ip)->icmp_type;
/* Normalize addresses/ports into host order for the flow id. */
435 args->f_id.src_ip = ntohl(src_ip.s_addr);
436 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
437 args->f_id.src_port = src_port = ntohs(src_port);
438 args->f_id.dst_port = dst_port = ntohs(dst_port);
443 * Packet has already been tagged. Look for the next rule
444 * to restart processing.
446 * If fw2_one_pass != 0 then just accept it.
447 * XXX should not happen here, but optimized out in
453 /* This rule is being/has been flushed */
457 f = args->rule->next_rule;
459 f = lookup_next_rule(args->rule);
462 * Find the starting rule. It can be either the first
463 * one, or the one after divert_rule if asked so.
467 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
469 divinfo = m_tag_data(mtag);
470 skipto = divinfo->skipto;
475 f = ctx->ipfw_rule_chain;
476 if (args->eh == NULL && skipto != 0) {
477 /* No skipto during rule flushing */
481 if (skipto >= IPFW_DEFAULT_RULE) {
482 return IP_FW_DENY; /* invalid */
484 while (f && f->rulenum <= skipto) {
487 if (f == NULL) { /* drop packet */
490 } else if (ipfw_flushing) {
491 /* Rules are being flushed; skip to default rule */
492 f = ctx->ipfw_default_rule;
/* Divert tag is consumed once the restart point has been chosen. */
495 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL) {
496 m_tag_delete(m, mtag);
500 * Now scan the rules, and parse microinstructions for each rule.
502 for (; f; f = f->next) {
506 again: /* check the rule again*/
/* Rules in a disabled set are skipped entirely. */
507 if (ctx->ipfw_set_disable & (1 << f->set)) {
511 for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen,
512 cmd=(ipfw_insn *)((uint32_t *)cmd+ cmdlen)) {
514 check_body: /* check the body of the rule again.*/
/* Dispatch this micro-instruction to its registered handler. */
516 (filter_funcs[cmd->module][cmd->opcode])
517 (&cmd_ctl, &cmd_val, &args, &f, cmd, ip_len);
521 case IP_FW_CTL_AGAIN:
528 case IP_FW_CTL_CHK_STATE:
529 /* update the cmd and l */
531 l = f->cmd_len - f->act_ofs;
534 if (cmd->len & F_NOT) {
540 } /* end of inner for, scan opcodes */
542 next_rule:; /* try next rule */
544 } /* end of outer for, scan rules */
/* Fell off the chain without hitting the default rule: deny. */
545 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
549 /* Update statistics */
552 f->timestamp = time_second;
557 kprintf("pullup failed\n");
/*
 * ipfw_dummynet_io() - hand a packet to dummynet(4).
 * Prepends a PACKET_TAG_DUMMYNET mbuf tag describing the pipe/queue number,
 * direction, originating rule and flow id, then marks the mbuf as
 * dummynet-tagged.  The action must be O_DUMMYNET_PIPE or O_DUMMYNET_QUEUE
 * (asserted below); PIPE additionally sets DN_FLAGS_IS_PIPE.
 */
562 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
567 const struct ipfw_flow_id *id;
568 struct dn_flow_id *fid;
572 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
577 m_tag_prepend(m, mtag);
579 pkt = m_tag_data(mtag);
580 bzero(pkt, sizeof(*pkt));
582 cmd = fwa->rule->cmd + fwa->rule->act_ofs;
583 KASSERT(cmd->opcode == O_DUMMYNET_PIPE ||
584 cmd->opcode == O_DUMMYNET_QUEUE,
585 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
588 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
590 pkt->pipe_nr = pipe_nr;
592 pkt->msgport = netisr_curport();
/* Copy the classified flow id into the dummynet flow id. */
596 fid->fid_dst_ip = id->dst_ip;
597 fid->fid_src_ip = id->src_ip;
598 fid->fid_dst_port = id->dst_port;
599 fid->fid_src_port = id->src_port;
600 fid->fid_proto = id->proto;
601 fid->fid_flags = id->flags;
/* Remember the rule so processing can resume after the pipe (one-pass). */
603 pkt->dn_priv = fwa->rule;
605 if ((int)cmd->opcode == O_DUMMYNET_PIPE)
606 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
608 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
/*
 * ipfw_inc_static_count() - account one more static rule.
 * Counters are owned by CPU0 only, so no locking is needed.
 */
612 ipfw_inc_static_count(struct ip_fw *rule)
614 /* Static rule's counts are updated only on CPU0 */
615 KKASSERT(mycpuid == 0);
618 static_ioc_len += IOC_RULESIZE(rule);
/*
 * ipfw_dec_static_count() - account one removed static rule.
 * CPU0-only, mirroring ipfw_inc_static_count(); asserts the counters
 * cannot underflow.
 */
622 ipfw_dec_static_count(struct ip_fw *rule)
624 int l = IOC_RULESIZE(rule);
626 /* Static rule's counts are updated only on CPU0 */
627 KKASSERT(mycpuid == 0);
629 KASSERT(static_count > 0, ("invalid static count %u", static_count));
632 KASSERT(static_ioc_len >= l,
633 ("invalid static len %u", static_ioc_len));
/*
 * ipfw_add_rule_dispatch() - per-CPU half of rule installation.
 * Runs on each CPU in turn (message is forwarded with ifnet_forwardmsg):
 * builds a private copy of the rule from the ioc descriptor, inserts it in
 * rulenum order before the default rule, links it to the copy created on
 * the previous CPU via the 'sibling' pointer (so stats can be summed
 * later), and updates the static counters once (on CPU0).
 */
638 ipfw_add_rule_dispatch(netmsg_t nmsg)
640 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
641 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
642 struct ip_fw *rule, *prev,*next;
643 const struct ipfw_ioc_rule *ioc_rule;
645 ioc_rule = fwmsg->ioc_rule;
646 // create rule by ioc_rule
647 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW2, M_WAITOK | M_ZERO);
648 rule->act_ofs = ioc_rule->act_ofs;
649 rule->cmd_len = ioc_rule->cmd_len;
650 rule->rulenum = ioc_rule->rulenum;
651 rule->set = ioc_rule->set;
/* cmd_len is in 32-bit words, hence the * 4 */
652 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4);
/* Find the insertion point: first rule numbered above the new one. */
654 for (prev = NULL, next = ctx->ipfw_rule_chain;
655 next; prev = next, next = next->next) {
656 if (next->rulenum > ioc_rule->rulenum) {
660 KASSERT(next != NULL, ("no default rule?!"));
663 * Insert rule into the pre-determined position
669 rule->next = ctx->ipfw_rule_chain;
670 ctx->ipfw_rule_chain = rule;
674 * if sibiling in last CPU is exists,
675 * then it's sibling should be current rule
677 if (fwmsg->sibling != NULL) {
678 fwmsg->sibling->sibling = rule;
680 /* prepare for next CPU */
681 fwmsg->sibling = rule;
684 /* Statistics only need to be updated once */
685 ipfw_inc_static_count(rule);
687 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_add_rule() - config-port entry point for installing a rule.
 * Confirms/assigns the rule number (auto-numbering below), then sends a
 * netmsg starting at CPU0; ipfw_add_rule_dispatch() replicates the rule
 * on every CPU and updates the statistics.
 */
696 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule)
698 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
699 struct netmsg_ipfw fwmsg;
700 struct netmsg_base *nmsg;
703 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
706 * If rulenum is 0, find highest numbered rule before the
707 * default rule, and add rule number incremental step.
709 if (ioc_rule->rulenum == 0) {
710 int step = autoinc_step;
712 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
713 step <= IPFW_AUTOINC_STEP_MAX);
716 * Locate the highest numbered rule before default
718 for (f = ctx->ipfw_rule_chain; f; f = f->next) {
719 if (f->rulenum == IPFW_DEFAULT_RULE)
721 ioc_rule->rulenum = f->rulenum;
/* Only bump if the result stays clear of the default rule number. */
723 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
724 ioc_rule->rulenum += step;
726 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
727 ioc_rule->rulenum != 0,
728 ("invalid rule num %d", ioc_rule->rulenum));
730 bzero(&fwmsg, sizeof(fwmsg));
732 netmsg_init(nmsg, NULL, &curthread->td_msgport,
733 0, ipfw_add_rule_dispatch);
734 fwmsg.ioc_rule = ioc_rule;
/* Synchronously run the dispatch chain starting at CPU0. */
736 ifnet_domsg(&nmsg->lmsg, 0);
738 DPRINTF("++ installed rule %d, static count now %d\n",
739 rule->rulenum, static_count);
/*
 * ipfw_delete_rule() - unlink 'rule' from the chain (after 'prev', or at
 * the head when prev is NULL) and free its storage.
 * The caller is in charge of clearing any cached rule pointers.
 * Static counters are adjusted only on the config CPU.
 * @return a pointer to the next entry.
 * Arguments are not checked, so they better be correct.
 * Must be called at splimp().
 */
751 static struct ip_fw *
752 ipfw_delete_rule(struct ipfw_context *ctx,
753 struct ip_fw *prev, struct ip_fw *rule)
756 ctx->ipfw_rule_chain = rule->next;
758 prev->next = rule->next;
760 if (mycpuid == IPFW_CFGCPUID)
761 ipfw_dec_static_count(rule);
763 kfree(rule, M_IPFW2);
/*
 * ipfw_flush_rule_dispatch() - per-CPU half of a flush.
 * Walks this CPU's chain freeing every rule; the default rule is kept
 * (and re-anchored as the chain head) unless lmsg->u.ms_result asked to
 * kill it too.  Forwards the message to the next CPU when done.
 */
769 ipfw_flush_rule_dispatch(netmsg_t nmsg)
771 struct lwkt_msg *lmsg = &nmsg->lmsg;
772 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
773 struct ip_fw *rule, *the_rule;
774 int kill_default = lmsg->u.ms_result;
776 rule = ctx->ipfw_rule_chain;
777 while (rule != NULL) {
778 if (rule->rulenum == IPFW_DEFAULT_RULE && kill_default == 0) {
779 ctx->ipfw_rule_chain = rule;
/* Static counters are owned by the config CPU only. */
784 if (mycpuid == IPFW_CFGCPUID)
785 ipfw_dec_static_count(the_rule);
787 kfree(the_rule, M_IPFW2);
790 ifnet_forwardmsg(lmsg, mycpuid + 1);
/*
 * ipfw_append_state_dispatch() - per-CPU: hand the ioc_state to the
 * ipfw_basic module's append-state handler, then forward to the next CPU.
 * Caller (ipfw_ctl_add_state) guarantees the handler pointer is non-NULL.
 */
794 ipfw_append_state_dispatch(netmsg_t nmsg)
796 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
797 struct ipfw_ioc_state *ioc_state = dmsg->ioc_state;
798 (*ipfw_basic_append_state_prt)(ioc_state);
799 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_delete_state_dispatch() - per-CPU: find the rule matching
 * dmsg->rulenum on this CPU and flush the dynamic states hanging off it
 * via the ipfw_basic module's flush handler, then forward to the next CPU.
 * NOTE(review): no visible NULL check on ipfw_basic_flush_state_prt here;
 * callers appear to check it before sending -- verify all paths do.
 */
803 ipfw_delete_state_dispatch(netmsg_t nmsg)
805 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
806 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
807 struct ip_fw *rule = ctx->ipfw_rule_chain;
808 while (rule != NULL) {
809 if (rule->rulenum == dmsg->rulenum) {
815 (*ipfw_basic_flush_state_prt)(rule);
816 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_ctl_flush_rule() - delete all rules from the chain on every CPU
 * (including the default rule if 'kill_default' is set).
 * Sequence: raise ipfw_flushing so ipfw_chk() short-circuits, sync all
 * msgports, flush dynamic states (if the basic module registered a
 * handler), then run the flush dispatch across CPUs and verify the
 * static counters dropped to zero.
 * Must be called at splimp().
 */
825 ipfw_ctl_flush_rule(int kill_default)
827 struct netmsg_del dmsg;
828 struct netmsg_base nmsg;
829 struct lwkt_msg *lmsg;
831 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
834 * If 'kill_default' then caller has done the necessary
835 * msgport syncing; unnecessary to do it again.
839 * Let ipfw_chk() know the rules are going to
840 * be flushed, so it could jump directly to
844 netmsg_service_sync();
848 * if ipfw_basic_flush_state_prt
849 * flush all states in all CPU
851 if (ipfw_basic_flush_state_prt != NULL) {
852 bzero(&dmsg, sizeof(dmsg));
853 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
854 0, ipfw_delete_state_dispatch);
855 ifnet_domsg(&dmsg.base.lmsg, 0);
858 * Press the 'flush' button
860 bzero(&nmsg, sizeof(nmsg));
861 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
862 0, ipfw_flush_rule_dispatch);
864 lmsg->u.ms_result = kill_default;
865 ifnet_domsg(lmsg, 0);
/* All static rules must be gone now (only on full flush paths). */
868 KASSERT(static_count == 0,
869 ("%u static rules remain", static_count));
870 KASSERT(static_ioc_len == 0,
871 ("%u bytes of static rules remain", static_ioc_len));
/*
 * ipfw_delete_rule_dispatch() - per-CPU: locate the rule numbered
 * dmsg->rulenum on this CPU's chain and delete it, then forward the
 * message to the next CPU.
 */
879 ipfw_delete_rule_dispatch(netmsg_t nmsg)
881 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
882 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
883 struct ip_fw *rule, *prev = NULL;
885 rule = ctx->ipfw_rule_chain;
887 if (rule->rulenum == dmsg->rulenum) {
888 ipfw_delete_rule(ctx, prev, rule);
895 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_alt_delete_rule() - delete one rule number everywhere.
 * Two passes over all CPUs: first flush the dynamic states anchored on
 * that rule, then remove the per-CPU rule duplicates themselves.
 */
899 ipfw_alt_delete_rule(uint16_t rulenum)
901 struct netmsg_del dmsg;
902 struct netmsg_base *nmsg;
905 * delete the state which stub is the rule
906 * which belongs to the CPU and the rulenum
908 bzero(&dmsg, sizeof(dmsg));
910 netmsg_init(nmsg, NULL, &curthread->td_msgport,
911 0, ipfw_delete_state_dispatch);
912 dmsg.rulenum = rulenum;
913 ifnet_domsg(&nmsg->lmsg, 0);
916 * Get rid of the rule duplications on all CPUs
918 bzero(&dmsg, sizeof(dmsg));
920 netmsg_init(nmsg, NULL, &curthread->td_msgport,
921 0, ipfw_delete_rule_dispatch);
922 dmsg.rulenum = rulenum;
923 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_alt_delete_ruleset_dispatch() - per-CPU: delete every rule whose
 * set equals dmsg->from_set.  ipfw_delete_rule() returns the next entry,
 * keeping the walk valid.  Asserts at least one rule matched, then
 * forwards to the next CPU.
 */
928 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
930 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
931 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
932 struct ip_fw *prev, *rule;
938 rule = ctx->ipfw_rule_chain;
939 while (rule != NULL) {
940 if (rule->set == dmsg->from_set) {
941 rule = ipfw_delete_rule(ctx, prev, rule);
950 KASSERT(del, ("no match set?!"));
952 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_disable_ruleset_state_dispatch() - per-CPU: clear the state-keeping
 * capability on every rule in dmsg->from_set so no new dynamic states are
 * created while the set is being deleted.  Asserts a match existed, then
 * forwards to the next CPU.
 */
956 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
958 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
959 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
965 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
966 if (rule->set == dmsg->from_set) {
972 KASSERT(cleared, ("no match set?!"));
974 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_alt_delete_ruleset() - delete every rule in 'set' on all CPUs.
 * First verifies the set exists on this CPU (returns 0 if not), then
 * disables state creation for the set cluster-wide, and finally deletes
 * the rules themselves.
 */
978 ipfw_alt_delete_ruleset(uint8_t set)
980 struct netmsg_del dmsg;
981 struct netmsg_base *nmsg;
984 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
987 * Check whether the 'set' exists. If it exists,
988 * then check whether any rules within the set will
989 * try to create states.
993 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
994 if (rule->set == set) {
999 return 0; /* XXX EINVAL? */
1003 * Clear the STATE flag, so no more states will be
1004 * created based the rules in this set.
1006 bzero(&dmsg, sizeof(dmsg));
1008 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1009 0, ipfw_disable_ruleset_state_dispatch);
1010 dmsg.from_set = set;
1012 ifnet_domsg(&nmsg->lmsg, 0);
/* Now remove the rules of this set on every CPU. */
1018 bzero(&dmsg, sizeof(dmsg));
1020 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1021 0, ipfw_alt_delete_ruleset_dispatch);
1022 dmsg.from_set = set;
1024 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_alt_move_rule_dispatch() - per-CPU: starting from the per-CPU
 * sibling of dmsg->start_rule, retag every rule numbered dmsg->rulenum
 * with dmsg->to_set.  The start pointer is advanced to the next CPU's
 * sibling before the message is forwarded.
 */
1029 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
1031 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1034 rule = dmsg->start_rule;
1037 * Move to the position on the next CPU
1038 * before the msg is forwarded.
1041 while (rule && rule->rulenum <= dmsg->rulenum) {
1042 if (rule->rulenum == dmsg->rulenum)
1043 rule->set = dmsg->to_set;
1046 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_alt_move_rule() - move all rules numbered 'rulenum' into 'set' on
 * every CPU.  Locates the first affected rule locally (no-op if none or
 * if it is already in the target set), then runs the move dispatch.
 * The KKASSERT confirms the dispatch consumed the start pointer on the
 * last CPU.
 */
1050 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
1052 struct netmsg_del dmsg;
1053 struct netmsg_base *nmsg;
1055 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1058 * Locate first rule to move
1060 for (rule = ctx->ipfw_rule_chain;
1061 rule && rule->rulenum <= rulenum; rule = rule->next) {
1062 if (rule->rulenum == rulenum && rule->set != set)
1065 if (rule == NULL || rule->rulenum > rulenum)
1066 return 0; /* XXX error? */
1068 bzero(&dmsg, sizeof(dmsg));
1070 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1071 0, ipfw_alt_move_rule_dispatch);
1072 dmsg.start_rule = rule;
1073 dmsg.rulenum = rulenum;
1076 ifnet_domsg(&nmsg->lmsg, 0);
1077 KKASSERT(dmsg.start_rule == NULL);
/*
 * ipfw_alt_move_ruleset_dispatch() - per-CPU: retag every rule in
 * dmsg->from_set with dmsg->to_set, then forward to the next CPU.
 */
1082 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
1084 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1085 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1088 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1089 if (rule->set == dmsg->from_set)
1090 rule->set = dmsg->to_set;
1092 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_alt_move_ruleset() - move every rule of 'from_set' into 'to_set'
 * on all CPUs via the move-ruleset dispatch chain.
 */
1096 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
1098 struct netmsg_del dmsg;
1099 struct netmsg_base *nmsg;
1101 bzero(&dmsg, sizeof(dmsg));
1103 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1104 0, ipfw_alt_move_ruleset_dispatch);
1105 dmsg.from_set = from_set;
1106 dmsg.to_set = to_set;
1108 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_alt_swap_ruleset_dispatch() - per-CPU: exchange set membership
 * between dmsg->from_set and dmsg->to_set, then forward to the next CPU.
 */
1113 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
1115 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
1116 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1119 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1120 if (rule->set == dmsg->from_set)
1121 rule->set = dmsg->to_set;
1122 else if (rule->set == dmsg->to_set)
1123 rule->set = dmsg->from_set;
1125 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_alt_swap_ruleset() - swap the rules of set1 and set2 on all CPUs
 * via the swap dispatch chain.  NOTE(review): only from_set is visibly
 * assigned here; the to_set = set2 assignment line is absent from this
 * listing -- verify upstream.
 */
1129 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
1131 struct netmsg_del dmsg;
1132 struct netmsg_base *nmsg;
1134 bzero(&dmsg, sizeof(dmsg));
1136 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1137 0, ipfw_alt_swap_ruleset_dispatch);
1138 dmsg.from_set = set1;
1141 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_ctl_alter() - decode the packed IP_FW_DEL argument and run the
 * requested set/rule manipulation.
 * Layout: bits 0-15 rulenum, 16-23 new set, 24-31 sub-command
 * (0 delete rule, 1 delete set, 2 move rule, 3 move set, 4 swap sets).
 * Commands 0/2 validate a rule number; the others validate a set number.
 */
1147 ipfw_ctl_alter(uint32_t arg)
1150 uint8_t cmd, new_set;
1153 rulenum = arg & 0xffff;
1154 cmd = (arg >> 24) & 0xff;
1155 new_set = (arg >> 16) & 0xff;
1159 if (new_set >= IPFW_DEFAULT_SET)
1161 if (cmd == 0 || cmd == 2) {
1162 if (rulenum == IPFW_DEFAULT_RULE)
1165 if (rulenum >= IPFW_DEFAULT_SET)
1170 case 0: /* delete rules with given number */
1171 error = ipfw_alt_delete_rule(rulenum);
1174 case 1: /* delete all rules with given set number */
1175 error = ipfw_alt_delete_ruleset(rulenum);
1178 case 2: /* move rules with given number to new set */
1179 error = ipfw_alt_move_rule(rulenum, new_set);
1182 case 3: /* move rules with given set number to new set */
1183 error = ipfw_alt_move_ruleset(rulenum, new_set);
1186 case 4: /* swap two sets */
1187 error = ipfw_alt_swap_ruleset(rulenum, new_set);
/*
 * clear_counters() - reset byte/packet counters and the last-match
 * timestamp of a single rule.
 */
1197 clear_counters(struct ip_fw *rule)
1199 rule->bcnt = rule->pcnt = 0;
1200 rule->timestamp = 0;
/*
 * ipfw_zero_entry_dispatch() - per-CPU: clear counters on every rule
 * (rulenum == 0) or on the rules matching zmsg->rulenum, then forward
 * the message to the next CPU.
 */
1204 ipfw_zero_entry_dispatch(netmsg_t nmsg)
1206 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
1207 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1210 if (zmsg->rulenum == 0) {
1211 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1212 clear_counters(rule);
1215 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1216 if (rule->rulenum == zmsg->rulenum) {
1217 clear_counters(rule);
1221 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * ipfw_ctl_zero_entry() - reset some or all counters on firewall rules.
 * @arg rulenum  0 to clear all entries, otherwise a specific rule number.
 * @arg log_only 1 to reset only logging counts, 0 to clear accounting too.
 * Validates the rule number exists locally before dispatching across all
 * CPUs, then logs what was cleared.
 */
1231 ipfw_ctl_zero_entry(int rulenum, int log_only)
1233 struct netmsg_zent zmsg;
1234 struct netmsg_base *nmsg;
1236 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1238 bzero(&zmsg, sizeof(zmsg));
1240 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1241 0, ipfw_zero_entry_dispatch);
1242 zmsg.log_only = log_only;
1245 msg = log_only ? "ipfw: All logging counts reset.\n"
1246 : "ipfw: Accounting cleared.\n";
1251 * Locate the first rule with 'rulenum'
1253 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1254 if (rule->rulenum == rulenum)
1257 if (rule == NULL) /* we did not find any matching rules */
1259 zmsg.start_rule = rule;
1260 zmsg.rulenum = rulenum;
1262 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
1263 : "ipfw: Entry %d cleared.\n";
1265 ifnet_domsg(&nmsg->lmsg, 0);
1266 KKASSERT(zmsg.start_rule == NULL);
/* NOTE(review): 'msg' is a non-literal format string, but all candidate
 * values are the fixed kernel strings chosen above. */
1269 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
/*
 * ipfw_ctl_add_state() - sockopt handler: append a dynamic state on every
 * CPU via the basic module's append handler.  Silently a no-op when the
 * ipfw_basic module has not registered ipfw_basic_append_state_prt.
 * NOTE(review): sopt->sopt_val is used in place without a visible
 * soopt_to_kbuf() copy-in -- verify upstream.
 */
1274 ipfw_ctl_add_state(struct sockopt *sopt)
1276 struct ipfw_ioc_state *ioc_state;
1277 ioc_state = sopt->sopt_val;
1278 if (ipfw_basic_append_state_prt != NULL) {
1279 struct netmsg_del dmsg;
1280 bzero(&dmsg, sizeof(dmsg));
1281 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
1282 0, ipfw_append_state_dispatch);
1283 (&dmsg)->ioc_state = ioc_state;
1284 ifnet_domsg(&dmsg.base.lmsg, 0);
/*
 * ipfw_ctl_delete_state() - sockopt handler: flush the dynamic states of
 * one rule number everywhere.  Copies the rule number in from userland
 * (if supplied), verifies the rule exists on this CPU, then runs the
 * delete-state dispatch across all CPUs.
 */
1290 ipfw_ctl_delete_state(struct sockopt *sopt)
1292 int rulenum = 0, error;
1293 if (sopt->sopt_valsize != 0) {
1294 error = soopt_to_kbuf(sopt, &rulenum, sizeof(int), sizeof(int));
1299 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1300 struct ip_fw *rule = ctx->ipfw_rule_chain;
1302 while (rule!=NULL) {
1303 if (rule->rulenum == rulenum) {
1312 struct netmsg_del dmsg;
1313 struct netmsg_base *nmsg;
1315 * delete the state which stub is the rule
1316 * which belongs to the CPU and the rulenum
1318 bzero(&dmsg, sizeof(dmsg));
1320 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1321 0, ipfw_delete_state_dispatch);
1322 dmsg.rulenum = rulenum;
1323 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_ctl_flush_state() - sockopt handler: flush dynamic states on all
 * CPUs.  Reuses ipfw_delete_state_dispatch with a zeroed message, i.e.
 * no specific rule number.
 */
1328 ipfw_ctl_flush_state(struct sockopt *sopt)
1330 struct netmsg_del dmsg;
1331 struct netmsg_base *nmsg;
1333 * delete the state which stub is the rule
1334 * which belongs to the CPU and the rulenum
1336 bzero(&dmsg, sizeof(dmsg));
1338 netmsg_init(nmsg, NULL, &curthread->td_msgport,
1339 0, ipfw_delete_state_dispatch);
1341 ifnet_domsg(&nmsg->lmsg, 0);
/*
 * ipfw_ctl_add_rule() - sockopt handler: validate the ioc_rule size from
 * userland, grow the sockopt buffer to the maximum rule size if needed,
 * and hand the rule to ipfw_add_rule().
 * NOTE(review): krealloc() of sopt->sopt_val assumes the sockopt layer
 * allocated that buffer from M_TEMP and will free the reallocated
 * pointer -- verify against the sockopt implementation.
 */
1350 ipfw_ctl_add_rule(struct sockopt *sopt)
1352 struct ipfw_ioc_rule *ioc_rule;
1355 size = sopt->sopt_valsize;
1356 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
1357 size < sizeof(*ioc_rule)) {
1360 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
1361 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
1362 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
1364 ioc_rule = sopt->sopt_val;
1366 ipfw_add_rule(ioc_rule);
/*
 * ipfw_copy_state() - serialize one dynamic state into the userland
 * ipfw_ioc_state layout, recording which CPU it lives on.
 * Returns the next free position in the output buffer (ioc_state + 1),
 * which lets ipfw_ctl_get_rules() chain copies back to back.
 */
1371 ipfw_copy_state(struct ip_fw_state *state, struct ipfw_ioc_state *ioc_state, int cpuid)
1373 ioc_state->pcnt = state->pcnt;
1374 ioc_state->bcnt = state->bcnt;
1375 ioc_state->lifetime = state->lifetime;
1376 ioc_state->timestamp = state->timestamp;
1377 ioc_state->cpuid = cpuid;
1378 ioc_state->expiry = state->expiry;
1379 ioc_state->rulenum = state->stub->rulenum;
1381 bcopy(&state->flow_id, &ioc_state->flow_id, sizeof(struct ipfw_flow_id));
1382 return ioc_state + 1;
/*
 * ipfw_copy_rule() - serialize one static rule into the userland
 * ipfw_ioc_rule layout.  Counters and the newest timestamp are summed
 * over the rule's per-CPU siblings (the 'sibling' chain built at add
 * time), and the KASSERT verifies the rule is duplicated on every CPU.
 * Returns the next free output position (advanced by IOC_RULESIZE).
 */
1386 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
1388 const struct ip_fw *sibling;
1393 ioc_rule->act_ofs = rule->act_ofs;
1394 ioc_rule->cmd_len = rule->cmd_len;
1395 ioc_rule->rulenum = rule->rulenum;
1396 ioc_rule->set = rule->set;
1398 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
1399 ioc_rule->static_count = static_count;
1400 ioc_rule->static_len = static_ioc_len;
1404 ioc_rule->timestamp = 0;
1411 ioc_rule->timestamp = 0;
/* Aggregate byte/packet counts and latest match time across CPUs. */
1412 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
1413 ioc_rule->pcnt += sibling->pcnt;
1414 ioc_rule->bcnt += sibling->bcnt;
1415 if (sibling->timestamp > ioc_rule->timestamp)
1416 ioc_rule->timestamp = sibling->timestamp;
1422 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
1424 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
1426 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
/*
 * ipfw_ctl_get_modules() - sockopt handler: return a comma-separated list
 * of loaded module names to userland.
 * NOTE(review): module_str is a fixed 1024-byte buffer filled with
 * unbounded strcat(); with MAX_MODULE entries of long names this could
 * overflow -- verify MAX_MODULE * name length stays under 1024, and that
 * sopt->sopt_valsize is large enough for the bcopy().
 */
1430 ipfw_ctl_get_modules(struct sockopt *sopt)
1433 struct ipfw_module *mod;
1434 char module_str[1024];
1435 memset(module_str,0,1024);
1436 for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
1437 if (mod->type != 0) {
1439 strcat(module_str,",");
1440 strcat(module_str,mod->name);
1443 bzero(sopt->sopt_val, sopt->sopt_valsize);
1444 bcopy(module_str, sopt->sopt_val, strlen(module_str));
1445 sopt->sopt_valsize = strlen(module_str);
/*
 * ipfw_ctl_get_rules() - sockopt handler: copy all static rules (from the
 * current CPU's chain) plus every CPU's dynamic states into the sockopt
 * buffer.  Sizes the output first; bails out when the userland buffer is
 * too small.
 * NOTE(review): the state-counting loops index ipfw_ctx[i] but bound j by
 * the *current* CPU's ctx->state_hash_size; this is only correct if every
 * CPU uses the same hash size -- verify.
 */
1453 ipfw_ctl_get_rules(struct sockopt *sopt)
1455 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1456 struct ipfw_state_context *state_ctx;
1458 struct ip_fw_state *state;
1461 int i, j, state_count = 0;
1463 size = static_ioc_len;
1464 for (i = 0; i < ncpus; i++) {
1465 for (j = 0; j < ctx->state_hash_size; j++) {
1466 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1467 state_count += state_ctx->count;
1470 if (state_count > 0) {
1471 size += state_count * sizeof(struct ipfw_ioc_state);
1474 if (sopt->sopt_valsize < size) {
1475 /* XXX TODO sopt_val is not big enough */
1476 bzero(sopt->sopt_val, sopt->sopt_valsize);
1480 sopt->sopt_valsize = size;
1481 bp = sopt->sopt_val;
/* Static rules first, then the per-CPU dynamic states. */
1483 for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
1484 bp = ipfw_copy_rule(rule, bp);
1486 if (state_count > 0 ) {
1487 for (i = 0; i < ncpus; i++) {
1488 for (j = 0; j < ctx->state_hash_size; j++) {
1489 state_ctx = &ipfw_ctx[i]->state_ctx[j];
1490 state = state_ctx->state;
1491 while (state != NULL) {
1492 bp = ipfw_copy_state(state, bp, i);
1493 state = state->next;
/*
 * ipfw_set_disable_dispatch() - per-CPU: install the new set-disable
 * bitmask carried in lmsg->u.ms_result32, then forward to the next CPU.
 */
1502 ipfw_set_disable_dispatch(netmsg_t nmsg)
1504 struct lwkt_msg *lmsg = &nmsg->lmsg;
1505 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1507 ctx->ipfw_set_disable = lmsg->u.ms_result32;
1509 ifnet_forwardmsg(lmsg, mycpuid + 1);
/*
 * Compute the new set-disable mask from the requested disable/enable
 * bits and propagate it to every CPU via a chained netmsg
 * (ipfw_set_disable_dispatch).
 */
1513 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
1515 struct netmsg_base nmsg;
1516 struct lwkt_msg *lmsg;
1517 uint32_t set_disable;
1519 /* IPFW_DEFAULT_SET is always enabled */
1520 enable |= (1 << IPFW_DEFAULT_SET);
/* Enable bits win over disable bits for the same set. */
1521 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
1523 bzero(&nmsg, sizeof(nmsg));
1524 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1525 0, ipfw_set_disable_dispatch);
/*
 * NOTE(review): the lmsg assignment is elided in this view —
 * presumably lmsg = &nmsg.lmsg before this store; confirm in the
 * full file.
 */
1527 lmsg->u.ms_result32 = set_disable;
/* Start the per-CPU chain at CPU 0; each CPU forwards to the next. */
1529 ifnet_domsg(lmsg, 0);
1534 * ipfw_ctl_x - extended version of ipfw_ctl
1535 * remove the x_header, and adjust the sopt_name,sopt_val and sopt_valsize.
1538 ipfw_ctl_x(struct sockopt *sopt)
1540 ip_fw_x_header *x_header;
1541 x_header = (ip_fw_x_header *)(sopt->sopt_val);
/* The real opcode travels inside the extended header. */
1542 sopt->sopt_name = x_header->opcode;
1543 sopt->sopt_valsize -= sizeof(ip_fw_x_header);
/*
 * Shift the payload down over the header (++x_header points just past
 * it); bcopy() tolerates overlapping regions.
 */
1544 bcopy(++x_header, sopt->sopt_val, sopt->sopt_valsize);
/* Re-enter the normal dispatcher with the adjusted sockopt. */
1545 return ipfw_ctl(sopt);
/*
 * ipfw_ctl - {set|get}sockopt dispatcher for all IP_FW_* and
 * IP_DUMMYNET_* control operations.
 * NOTE(review): many lines (case labels, declarations, breaks) are
 * elided in this view.
 */
1550 * {set|get}sockopt parser.
1553 ipfw_ctl(struct sockopt *sopt)
1560 switch (sopt->sopt_name) {
1565 error = ipfw_ctl_get_rules(sopt);
1568 error = ipfw_ctl_get_modules(sopt);
/* Argument 0: keep the default rule (cf. the 1 in ipfw_fini_dispatch). */
1572 ipfw_ctl_flush_rule(0);
1576 error = ipfw_ctl_add_rule(sopt);
1581 * IP_FW_DEL is used for deleting single rules or sets,
1582 * and (ab)used to atomically manipulate sets.
1583 * Argument size is used to distinguish between the two:
1585 * delete single rule or set of rules,
1586 * or reassign rules (or sets) to a different set.
1587 * 2 * sizeof(uint32_t)
1588 * atomic disable/enable sets.
1589 * first uint32_t contains sets to be disabled,
1590 * second uint32_t contains sets to be enabled.
1592 masks = sopt->sopt_val;
1593 size = sopt->sopt_valsize;
1594 if (size == sizeof(*masks)) {
1596 * Delete or reassign static rule
1598 error = ipfw_ctl_alter(masks[0]);
1599 } else if (size == (2 * sizeof(*masks))) {
1601 * Set enable/disable
1603 ipfw_ctl_set_disable(masks[0], masks[1]);
1609 case IP_FW_RESETLOG: /* argument is an int, the rule number */
/* Only read the rule number if the caller supplied one. */
1611 if (sopt->sopt_valsize != 0) {
1612 error = soopt_to_kbuf(sopt, &rulenum,
1613 sizeof(int), sizeof(int))
1618 error = ipfw_ctl_zero_entry(rulenum,
1619 sopt->sopt_name == IP_FW_RESETLOG);
/*
 * NAT operations go through *_ptr function pointers — presumably so
 * the NAT code can live in a separately loaded module; verify.
 */
1622 error = ipfw_nat_cfg_ptr(sopt);
1625 error = ipfw_nat_del_ptr(sopt);
1627 case IP_FW_NAT_FLUSH:
1628 error = ipfw_nat_flush_ptr(sopt);
1631 error = ipfw_nat_get_cfg_ptr(sopt);
1634 error = ipfw_nat_get_log_ptr(sopt);
/* All dummynet sockopts are handled by the dummynet code itself. */
1636 case IP_DUMMYNET_GET:
1637 case IP_DUMMYNET_CONFIGURE:
1638 case IP_DUMMYNET_DEL:
1639 case IP_DUMMYNET_FLUSH:
1640 error = ip_dn_sockopt(sopt);
1642 case IP_FW_STATE_ADD:
1643 error = ipfw_ctl_add_state(sopt);
1645 case IP_FW_STATE_DEL:
1646 error = ipfw_ctl_delete_state(sopt);
1648 case IP_FW_STATE_FLUSH:
1649 error = ipfw_ctl_flush_state(sopt);
1652 kprintf("ipfw_ctl invalid option %d\n",
/*
 * pfil(9) input hook: classify an inbound packet with ipfw_chk() and
 * dispatch on the verdict (pass / dummynet / divert-tee / ...).
 * NOTE(review): several lines of this function are elided in this view.
 */
1660 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1662 struct ip_fw_args args;
1663 struct mbuf *m = *m0;
1665 int tee = 0, error = 0, ret;
/* Packet re-injected by dummynet: resume at the rule saved in its tag. */
1667 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1668 /* Extract info from dummynet tag */
1669 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1670 KKASSERT(mtag != NULL);
1671 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1672 KKASSERT(args.rule != NULL);
/* Consume the tag so the packet is not treated as re-injected twice. */
1674 m_tag_delete(m, mtag);
1675 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1683 ret = ipfw_chk(&args);
1700 case IP_FW_DUMMYNET:
1701 /* Send packet to the appropriate pipe */
1702 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
1711 * Must clear bridge tag when changing
1713 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
/* Divert/tee: hand to the divert socket code (1 = incoming packet). */
1714 if (ip_divert_p != NULL) {
1715 m = ip_divert_p(m, tee, 1);
1719 /* not sure this is the right error msg */
/* Defensive: ipfw_chk() returned a verdict we do not know about. */
1729 panic("unknown ipfw return value: %d", ret);
/*
 * pfil(9) output hook: mirror of ipfw_check_in() for outbound packets
 * (DN_TO_IP_OUT for dummynet, 0 = outgoing for divert).
 * NOTE(review): several lines of this function are elided in this view.
 */
1737 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
1739 struct ip_fw_args args;
1740 struct mbuf *m = *m0;
1742 int tee = 0, error = 0, ret;
/* Packet re-injected by dummynet: resume at the rule saved in its tag. */
1744 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
1745 /* Extract info from dummynet tag */
1746 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1747 KKASSERT(mtag != NULL);
1748 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
1749 KKASSERT(args.rule != NULL);
/* Consume the tag so the packet is not treated as re-injected twice. */
1751 m_tag_delete(m, mtag);
1752 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
1760 ret = ipfw_chk(&args);
1778 case IP_FW_DUMMYNET:
/* Send the packet to the appropriate dummynet pipe (output side). */
1779 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
/* Divert/tee: hand to the divert socket code (0 = outgoing packet). */
1787 if (ip_divert_p != NULL) {
1788 m = ip_divert_p(m, tee, 0);
1792 /* not sure this is the right error msg */
/* Defensive: ipfw_chk() returned a verdict we do not know about. */
1802 panic("unknown ipfw return value: %d", ret);
/*
 * Attach the ipfw input/output packet hooks to the AF_INET pfil head.
 * Must run on the configuration port (asserted below).
 * NOTE(review): the function header line is elided in this view —
 * presumably this is ipfw_hook(); confirm in the full file.
 */
1812 struct pfil_head *pfh;
1813 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1815 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
/* PFIL_MPSAFE: the hooks may be invoked without the MP lock. */
1819 pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
1820 pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
/*
 * Detach the ipfw input/output packet hooks from the AF_INET pfil
 * head; counterpart of the attach routine above.
 * Must run on the configuration port (asserted below).
 * NOTE(review): the function header line is elided in this view —
 * presumably this is ipfw_dehook(); confirm in the full file.
 */
1826 struct pfil_head *pfh;
1828 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
1830 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
1834 pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
1835 pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
/*
 * Runs on the config port: apply a new value of the global fw2_enable
 * flag (no-op if unchanged) and reply to the waiting sysctl handler.
 */
1839 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
1841 struct lwkt_msg *lmsg = &nmsg->lmsg;
1842 int enable = lmsg->u.ms_result;
/* Nothing to do if the state is already as requested. */
1844 if (fw2_enable == enable)
1847 fw2_enable = enable;
1854 lwkt_replymsg(lmsg, 0);
/*
 * sysctl handler for the firewall enable knob: report the current
 * fw2_enable value; on write, forward the new value to the config
 * port so the change is serialized with other firewall configuration.
 */
1858 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
1860 struct netmsg_base nmsg;
1861 struct lwkt_msg *lmsg;
1864 enable = fw2_enable;
1865 error = sysctl_handle_int(oidp, &enable, 0, req);
/* Read-only access, or copy error: we are done. */
1866 if (error || req->newptr == NULL)
1869 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1870 0, ipfw_sysctl_enable_dispatch);
/*
 * NOTE(review): the lmsg assignment is elided in this view —
 * presumably lmsg = &nmsg.lmsg; confirm in the full file.
 */
1872 lmsg->u.ms_result = enable;
1874 return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
/*
 * sysctl handler: clamp the rule-number auto-increment step into
 * [IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX].
 */
1878 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
1880 return sysctl_int_range(oidp, arg1, arg2, req,
1881 IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
/*
 * Per-CPU init (chained netmsg): allocate this CPU's ipfw context and
 * install a copy of the default rule, linking each CPU's copy to the
 * previous CPU's copy through the sibling pointer.  Forwards the
 * message to the next CPU when done.
 */
1886 ipfw_ctx_init_dispatch(netmsg_t nmsg)
1888 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
1889 struct ipfw_context *ctx;
1890 struct ip_fw *def_rule;
/* The NAT context is global; allocate it once, on CPU 0 only. */
1892 if (mycpuid == 0 ) {
1893 ipfw_nat_ctx = kmalloc(sizeof(struct ipfw_nat_context),
1894 M_IPFW2, M_WAITOK | M_ZERO);
1897 ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW2, M_WAITOK | M_ZERO);
1898 ipfw_ctx[mycpuid] = ctx;
/* Build this CPU's copy of the catch-all default rule. */
1900 def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW2, M_WAITOK | M_ZERO);
1901 def_rule->act_ofs = 0;
1902 def_rule->rulenum = IPFW_DEFAULT_RULE;
1903 def_rule->cmd_len = 2;
1904 def_rule->set = IPFW_DEFAULT_SET;
1906 def_rule->cmd[0].len = LEN_OF_IPFWINSN;
1907 def_rule->cmd[0].module = MODULE_BASIC_ID;
/* The default verdict (accept vs. deny) is chosen at compile time. */
1908 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1909 def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
1911 def_rule->cmd[0].opcode = O_BASIC_DENY;
1914 /* Install the default rule */
1915 ctx->ipfw_default_rule = def_rule;
1916 ctx->ipfw_rule_chain = def_rule;
1919 * If the sibling rule on the previous CPU exists,
1920 * its sibling pointer should reference the current rule.
1922 if (fwmsg->sibling != NULL) {
1923 fwmsg->sibling->sibling = def_rule;
1925 /* prepare for next CPU */
1926 fwmsg->sibling = def_rule;
1928 /* Statistics only need to be updated once */
1930 ipfw_inc_static_count(def_rule);
1932 ifnet_forwardmsg(&nmsg->lmsg, mycpuid + 1);
/*
 * Runs on the config port: create the per-CPU contexts via a chained
 * netmsg, publish the packet-check / control / dummynet entry points,
 * and print the startup banner.
 * NOTE(review): several lines (error setup, hook attach, kprintf
 * continuation) are elided in this view.
 */
1936 ipfw_init_dispatch(netmsg_t nmsg)
1938 struct netmsg_ipfw fwmsg;
/* Guard against double initialization. */
1941 kprintf("IP firewall already loaded\n");
1946 bzero(&fwmsg, sizeof(fwmsg));
1947 netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
1948 0, ipfw_ctx_init_dispatch);
/* Run the per-CPU context setup, starting at CPU 0. */
1949 ifnet_domsg(&fwmsg.base.lmsg, 0);
/* Publish the hooks used by the IP stack and dummynet. */
1951 ip_fw_chk_ptr = ipfw_chk;
1952 ip_fw_ctl_x_ptr = ipfw_ctl_x;
1953 ip_fw_dn_io_ptr = ipfw_dummynet_io;
1955 kprintf("ipfw2 initialized, default to %s, logging ",
1956 (int)(ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode) ==
1957 O_BASIC_ACCEPT ? "accept" : "deny");
/* Compile-time logging configuration. */
1959 #ifdef IPFIREWALL_VERBOSE
1962 #ifdef IPFIREWALL_VERBOSE_LIMIT
1963 verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
1965 if (fw_verbose == 0) {
1966 kprintf("disabled ");
1967 } else if (verbose_limit == 0) {
1968 kprintf("unlimited ");
1970 kprintf("limited to %d packets/entry by default ",
1978 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * Module entry point: run ipfw_init_dispatch() on the configuration
 * port and wait for the result.
 * NOTE(review): the function header line is elided in this view —
 * presumably this is ipfw_init(); confirm in the full file.
 */
1984 struct netmsg_base smsg;
1986 netmsg_init(&smsg, NULL, &curthread->td_msgport,
1987 0, ipfw_init_dispatch);
1988 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
/*
 * Runs on the config port: tear the firewall down — quiesce the
 * netmsg services, clear the published entry points, flush all rules
 * (including the default rule), then free the per-CPU contexts and
 * the global NAT context.
 */
1994 ipfw_fini_dispatch(netmsg_t nmsg)
/* Make sure no CPU is still running inside the hooks. */
2001 netmsg_service_sync();
2002 ip_fw_chk_ptr = NULL;
2003 ip_fw_ctl_x_ptr = NULL;
2004 ip_fw_dn_io_ptr = NULL;
2005 ipfw_ctl_flush_rule(1 /* kill default rule */);
2006 /* Free per-CPU contexts */
2007 for (cpu = 0; cpu < ncpus; ++cpu) {
2008 if (ipfw_ctx[cpu] != NULL) {
2009 kfree(ipfw_ctx[cpu], M_IPFW2);
2010 ipfw_ctx[cpu] = NULL;
/* The NAT context was allocated once (CPU 0); free it once. */
2013 kfree(ipfw_nat_ctx,M_IPFW2);
2014 ipfw_nat_ctx = NULL;
2015 kprintf("IP firewall unloaded\n");
2017 lwkt_replymsg(&nmsg->lmsg, error);
/*
 * Module exit point: run ipfw_fini_dispatch() on the configuration
 * port and wait for the result.
 * NOTE(review): the function header line is elided in this view —
 * presumably this is ipfw_fini(); confirm in the full file.
 */
2023 struct netmsg_base smsg;
2024 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2025 0, ipfw_fini_dispatch);
2026 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2029 #endif /* KLD_MODULE */
/*
 * kld module event handler plus the module declaration glue.
 * NOTE(review): most of the modevent switch and the moduledata
 * initializer are elided in this view; only the statically-compiled
 * unload refusal is visible.
 */
2032 ipfw2_modevent(module_t mod, int type, void *unused)
/* Built-in (non-KLD) firewall cannot be unloaded. */
2044 kprintf("ipfw statically compiled, cannot unload\n");
2056 static moduledata_t ipfw2mod = {
/* Register after the protocol stack is fully initialized. */
2061 DECLARE_MODULE(ipfw2, ipfw2mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2062 MODULE_VERSION(ipfw2, 1);