1 /* $OpenBSD: pf_ioctl.c,v 1.209 2008/06/29 08:42:15 mcbride Exp $ */
2 /* add $OpenBSD: pf_ioctl.c,v 1.212 2009/02/15 20:42:33 mbalmer Exp $ */
5 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
7 * Copyright (c) 2001 Daniel Hartmeier
8 * Copyright (c) 2002,2003 Henning Brauer
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * - Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
35 * Effort sponsored in part by the Defense Advanced Research Projects
36 * Agency (DARPA) and Air Force Research Laboratory, Air Force
37 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 #include "opt_inet6.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/device.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/kernel.h>
54 #include <sys/kthread.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
61 #include <sys/thread2.h>
64 #include <net/if_types.h>
65 #include <net/route.h>
67 #include <netinet/in.h>
68 #include <netinet/in_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_icmp.h>
74 #include <net/pf/pfvar.h>
77 #include <net/pf/if_pflog.h>
78 #include <net/pf/if_pfsync.h>
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
86 #include <net/altq/altq.h>
89 #include <machine/limits.h>
91 #include <sys/mutex.h>
93 u_int rt_numfibs = RT_NUMFIBS;
96 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
97 u_int8_t, u_int8_t, u_int8_t);
99 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
100 void pf_empty_pool(struct pf_palist *);
102 int pf_begin_altq(u_int32_t *);
103 int pf_rollback_altq(u_int32_t);
104 int pf_commit_altq(u_int32_t);
105 int pf_enable_altq(struct pf_altq *);
106 int pf_disable_altq(struct pf_altq *);
108 int pf_begin_rules(u_int32_t *, int, const char *);
109 int pf_rollback_rules(u_int32_t, int, char *);
110 int pf_setup_pfsync_matching(struct pf_ruleset *);
111 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
112 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
113 int pf_commit_rules(u_int32_t, int, char *);
114 int pf_addr_setup(struct pf_ruleset *,
115 struct pf_addr_wrap *, sa_family_t);
116 void pf_addr_copyout(struct pf_addr_wrap *);
118 struct pf_rule pf_default_rule;
119 struct lock pf_consistency_lock;
120 struct lock pf_global_statetbl_lock;
122 static int pf_altq_running;
125 #define TAGID_MAX 50000
126 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
127 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
129 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
130 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
132 u_int16_t tagname2tag(struct pf_tags *, char *);
133 void tag2tagname(struct pf_tags *, u_int16_t, char *);
134 void tag_unref(struct pf_tags *, u_int16_t);
135 int pf_rtlabel_add(struct pf_addr_wrap *);
136 void pf_rtlabel_remove(struct pf_addr_wrap *);
137 void pf_rtlabel_copyout(struct pf_addr_wrap *);
139 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
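/*
 * Typical use, as seen further down in this file:
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 * The extra parentheses around the message are required because the macro
 * pastes the argument list directly after kprintf.
 */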
141 static cdev_t pf_dev;
143 static MALLOC_DEFINE(M_PFRULEPL, "pfrulepl", "pf rule pool list");
144 static MALLOC_DEFINE(M_PFALTQPL, "pfaltqpl", "pf altq pool list");
145 static MALLOC_DEFINE(M_PFPOOLADDRPL, "pfpooladdrpl", "pf pool address pool list");
146 static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list");
150 * XXX - These are new and need to be checked when moving to a new version
152 static void pf_clear_states(void);
153 static int pf_clear_tables(void);
154 static void pf_clear_srcnodes(void);
156 * XXX - These are new and need to be checked when moving to a new version
160 * Wrapper functions for pfil(9) hooks
162 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
164 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
167 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
169 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
173 static int hook_pf(void);
174 static int dehook_pf(void);
175 static int shutdown_pf(void);
176 static int pf_load(void);
177 static int pf_unload(void);
183 static struct dev_ops pf_ops = { /* XXX convert to port model */
190 static volatile int pf_pfil_hooked = 0;
191 int pf_end_threads = 0;
193 int debug_pfugidhack = 0;
194 SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
195 "Enable/disable pf user/group rules mpsafe hack");
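/*
 * The hack above is toggled from userland via the debug.pfugidhack sysctl,
 * e.g. (sketch) "sysctl debug.pfugidhack=1".
 */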
200 u_int32_t *my_timeout = pf_default_rule.timeout;
203 if (!rn_inithead((void **)&pf_maskhead, NULL, 0)) {
204 kprintf("pf mask radix tree create failed\n");
207 kmalloc_create(&pf_state_pl, "pf state pool list");
208 kmalloc_raise_limit(pf_state_pl, 0);
209 kmalloc_create(&pf_frent_pl, "pf fragment pool list");
210 kmalloc_raise_limit(pf_frent_pl, 0);
211 kmalloc_create(&pf_cent_pl, "pf cent pool list");
212 kmalloc_raise_limit(pf_cent_pl, 0);
216 pf_osfp_initialize();
218 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
219 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
220 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
221 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
222 if (ctob(physmem) <= 100*1024*1024)
223 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
224 PFR_KENTRY_HIWAT_SMALL;
226 for (nn = 0; nn < ncpus; ++nn) {
227 RB_INIT(&tree_src_tracking[nn]);
228 RB_INIT(&tree_id[nn]);
230 RB_INIT(&pf_anchors);
231 pf_init_ruleset(&pf_main_ruleset);
232 TAILQ_INIT(&pf_altqs[0]);
233 TAILQ_INIT(&pf_altqs[1]);
234 TAILQ_INIT(&pf_pabuf);
235 pf_altqs_active = &pf_altqs[0];
236 pf_altqs_inactive = &pf_altqs[1];
237 for (nn = 0; nn < ncpus; ++nn)
238 TAILQ_INIT(&state_list[nn]);
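/*
 * The state tree, source-tracking tree and state list are kept per cpu
 * here; the ioctl handlers below therefore loop over ncpus and migrate
 * with lwkt_setcpu_self() before walking them.
 */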
240 /* default rule should never be garbage collected */
241 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
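/*
 * Pointing tqe_prev at our own tqe_next leaves entries.tqe_prev non-NULL
 * without putting the rule on any queue; pf_rm_rule() below will not free
 * a rule whose tqe_prev is still set, so the default rule is never freed.
 */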
242 pf_default_rule.action = PF_PASS;
243 pf_default_rule.nr = (uint32_t)(-1);
244 pf_default_rule.rtableid = -1;
246 /* initialize default timeouts */
247 my_timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */
248 my_timeout[PFTM_TCP_OPENING] = 30; /* No response yet */
249 my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */
250 my_timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */
251 my_timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */
252 my_timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */
253 my_timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */
254 my_timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */
255 my_timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */
256 my_timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */
257 my_timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */
258 my_timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */
259 my_timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */
260 my_timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */
261 my_timeout[PFTM_FRAG] = 30; /* Fragment expire */
262 my_timeout[PFTM_INTERVAL] = 10; /* Expire interval */
263 my_timeout[PFTM_SRC_NODE] = 0; /* Source Tracking */
264 my_timeout[PFTM_TS_DIFF] = 30; /* Allowed TS diff */
265 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
266 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
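/*
 * These defaults mirror pf.conf's "set timeout" options; e.g. (sketch)
 * "set timeout tcp.established 86400" in pf.conf reproduces the 24*60*60
 * value assigned above.
 */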
269 bzero(&pf_status, sizeof(pf_status));
270 pf_status.debug = PF_DEBUG_URGENT;
271 /* XXX do our best to avoid a conflict */
272 pf_status.hostid = karc4random();
274 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
275 panic("pfpurge thread");
279 pfopen(struct dev_open_args *ap)
281 lwkt_gettoken(&pf_token);
282 cdev_t dev = ap->a_head.a_dev;
283 if (minor(dev) >= 1) {
284 lwkt_reltoken(&pf_token);
287 lwkt_reltoken(&pf_token);
292 pfclose(struct dev_close_args *ap)
294 lwkt_gettoken(&pf_token);
295 cdev_t dev = ap->a_head.a_dev;
296 if (minor(dev) >= 1) {
297 lwkt_reltoken(&pf_token);
300 lwkt_reltoken(&pf_token);
305 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
306 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
307 u_int8_t check_ticket)
309 struct pf_ruleset *ruleset;
310 struct pf_rule *rule;
313 ruleset = pf_find_ruleset(anchor);
316 rs_num = pf_get_ruleset_number(rule_action);
317 if (rs_num >= PF_RULESET_MAX)
320 if (check_ticket && ticket !=
321 ruleset->rules[rs_num].active.ticket)
324 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
327 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
329 if (check_ticket && ticket !=
330 ruleset->rules[rs_num].inactive.ticket)
333 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
336 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
339 while ((rule != NULL) && (rule->nr != rule_number))
340 rule = TAILQ_NEXT(rule, entries);
345 return (&rule->rpool);
349 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
351 struct pf_pooladdr *mv_pool_pa;
353 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
354 TAILQ_REMOVE(poola, mv_pool_pa, entries);
355 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
360 pf_empty_pool(struct pf_palist *poola)
362 struct pf_pooladdr *empty_pool_pa;
364 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
365 pfi_dynaddr_remove(&empty_pool_pa->addr);
366 pf_tbladdr_remove(&empty_pool_pa->addr);
367 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
368 TAILQ_REMOVE(poola, empty_pool_pa, entries);
369 kfree(empty_pool_pa, M_PFPOOLADDRPL);
374 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
376 if (rulequeue != NULL) {
377 if (rule->states_cur <= 0) {
379 * XXX - we need to remove the table *before* detaching
380 * the rule to make sure the table code does not delete
381 * the anchor under our feet.
383 pf_tbladdr_remove(&rule->src.addr);
384 pf_tbladdr_remove(&rule->dst.addr);
385 if (rule->overload_tbl)
386 pfr_detach_table(rule->overload_tbl);
388 TAILQ_REMOVE(rulequeue, rule, entries);
389 rule->entries.tqe_prev = NULL;
393 if (rule->states_cur > 0 || rule->src_nodes > 0 ||
394 rule->entries.tqe_prev != NULL)
396 pf_tag_unref(rule->tag);
397 pf_tag_unref(rule->match_tag);
399 if (rule->pqid != rule->qid)
400 pf_qid_unref(rule->pqid);
401 pf_qid_unref(rule->qid);
403 pf_rtlabel_remove(&rule->src.addr);
404 pf_rtlabel_remove(&rule->dst.addr);
405 pfi_dynaddr_remove(&rule->src.addr);
406 pfi_dynaddr_remove(&rule->dst.addr);
407 if (rulequeue == NULL) {
408 pf_tbladdr_remove(&rule->src.addr);
409 pf_tbladdr_remove(&rule->dst.addr);
410 if (rule->overload_tbl)
411 pfr_detach_table(rule->overload_tbl);
413 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
414 pf_anchor_remove(rule);
415 pf_empty_pool(&rule->rpool.list);
416 kfree(rule, M_PFRULEPL);
420 tagname2tag(struct pf_tags *head, char *tagname)
422 struct pf_tagname *tag, *p = NULL;
423 u_int16_t new_tagid = 1;
425 TAILQ_FOREACH(tag, head, entries)
426 if (strcmp(tagname, tag->name) == 0) {
432 * to avoid fragmentation, we do a linear search from the beginning
433 * and take the first free slot we find. if there is none or the list
434 * is empty, append a new entry at the end.
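 * For example, with tags 1, 2 and 4 already allocated the scan stops at 4
 * and the new name gets tag 3; with 1, 2 and 3 in use it is appended as 4.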
438 if (!TAILQ_EMPTY(head))
439 for (p = TAILQ_FIRST(head); p != NULL &&
440 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
441 new_tagid = p->tag + 1;
443 if (new_tagid > TAGID_MAX)
446 /* allocate and fill new struct pf_tagname */
447 tag = kmalloc(sizeof(*tag), M_TEMP, M_WAITOK);
448 strlcpy(tag->name, tagname, sizeof(tag->name));
449 tag->tag = new_tagid;
452 if (p != NULL) /* insert new entry before p */
453 TAILQ_INSERT_BEFORE(p, tag, entries);
454 else /* either list empty or no free slot in between */
455 TAILQ_INSERT_TAIL(head, tag, entries);
461 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
463 struct pf_tagname *tag;
465 TAILQ_FOREACH(tag, head, entries)
466 if (tag->tag == tagid) {
467 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
473 tag_unref(struct pf_tags *head, u_int16_t tag)
475 struct pf_tagname *p, *next;
480 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
481 next = TAILQ_NEXT(p, entries);
484 TAILQ_REMOVE(head, p, entries);
493 pf_tagname2tag(char *tagname)
495 return (tagname2tag(&pf_tags, tagname));
499 pf_tag2tagname(u_int16_t tagid, char *p)
501 tag2tagname(&pf_tags, tagid, p);
505 pf_tag_ref(u_int16_t tag)
507 struct pf_tagname *t;
509 TAILQ_FOREACH(t, &pf_tags, entries)
517 pf_tag_unref(u_int16_t tag)
519 tag_unref(&pf_tags, tag);
523 pf_rtlabel_add(struct pf_addr_wrap *a)
529 pf_rtlabel_remove(struct pf_addr_wrap *a)
534 pf_rtlabel_copyout(struct pf_addr_wrap *a)
536 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
537 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
542 pf_qname2qid(char *qname)
544 return ((u_int32_t)tagname2tag(&pf_qids, qname));
548 pf_qid2qname(u_int32_t qid, char *p)
550 tag2tagname(&pf_qids, (u_int16_t)qid, p);
554 pf_qid_unref(u_int32_t qid)
556 tag_unref(&pf_qids, (u_int16_t)qid);
560 pf_begin_altq(u_int32_t *ticket)
562 struct pf_altq *altq;
565 /* Purge the old altq list */
566 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
567 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
568 if (altq->qname[0] == 0) {
569 /* detach and destroy the discipline */
570 error = altq_remove(altq);
572 pf_qid_unref(altq->qid);
573 kfree(altq, M_PFALTQPL);
577 *ticket = ++ticket_altqs_inactive;
578 altqs_inactive_open = 1;
583 pf_rollback_altq(u_int32_t ticket)
585 struct pf_altq *altq;
588 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
590 /* Purge the old altq list */
591 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
592 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
593 if (altq->qname[0] == 0) {
594 /* detach and destroy the discipline */
595 error = altq_remove(altq);
597 pf_qid_unref(altq->qid);
598 kfree(altq, M_PFALTQPL);
600 altqs_inactive_open = 0;
605 pf_commit_altq(u_int32_t ticket)
607 struct pf_altqqueue *old_altqs;
608 struct pf_altq *altq;
611 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
614 /* swap altqs, keep the old. */
616 old_altqs = pf_altqs_active;
617 pf_altqs_active = pf_altqs_inactive;
618 pf_altqs_inactive = old_altqs;
619 ticket_altqs_active = ticket_altqs_inactive;
621 /* Attach new disciplines */
622 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
623 if (altq->qname[0] == 0) {
624 /* attach the discipline */
625 error = altq_pfattach(altq);
633 /* Purge the old altq list */
634 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
635 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
636 if (altq->qname[0] == 0) {
637 /* detach and destroy the discipline */
639 error = pf_disable_altq(altq);
640 err = altq_pfdetach(altq);
641 if (err != 0 && error == 0)
643 err = altq_remove(altq);
644 if (err != 0 && error == 0)
647 pf_qid_unref(altq->qid);
648 kfree(altq, M_PFALTQPL);
652 altqs_inactive_open = 0;
657 pf_enable_altq(struct pf_altq *altq)
660 struct tb_profile tb;
665 if ((ifp = ifunit(altq->ifname)) == NULL) {
670 if (ifp->if_snd.altq_type != ALTQT_NONE)
671 error = altq_enable(&ifp->if_snd);
673 /* set tokenbucket regulator */
674 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
675 tb.rate = altq->ifbandwidth;
676 tb.depth = altq->tbrsize;
678 error = tbr_set(&ifp->if_snd, &tb);
687 pf_disable_altq(struct pf_altq *altq)
690 struct tb_profile tb;
695 if ((ifp = ifunit(altq->ifname)) == NULL) {
701 * when the discipline is no longer referenced, it was overridden
702 * by a new one. if so, just return.
704 if (altq->altq_disc != ifp->if_snd.altq_disc) {
709 error = altq_disable(&ifp->if_snd);
712 /* clear tokenbucket regulator */
715 error = tbr_set(&ifp->if_snd, &tb);
725 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
727 struct pf_ruleset *rs;
728 struct pf_rule *rule;
730 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
732 rs = pf_find_or_create_ruleset(anchor);
735 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
736 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
737 rs->rules[rs_num].inactive.rcount--;
739 *ticket = ++rs->rules[rs_num].inactive.ticket;
740 rs->rules[rs_num].inactive.open = 1;
745 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
747 struct pf_ruleset *rs;
748 struct pf_rule *rule;
750 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
752 rs = pf_find_ruleset(anchor);
753 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
754 rs->rules[rs_num].inactive.ticket != ticket)
756 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
757 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
758 rs->rules[rs_num].inactive.rcount--;
760 rs->rules[rs_num].inactive.open = 0;
764 #define PF_MD5_UPD(st, elm) \
765 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
767 #define PF_MD5_UPD_STR(st, elm) \
768 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
770 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
771 (stor) = htonl((st)->elm); \
772 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
775 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
776 (stor) = htons((st)->elm); \
777 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
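/*
 * A note on the HTONL/HTONS variants: multi-byte rule fields are hashed in
 * network byte order so the ruleset checksum computed by
 * pf_setup_pfsync_matching() below does not depend on host endianness and
 * can be compared between hosts (e.g. by pfsync).
 */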
781 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
783 PF_MD5_UPD(pfr, addr.type);
784 switch (pfr->addr.type) {
785 case PF_ADDR_DYNIFTL:
786 PF_MD5_UPD(pfr, addr.v.ifname);
787 PF_MD5_UPD(pfr, addr.iflags);
790 PF_MD5_UPD(pfr, addr.v.tblname);
792 case PF_ADDR_ADDRMASK:
794 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
795 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
797 case PF_ADDR_RTLABEL:
798 PF_MD5_UPD(pfr, addr.v.rtlabelname);
802 PF_MD5_UPD(pfr, port[0]);
803 PF_MD5_UPD(pfr, port[1]);
804 PF_MD5_UPD(pfr, neg);
805 PF_MD5_UPD(pfr, port_op);
809 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
814 pf_hash_rule_addr(ctx, &rule->src);
815 pf_hash_rule_addr(ctx, &rule->dst);
816 PF_MD5_UPD_STR(rule, label);
817 PF_MD5_UPD_STR(rule, ifname);
818 PF_MD5_UPD_STR(rule, match_tagname);
819 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
820 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
821 PF_MD5_UPD_HTONL(rule, prob, y);
822 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
823 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
824 PF_MD5_UPD(rule, uid.op);
825 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
826 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
827 PF_MD5_UPD(rule, gid.op);
828 PF_MD5_UPD_HTONL(rule, rule_flag, y);
829 PF_MD5_UPD(rule, action);
830 PF_MD5_UPD(rule, direction);
831 PF_MD5_UPD(rule, af);
832 PF_MD5_UPD(rule, quick);
833 PF_MD5_UPD(rule, ifnot);
834 PF_MD5_UPD(rule, match_tag_not);
835 PF_MD5_UPD(rule, natpass);
836 PF_MD5_UPD(rule, keep_state);
837 PF_MD5_UPD(rule, proto);
838 PF_MD5_UPD(rule, type);
839 PF_MD5_UPD(rule, code);
840 PF_MD5_UPD(rule, flags);
841 PF_MD5_UPD(rule, flagset);
842 PF_MD5_UPD(rule, allow_opts);
843 PF_MD5_UPD(rule, rt);
844 PF_MD5_UPD(rule, tos);
848 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
850 struct pf_ruleset *rs;
851 struct pf_rule *rule, **old_array;
852 struct pf_rulequeue *old_rules;
854 u_int32_t old_rcount;
856 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
858 rs = pf_find_ruleset(anchor);
859 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
860 ticket != rs->rules[rs_num].inactive.ticket)
863 /* Calculate checksum for the main ruleset */
864 if (rs == &pf_main_ruleset) {
865 error = pf_setup_pfsync_matching(rs);
870 /* Swap rules, keep the old. */
872 old_rules = rs->rules[rs_num].active.ptr;
873 old_rcount = rs->rules[rs_num].active.rcount;
874 old_array = rs->rules[rs_num].active.ptr_array;
876 rs->rules[rs_num].active.ptr =
877 rs->rules[rs_num].inactive.ptr;
878 rs->rules[rs_num].active.ptr_array =
879 rs->rules[rs_num].inactive.ptr_array;
880 rs->rules[rs_num].active.rcount =
881 rs->rules[rs_num].inactive.rcount;
882 rs->rules[rs_num].inactive.ptr = old_rules;
883 rs->rules[rs_num].inactive.ptr_array = old_array;
884 rs->rules[rs_num].inactive.rcount = old_rcount;
886 rs->rules[rs_num].active.ticket =
887 rs->rules[rs_num].inactive.ticket;
888 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
891 /* Purge the old rule list. */
892 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
893 pf_rm_rule(old_rules, rule);
894 if (rs->rules[rs_num].inactive.ptr_array)
895 kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
896 rs->rules[rs_num].inactive.ptr_array = NULL;
897 rs->rules[rs_num].inactive.rcount = 0;
898 rs->rules[rs_num].inactive.open = 0;
899 pf_remove_if_empty_ruleset(rs);
905 pf_setup_pfsync_matching(struct pf_ruleset *rs)
908 struct pf_rule *rule;
910 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
913 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
914 /* XXX PF_RULESET_SCRUB as well? */
915 if (rs_cnt == PF_RULESET_SCRUB)
918 if (rs->rules[rs_cnt].inactive.ptr_array)
919 kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
920 rs->rules[rs_cnt].inactive.ptr_array = NULL;
922 if (rs->rules[rs_cnt].inactive.rcount) {
923 rs->rules[rs_cnt].inactive.ptr_array =
924 kmalloc(sizeof(caddr_t) *
925 rs->rules[rs_cnt].inactive.rcount,
929 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
931 pf_hash_rule(&ctx, rule);
932 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
936 MD5Final(digest, &ctx);
937 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
942 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
945 if (pfi_dynaddr_setup(addr, af) ||
946 pf_tbladdr_setup(ruleset, addr))
953 pf_addr_copyout(struct pf_addr_wrap *addr)
955 pfi_dynaddr_copyout(addr);
956 pf_tbladdr_copyout(addr);
957 pf_rtlabel_copyout(addr);
961 pfioctl(struct dev_ioctl_args *ap)
963 u_long cmd = ap->a_cmd;
964 caddr_t addr = ap->a_data;
965 struct pf_pooladdr *pa = NULL;
966 struct pf_pool *pool = NULL;
969 lwkt_gettoken(&pf_token);
971 /* XXX keep in sync with switch() below */
972 if (securelevel > 1) {
979 case DIOCSETSTATUSIF:
986 case DIOCCLRRULECTRS:
991 case DIOCGETRULESETS:
1001 case DIOCRGETASTATS:
1002 case DIOCRCLRASTATS:
1005 case DIOCGETSRCNODES:
1006 case DIOCCLRSRCNODES:
1007 case DIOCIGETIFACES:
1012 case DIOCRCLRTABLES:
1013 case DIOCRADDTABLES:
1014 case DIOCRDELTABLES:
1015 case DIOCRSETTFLAGS:
1016 if (((struct pfioc_table *)addr)->pfrio_flags &
1018 break; /* dummy operation ok */
1019 lwkt_reltoken(&pf_token);
1022 lwkt_reltoken(&pf_token);
1027 if (!(ap->a_fflag & FWRITE)) {
1035 case DIOCGETTIMEOUT:
1040 case DIOCGETRULESETS:
1041 case DIOCGETRULESET:
1043 case DIOCRGETTABLES:
1044 case DIOCRGETTSTATS:
1046 case DIOCRGETASTATS:
1049 case DIOCGETSRCNODES:
1050 case DIOCIGETIFACES:
1053 case DIOCRCLRTABLES:
1054 case DIOCRADDTABLES:
1055 case DIOCRDELTABLES:
1056 case DIOCRCLRTSTATS:
1061 case DIOCRSETTFLAGS:
1062 if (((struct pfioc_table *)addr)->pfrio_flags &
1064 break; /* dummy operation ok */
1065 lwkt_reltoken(&pf_token);
1068 if (((struct pfioc_rule *)addr)->action ==
1070 lwkt_reltoken(&pf_token);
1075 lwkt_reltoken(&pf_token);
1082 if (pf_status.running)
1087 DPFPRINTF(PF_DEBUG_MISC,
1088 ("pf: pfil registration failed\n"));
1091 pf_status.running = 1;
1092 pf_status.since = time_second;
1093 if (pf_status.stateid == 0) {
1094 pf_status.stateid = time_second;
1095 pf_status.stateid = pf_status.stateid << 32;
1097 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1102 if (!pf_status.running)
1105 pf_status.running = 0;
1106 error = dehook_pf();
1108 pf_status.running = 1;
1109 DPFPRINTF(PF_DEBUG_MISC,
1110 ("pf: pfil unregistration failed\n"));
1112 pf_status.since = time_second;
1113 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
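/*
 * Userland reaches DIOCSTART/DIOCSTOP (and the other commands below) through
 * ioctl(2) on /dev/pf; a minimal sketch of what pfctl -e effectively does:
 *
 *	int fd = open("/dev/pf", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, DIOCSTART) == -1)
 *		warn("DIOCSTART");
 */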
1118 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1119 struct pf_ruleset *ruleset;
1120 struct pf_rule *rule, *tail;
1121 struct pf_pooladdr *pa;
1124 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1125 ruleset = pf_find_ruleset(pr->anchor);
1126 if (ruleset == NULL) {
1130 rs_num = pf_get_ruleset_number(pr->rule.action);
1131 if (rs_num >= PF_RULESET_MAX) {
1135 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1139 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1143 if (pr->pool_ticket != ticket_pabuf) {
1147 rule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK);
1148 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1149 rule->cuid = ap->a_cred->cr_ruid;
1151 rule->anchor = NULL;
1153 TAILQ_INIT(&rule->rpool.list);
1154 /* initialize refcounting */
1155 rule->states_cur = 0;
1156 rule->src_nodes = 0;
1157 rule->entries.tqe_prev = NULL;
1159 if (rule->af == AF_INET) {
1160 kfree(rule, M_PFRULEPL);
1161 error = EAFNOSUPPORT;
1166 if (rule->af == AF_INET6) {
1167 kfree(rule, M_PFRULEPL);
1168 error = EAFNOSUPPORT;
1172 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1175 rule->nr = tail->nr + 1;
1178 if (rule->ifname[0]) {
1179 rule->kif = pfi_kif_get(rule->ifname);
1180 if (rule->kif == NULL) {
1181 kfree(rule, M_PFRULEPL);
1185 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1188 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
1193 if (rule->qname[0] != 0) {
1194 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1196 else if (rule->pqname[0] != 0) {
1198 pf_qname2qid(rule->pqname)) == 0)
1201 rule->pqid = rule->qid;
1204 if (rule->tagname[0])
1205 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1207 if (rule->match_tagname[0])
1208 if ((rule->match_tag =
1209 pf_tagname2tag(rule->match_tagname)) == 0)
1211 if (rule->rt && !rule->direction)
1216 if (rule->logif >= PFLOGIFS_MAX)
1219 if (pf_rtlabel_add(&rule->src.addr) ||
1220 pf_rtlabel_add(&rule->dst.addr))
1222 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1224 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1226 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1228 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1229 if (pf_tbladdr_setup(ruleset, &pa->addr))
1232 if (rule->overload_tblname[0]) {
1233 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1234 rule->overload_tblname)) == NULL)
1237 rule->overload_tbl->pfrkt_flags |=
1241 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1242 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1243 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1244 (rule->rt > PF_FASTROUTE)) &&
1245 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1249 pf_rm_rule(NULL, rule);
1252 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1253 rule->evaluations = rule->packets[0] = rule->packets[1] =
1254 rule->bytes[0] = rule->bytes[1] = 0;
1255 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1257 ruleset->rules[rs_num].inactive.rcount++;
1261 case DIOCGETRULES: {
1262 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1263 struct pf_ruleset *ruleset;
1264 struct pf_rule *tail;
1267 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1268 ruleset = pf_find_ruleset(pr->anchor);
1269 if (ruleset == NULL) {
1273 rs_num = pf_get_ruleset_number(pr->rule.action);
1274 if (rs_num >= PF_RULESET_MAX) {
1278 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1281 pr->nr = tail->nr + 1;
1284 pr->ticket = ruleset->rules[rs_num].active.ticket;
1289 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1290 struct pf_ruleset *ruleset;
1291 struct pf_rule *rule;
1294 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1295 ruleset = pf_find_ruleset(pr->anchor);
1296 if (ruleset == NULL) {
1300 rs_num = pf_get_ruleset_number(pr->rule.action);
1301 if (rs_num >= PF_RULESET_MAX) {
1305 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1309 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1310 while ((rule != NULL) && (rule->nr != pr->nr))
1311 rule = TAILQ_NEXT(rule, entries);
1316 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1317 if (pf_anchor_copyout(ruleset, rule, pr)) {
1321 pf_addr_copyout(&pr->rule.src.addr);
1322 pf_addr_copyout(&pr->rule.dst.addr);
1323 for (i = 0; i < PF_SKIP_COUNT; ++i)
1324 if (rule->skip[i].ptr == NULL)
1325 pr->rule.skip[i].nr = (uint32_t)(-1);
1327 pr->rule.skip[i].nr =
1328 rule->skip[i].ptr->nr;
1330 if (pr->action == PF_GET_CLR_CNTR) {
1331 rule->evaluations = 0;
1332 rule->packets[0] = rule->packets[1] = 0;
1333 rule->bytes[0] = rule->bytes[1] = 0;
1334 rule->states_tot = 0;
1339 case DIOCCHANGERULE: {
1340 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1341 struct pf_ruleset *ruleset;
1342 struct pf_rule *oldrule = NULL, *newrule = NULL;
1346 if (!(pcr->action == PF_CHANGE_REMOVE ||
1347 pcr->action == PF_CHANGE_GET_TICKET) &&
1348 pcr->pool_ticket != ticket_pabuf) {
1353 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1354 pcr->action > PF_CHANGE_GET_TICKET) {
1358 ruleset = pf_find_ruleset(pcr->anchor);
1359 if (ruleset == NULL) {
1363 rs_num = pf_get_ruleset_number(pcr->rule.action);
1364 if (rs_num >= PF_RULESET_MAX) {
1369 if (pcr->action == PF_CHANGE_GET_TICKET) {
1370 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1374 ruleset->rules[rs_num].active.ticket) {
1378 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1384 if (pcr->action != PF_CHANGE_REMOVE) {
1385 newrule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK|M_NULLOK);
1386 if (newrule == NULL) {
1390 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1391 newrule->cuid = ap->a_cred->cr_ruid;
1393 TAILQ_INIT(&newrule->rpool.list);
1394 /* initialize refcounting */
1395 newrule->states_cur = 0;
1396 newrule->entries.tqe_prev = NULL;
1398 if (newrule->af == AF_INET) {
1399 kfree(newrule, M_PFRULEPL);
1400 error = EAFNOSUPPORT;
1405 if (newrule->af == AF_INET6) {
1406 kfree(newrule, M_PFRULEPL);
1407 error = EAFNOSUPPORT;
1411 if (newrule->ifname[0]) {
1412 newrule->kif = pfi_kif_get(newrule->ifname);
1413 if (newrule->kif == NULL) {
1414 kfree(newrule, M_PFRULEPL);
1418 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1420 newrule->kif = NULL;
1422 if (newrule->rtableid > 0 &&
1423 newrule->rtableid > rt_numfibs)
1428 if (newrule->qname[0] != 0) {
1430 pf_qname2qid(newrule->qname)) == 0)
1432 else if (newrule->pqname[0] != 0) {
1433 if ((newrule->pqid =
1434 pf_qname2qid(newrule->pqname)) == 0)
1437 newrule->pqid = newrule->qid;
1440 if (newrule->tagname[0])
1442 pf_tagname2tag(newrule->tagname)) == 0)
1444 if (newrule->match_tagname[0])
1445 if ((newrule->match_tag = pf_tagname2tag(
1446 newrule->match_tagname)) == 0)
1448 if (newrule->rt && !newrule->direction)
1453 if (newrule->logif >= PFLOGIFS_MAX)
1456 if (pf_rtlabel_add(&newrule->src.addr) ||
1457 pf_rtlabel_add(&newrule->dst.addr))
1459 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1461 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1463 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1465 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1466 if (pf_tbladdr_setup(ruleset, &pa->addr))
1469 if (newrule->overload_tblname[0]) {
1470 if ((newrule->overload_tbl = pfr_attach_table(
1471 ruleset, newrule->overload_tblname)) ==
1475 newrule->overload_tbl->pfrkt_flags |=
1479 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1480 if (((((newrule->action == PF_NAT) ||
1481 (newrule->action == PF_RDR) ||
1482 (newrule->action == PF_BINAT) ||
1483 (newrule->rt > PF_FASTROUTE)) &&
1484 !newrule->anchor)) &&
1485 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1489 pf_rm_rule(NULL, newrule);
1492 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1493 newrule->evaluations = 0;
1494 newrule->packets[0] = newrule->packets[1] = 0;
1495 newrule->bytes[0] = newrule->bytes[1] = 0;
1497 pf_empty_pool(&pf_pabuf);
1499 if (pcr->action == PF_CHANGE_ADD_HEAD)
1500 oldrule = TAILQ_FIRST(
1501 ruleset->rules[rs_num].active.ptr);
1502 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1503 oldrule = TAILQ_LAST(
1504 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1506 oldrule = TAILQ_FIRST(
1507 ruleset->rules[rs_num].active.ptr);
1508 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1509 oldrule = TAILQ_NEXT(oldrule, entries);
1510 if (oldrule == NULL) {
1511 if (newrule != NULL)
1512 pf_rm_rule(NULL, newrule);
1518 if (pcr->action == PF_CHANGE_REMOVE) {
1519 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1520 ruleset->rules[rs_num].active.rcount--;
1522 if (oldrule == NULL)
1524 ruleset->rules[rs_num].active.ptr,
1526 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1527 pcr->action == PF_CHANGE_ADD_BEFORE)
1528 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1531 ruleset->rules[rs_num].active.ptr,
1532 oldrule, newrule, entries);
1533 ruleset->rules[rs_num].active.rcount++;
1537 TAILQ_FOREACH(oldrule,
1538 ruleset->rules[rs_num].active.ptr, entries)
1541 ruleset->rules[rs_num].active.ticket++;
1543 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1544 pf_remove_if_empty_ruleset(ruleset);
1549 case DIOCCLRSTATES: {
1550 struct pf_state *s, *nexts;
1551 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1553 globaldata_t save_gd = mycpu;
1556 for (nn = 0; nn < ncpus; ++nn) {
1557 lwkt_setcpu_self(globaldata_find(nn));
1558 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1560 nexts = RB_NEXT(pf_state_tree_id,
1563 if (!psk->psk_ifname[0] ||
1564 !strcmp(psk->psk_ifname,
1565 s->kif->pfik_name)) {
1567 * don't send out individual
1570 s->sync_flags = PFSTATE_NOSYNC;
1576 lwkt_setcpu_self(save_gd);
1577 psk->psk_killed = killed;
1578 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1582 case DIOCKILLSTATES: {
1583 struct pf_state *s, *nexts;
1584 struct pf_state_key *sk;
1585 struct pf_addr *srcaddr, *dstaddr;
1586 u_int16_t srcport, dstport;
1587 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1589 globaldata_t save_gd = mycpu;
1592 if (psk->psk_pfcmp.id) {
1593 if (psk->psk_pfcmp.creatorid == 0)
1594 psk->psk_pfcmp.creatorid = pf_status.hostid;
1595 for (nn = 0; nn < ncpus; ++nn) {
1596 lwkt_setcpu_self(globaldata_find(nn));
1597 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1598 /* send immediate delete of state */
1599 pfsync_delete_state(s);
1600 s->sync_flags |= PFSTATE_NOSYNC;
1605 lwkt_setcpu_self(save_gd);
1609 for (nn = 0; nn < ncpus; ++nn) {
1610 lwkt_setcpu_self(globaldata_find(nn));
1611 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1613 nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
1614 sk = s->key[PF_SK_WIRE];
1616 if (s->direction == PF_OUT) {
1617 srcaddr = &sk->addr[1];
1618 dstaddr = &sk->addr[0];
1619 srcport = sk->port[1];
1620 dstport = sk->port[0];
1622 srcaddr = &sk->addr[0];
1623 dstaddr = &sk->addr[1];
1624 srcport = sk->port[0];
1625 dstport = sk->port[1];
1627 if ((!psk->psk_af || sk->af == psk->psk_af)
1628 && (!psk->psk_proto || psk->psk_proto ==
1630 PF_MATCHA(psk->psk_src.neg,
1631 &psk->psk_src.addr.v.a.addr,
1632 &psk->psk_src.addr.v.a.mask,
1634 PF_MATCHA(psk->psk_dst.neg,
1635 &psk->psk_dst.addr.v.a.addr,
1636 &psk->psk_dst.addr.v.a.mask,
1638 (psk->psk_src.port_op == 0 ||
1639 pf_match_port(psk->psk_src.port_op,
1640 psk->psk_src.port[0],
1641 psk->psk_src.port[1],
1643 (psk->psk_dst.port_op == 0 ||
1644 pf_match_port(psk->psk_dst.port_op,
1645 psk->psk_dst.port[0],
1646 psk->psk_dst.port[1],
1648 (!psk->psk_label[0] ||
1649 (s->rule.ptr->label[0] &&
1650 !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1651 (!psk->psk_ifname[0] ||
1652 !strcmp(psk->psk_ifname, s->kif->pfik_name))) {
1653 /* send immediate delete of state */
1654 pfsync_delete_state(s);
1655 s->sync_flags |= PFSTATE_NOSYNC;
1661 lwkt_setcpu_self(save_gd);
1662 psk->psk_killed = killed;
1666 case DIOCADDSTATE: {
1667 struct pfioc_state *ps = (struct pfioc_state *)addr;
1668 struct pfsync_state *sp = &ps->state;
1670 if (sp->timeout >= PFTM_MAX &&
1671 sp->timeout != PFTM_UNTIL_PACKET) {
1675 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1679 case DIOCGETSTATE: {
1680 struct pfioc_state *ps = (struct pfioc_state *)addr;
1682 struct pf_state_cmp id_key;
1683 globaldata_t save_gd = mycpu;
1686 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
1687 id_key.creatorid = ps->state.creatorid;
1689 for (nn = 0; nn < ncpus; ++nn) {
1690 lwkt_setcpu_self(globaldata_find(nn));
1691 s = pf_find_state_byid(&id_key);
1696 pfsync_state_export(&ps->state, s);
1700 lwkt_setcpu_self(save_gd);
1704 case DIOCGETSTATES: {
1705 struct pfioc_states *ps = (struct pfioc_states *)addr;
1706 struct pf_state *state;
1707 struct pfsync_state *p, *pstore;
1709 globaldata_t save_gd = mycpu;
1712 if (ps->ps_len == 0) {
1713 nr = pf_status.states;
1714 ps->ps_len = sizeof(struct pfsync_state) * nr;
1718 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1722 for (nn = 0; nn < ncpus; ++nn) {
1723 lwkt_setcpu_self(globaldata_find(nn));
1724 state = TAILQ_FIRST(&state_list[nn]);
1726 if (state->timeout != PFTM_UNLINKED) {
1727 if ((nr + 1) * sizeof(*p) >
1728 (unsigned)ps->ps_len) {
1731 pfsync_state_export(pstore, state);
1732 error = copyout(pstore, p, sizeof(*p));
1734 kfree(pstore, M_TEMP);
1735 lwkt_setcpu_self(save_gd);
1741 state = TAILQ_NEXT(state, entry_list);
1744 lwkt_setcpu_self(save_gd);
1745 ps->ps_len = sizeof(struct pfsync_state) * nr;
1746 kfree(pstore, M_TEMP);
1750 case DIOCGETSTATUS: {
1751 struct pf_status *s = (struct pf_status *)addr;
1752 bcopy(&pf_status, s, sizeof(struct pf_status));
1753 pfi_update_status(s->ifname, s);
1757 case DIOCSETSTATUSIF: {
1758 struct pfioc_if *pi = (struct pfioc_if *)addr;
1760 if (pi->ifname[0] == 0) {
1761 bzero(pf_status.ifname, IFNAMSIZ);
1764 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1768 case DIOCCLRSTATUS: {
1769 bzero(pf_status.counters, sizeof(pf_status.counters));
1770 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1771 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1772 pf_status.since = time_second;
1773 if (*pf_status.ifname)
1774 pfi_update_status(pf_status.ifname, NULL);
1779 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1780 struct pf_state_key *sk;
1781 struct pf_state *state;
1782 struct pf_state_key_cmp key;
1783 int m = 0, direction = pnl->direction;
1785 globaldata_t save_gd = mycpu;
1788 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1789 sidx = (direction == PF_IN) ? 1 : 0;
1790 didx = (direction == PF_IN) ? 0 : 1;
1793 PF_AZERO(&pnl->saddr, pnl->af) ||
1794 PF_AZERO(&pnl->daddr, pnl->af) ||
1795 ((pnl->proto == IPPROTO_TCP ||
1796 pnl->proto == IPPROTO_UDP) &&
1797 (!pnl->dport || !pnl->sport)))
1801 key.proto = pnl->proto;
1802 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1803 key.port[sidx] = pnl->sport;
1804 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1805 key.port[didx] = pnl->dport;
1808 for (nn = 0; nn < ncpus; ++nn) {
1809 lwkt_setcpu_self(globaldata_find(nn));
1810 state = pf_find_state_all(&key, direction, &m);
1817 error = E2BIG; /* more than one state */
1818 } else if (state != NULL) {
1819 sk = state->key[sidx];
1820 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1821 pnl->rsport = sk->port[sidx];
1822 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1823 pnl->rdport = sk->port[didx];
1827 lwkt_setcpu_self(save_gd);
1832 case DIOCSETTIMEOUT: {
1833 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1836 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1841 old = pf_default_rule.timeout[pt->timeout];
1842 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1844 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1845 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1846 wakeup(pf_purge_thread);
1851 case DIOCGETTIMEOUT: {
1852 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1854 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1858 pt->seconds = pf_default_rule.timeout[pt->timeout];
1862 case DIOCGETLIMIT: {
1863 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1865 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1869 pl->limit = pf_pool_limits[pl->index].limit;
1873 case DIOCSETLIMIT: {
1874 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1877 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1878 pf_pool_limits[pl->index].pp == NULL) {
1883 /* XXX Get an API to set limits on the zone/pool */
1884 old_limit = pf_pool_limits[pl->index].limit;
1885 pf_pool_limits[pl->index].limit = pl->limit;
1886 pl->limit = old_limit;
1890 case DIOCSETDEBUG: {
1891 u_int32_t *level = (u_int32_t *)addr;
1893 pf_status.debug = *level;
1897 case DIOCCLRRULECTRS: {
1898 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1899 struct pf_ruleset *ruleset = &pf_main_ruleset;
1900 struct pf_rule *rule;
1903 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1904 rule->evaluations = 0;
1905 rule->packets[0] = rule->packets[1] = 0;
1906 rule->bytes[0] = rule->bytes[1] = 0;
1911 case DIOCGIFSPEED: {
1912 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
1913 struct pf_ifspeed ps;
1916 if (psp->ifname[0] != 0) {
1917 /* Can we completely trust user-land? */
1918 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1920 ifp = ifunit(ps.ifname);
1922 psp->baudrate = ifp->if_baudrate;
1931 case DIOCSTARTALTQ: {
1932 struct pf_altq *altq;
1934 /* enable all altq interfaces on active list */
1935 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1936 if (altq->qname[0] == 0) {
1937 error = pf_enable_altq(altq);
1943 pf_altq_running = 1;
1944 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1948 case DIOCSTOPALTQ: {
1949 struct pf_altq *altq;
1951 /* disable all altq interfaces on active list */
1952 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1953 if (altq->qname[0] == 0) {
1954 error = pf_disable_altq(altq);
1960 pf_altq_running = 0;
1961 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1966 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1967 struct pf_altq *altq, *a;
1969 if (pa->ticket != ticket_altqs_inactive) {
1973 altq = kmalloc(sizeof(struct pf_altq), M_PFALTQPL, M_WAITOK|M_NULLOK);
1978 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1981 * if this is for a queue, find the discipline and
1982 * copy the necessary fields
1984 if (altq->qname[0] != 0) {
1985 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1987 kfree(altq, M_PFALTQPL);
1990 altq->altq_disc = NULL;
1991 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1992 if (strncmp(a->ifname, altq->ifname,
1993 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1994 altq->altq_disc = a->altq_disc;
2000 error = altq_add(altq);
2002 kfree(altq, M_PFALTQPL);
2006 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2007 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2011 case DIOCGETALTQS: {
2012 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2013 struct pf_altq *altq;
2016 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2018 pa->ticket = ticket_altqs_active;
2023 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2024 struct pf_altq *altq;
2027 if (pa->ticket != ticket_altqs_active) {
2032 altq = TAILQ_FIRST(pf_altqs_active);
2033 while ((altq != NULL) && (nr < pa->nr)) {
2034 altq = TAILQ_NEXT(altq, entries);
2041 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2045 case DIOCCHANGEALTQ:
2046 /* CHANGEALTQ not supported yet! */
2050 case DIOCGETQSTATS: {
2051 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2052 struct pf_altq *altq;
2056 if (pq->ticket != ticket_altqs_active) {
2060 nbytes = pq->nbytes;
2062 altq = TAILQ_FIRST(pf_altqs_active);
2063 while ((altq != NULL) && (nr < pq->nr)) {
2064 altq = TAILQ_NEXT(altq, entries);
2071 error = altq_getqstats(altq, pq->buf, &nbytes);
2073 pq->scheduler = altq->scheduler;
2074 pq->nbytes = nbytes;
2080 case DIOCBEGINADDRS: {
2081 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2083 pf_empty_pool(&pf_pabuf);
2084 pp->ticket = ++ticket_pabuf;
2089 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2091 if (pp->ticket != ticket_pabuf) {
2096 if (pp->af == AF_INET) {
2097 error = EAFNOSUPPORT;
2102 if (pp->af == AF_INET6) {
2103 error = EAFNOSUPPORT;
2107 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2108 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2109 pp->addr.addr.type != PF_ADDR_TABLE) {
2113 pa = kmalloc(sizeof(struct pf_pooladdr), M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2118 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2119 if (pa->ifname[0]) {
2120 pa->kif = pfi_kif_get(pa->ifname);
2121 if (pa->kif == NULL) {
2122 kfree(pa, M_PFPOOLADDRPL);
2126 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2128 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2129 pfi_dynaddr_remove(&pa->addr);
2130 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2131 kfree(pa, M_PFPOOLADDRPL);
2135 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2139 case DIOCGETADDRS: {
2140 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2143 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2144 pp->r_num, 0, 1, 0);
2149 TAILQ_FOREACH(pa, &pool->list, entries)
2155 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2158 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2159 pp->r_num, 0, 1, 1);
2164 pa = TAILQ_FIRST(&pool->list);
2165 while ((pa != NULL) && (nr < pp->nr)) {
2166 pa = TAILQ_NEXT(pa, entries);
2173 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2174 pf_addr_copyout(&pp->addr.addr);
2178 case DIOCCHANGEADDR: {
2179 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2180 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2181 struct pf_ruleset *ruleset;
2183 if (pca->action < PF_CHANGE_ADD_HEAD ||
2184 pca->action > PF_CHANGE_REMOVE) {
2188 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2189 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2190 pca->addr.addr.type != PF_ADDR_TABLE) {
2195 ruleset = pf_find_ruleset(pca->anchor);
2196 if (ruleset == NULL) {
2200 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2201 pca->r_num, pca->r_last, 1, 1);
2206 if (pca->action != PF_CHANGE_REMOVE) {
2207 newpa = kmalloc(sizeof(struct pf_pooladdr),
2208 M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2209 if (newpa == NULL) {
2213 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2215 if (pca->af == AF_INET) {
2216 kfree(newpa, M_PFPOOLADDRPL);
2217 error = EAFNOSUPPORT;
2222 if (pca->af == AF_INET6) {
2223 kfree(newpa, M_PFPOOLADDRPL);
2224 error = EAFNOSUPPORT;
2228 if (newpa->ifname[0]) {
2229 newpa->kif = pfi_kif_get(newpa->ifname);
2230 if (newpa->kif == NULL) {
2231 kfree(newpa, M_PFPOOLADDRPL);
2235 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2238 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2239 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2240 pfi_dynaddr_remove(&newpa->addr);
2241 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2242 kfree(newpa, M_PFPOOLADDRPL);
2248 if (pca->action == PF_CHANGE_ADD_HEAD)
2249 oldpa = TAILQ_FIRST(&pool->list);
2250 else if (pca->action == PF_CHANGE_ADD_TAIL)
2251 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2255 oldpa = TAILQ_FIRST(&pool->list);
2256 while ((oldpa != NULL) && (i < pca->nr)) {
2257 oldpa = TAILQ_NEXT(oldpa, entries);
2260 if (oldpa == NULL) {
2266 if (pca->action == PF_CHANGE_REMOVE) {
2267 TAILQ_REMOVE(&pool->list, oldpa, entries);
2268 pfi_dynaddr_remove(&oldpa->addr);
2269 pf_tbladdr_remove(&oldpa->addr);
2270 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2271 kfree(oldpa, M_PFPOOLADDRPL);
2274 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2275 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2276 pca->action == PF_CHANGE_ADD_BEFORE)
2277 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2279 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2283 pool->cur = TAILQ_FIRST(&pool->list);
2284 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2289 case DIOCGETRULESETS: {
2290 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2291 struct pf_ruleset *ruleset;
2292 struct pf_anchor *anchor;
2294 pr->path[sizeof(pr->path) - 1] = 0;
2295 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2300 if (ruleset->anchor == NULL) {
2301 /* XXX kludge for pf_main_ruleset */
2302 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2303 if (anchor->parent == NULL)
2306 RB_FOREACH(anchor, pf_anchor_node,
2307 &ruleset->anchor->children)
2313 case DIOCGETRULESET: {
2314 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2315 struct pf_ruleset *ruleset;
2316 struct pf_anchor *anchor;
2319 pr->path[sizeof(pr->path) - 1] = 0;
2320 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2325 if (ruleset->anchor == NULL) {
2326 /* XXX kludge for pf_main_ruleset */
2327 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2328 if (anchor->parent == NULL && nr++ == pr->nr) {
2329 strlcpy(pr->name, anchor->name,
2334 RB_FOREACH(anchor, pf_anchor_node,
2335 &ruleset->anchor->children)
2336 if (nr++ == pr->nr) {
2337 strlcpy(pr->name, anchor->name,
2347 case DIOCRCLRTABLES: {
2348 struct pfioc_table *io = (struct pfioc_table *)addr;
2350 if (io->pfrio_esize != 0) {
2354 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2355 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2359 case DIOCRADDTABLES: {
2360 struct pfioc_table *io = (struct pfioc_table *)addr;
2362 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2366 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2367 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2371 case DIOCRDELTABLES: {
2372 struct pfioc_table *io = (struct pfioc_table *)addr;
2374 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2378 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2379 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2383 case DIOCRGETTABLES: {
2384 struct pfioc_table *io = (struct pfioc_table *)addr;
2386 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2390 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2391 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2395 case DIOCRGETTSTATS: {
2396 struct pfioc_table *io = (struct pfioc_table *)addr;
2398 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2402 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2403 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2407 case DIOCRCLRTSTATS: {
2408 struct pfioc_table *io = (struct pfioc_table *)addr;
2410 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2414 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2415 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2419 case DIOCRSETTFLAGS: {
2420 struct pfioc_table *io = (struct pfioc_table *)addr;
2422 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2426 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2427 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2428 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2432 case DIOCRCLRADDRS: {
2433 struct pfioc_table *io = (struct pfioc_table *)addr;
2435 if (io->pfrio_esize != 0) {
2439 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2440 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2444 case DIOCRADDADDRS: {
2445 struct pfioc_table *io = (struct pfioc_table *)addr;
2447 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2451 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2452 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2453 PFR_FLAG_USERIOCTL);
2457 case DIOCRDELADDRS: {
2458 struct pfioc_table *io = (struct pfioc_table *)addr;
2460 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2464 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2465 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2466 PFR_FLAG_USERIOCTL);
2470 case DIOCRSETADDRS: {
2471 struct pfioc_table *io = (struct pfioc_table *)addr;
2473 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2477 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2478 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2479 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2480 PFR_FLAG_USERIOCTL, 0);
2484 case DIOCRGETADDRS: {
2485 struct pfioc_table *io = (struct pfioc_table *)addr;
2487 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2491 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2492 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2496 case DIOCRGETASTATS: {
2497 struct pfioc_table *io = (struct pfioc_table *)addr;
2499 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2503 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2504 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2508 case DIOCRCLRASTATS: {
2509 struct pfioc_table *io = (struct pfioc_table *)addr;
2511 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2515 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2516 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2517 PFR_FLAG_USERIOCTL);
2521 case DIOCRTSTADDRS: {
2522 struct pfioc_table *io = (struct pfioc_table *)addr;
2524 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2528 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2529 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2530 PFR_FLAG_USERIOCTL);
2534 case DIOCRINADEFINE: {
2535 struct pfioc_table *io = (struct pfioc_table *)addr;
2537 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2541 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2542 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2543 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2548 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2549 error = pf_osfp_add(io);
2554 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2555 error = pf_osfp_get(io);
2560 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2561 struct pfioc_trans_e *ioe;
2562 struct pfr_table *table;
2565 if (io->esize != sizeof(*ioe)) {
2569 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2570 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2571 for (i = 0; i < io->size; i++) {
2572 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2573 kfree(table, M_TEMP);
2578 switch (ioe->rs_num) {
2580 case PF_RULESET_ALTQ:
2581 if (ioe->anchor[0]) {
2582 kfree(table, M_TEMP);
2587 if ((error = pf_begin_altq(&ioe->ticket))) {
2588 kfree(table, M_TEMP);
2594 case PF_RULESET_TABLE:
2595 bzero(table, sizeof(*table));
2596 strlcpy(table->pfrt_anchor, ioe->anchor,
2597 sizeof(table->pfrt_anchor));
2598 if ((error = pfr_ina_begin(table,
2599 &ioe->ticket, NULL, 0))) {
2600 kfree(table, M_TEMP);
2606 if ((error = pf_begin_rules(&ioe->ticket,
2607 ioe->rs_num, ioe->anchor))) {
2608 kfree(table, M_TEMP);
2614 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2615 kfree(table, M_TEMP);
2621 kfree(table, M_TEMP);
2626 case DIOCXROLLBACK: {
2627 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2628 struct pfioc_trans_e *ioe;
2629 struct pfr_table *table;
2632 if (io->esize != sizeof(*ioe)) {
2636 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2637 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2638 for (i = 0; i < io->size; i++) {
2639 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2640 kfree(table, M_TEMP);
2645 switch (ioe->rs_num) {
2647 case PF_RULESET_ALTQ:
2648 if (ioe->anchor[0]) {
2649 kfree(table, M_TEMP);
2654 if ((error = pf_rollback_altq(ioe->ticket))) {
2655 kfree(table, M_TEMP);
2657 goto fail; /* really bad */
2661 case PF_RULESET_TABLE:
2662 bzero(table, sizeof(*table));
2663 strlcpy(table->pfrt_anchor, ioe->anchor,
2664 sizeof(table->pfrt_anchor));
2665 if ((error = pfr_ina_rollback(table,
2666 ioe->ticket, NULL, 0))) {
2667 kfree(table, M_TEMP);
2669 goto fail; /* really bad */
2673 if ((error = pf_rollback_rules(ioe->ticket,
2674 ioe->rs_num, ioe->anchor))) {
2675 kfree(table, M_TEMP);
2677 goto fail; /* really bad */
2682 kfree(table, M_TEMP);
2688 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2689 struct pfioc_trans_e *ioe;
2690 struct pfr_table *table;
2691 struct pf_ruleset *rs;
2694 if (io->esize != sizeof(*ioe)) {
2698 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2699 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2700 /* first pass: make sure everything will succeed */
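/*
 * DIOCXCOMMIT is effectively a two-phase commit: this first loop only
 * validates every ticket in the transaction, and the second loop below
 * performs the actual swaps, so a stale ticket cannot leave the
 * configuration half applied.
 */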
2701 for (i = 0; i < io->size; i++) {
2702 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2703 kfree(table, M_TEMP);
2708 switch (ioe->rs_num) {
2710 case PF_RULESET_ALTQ:
2711 if (ioe->anchor[0]) {
2712 kfree(table, M_TEMP);
2717 if (!altqs_inactive_open || ioe->ticket !=
2718 ticket_altqs_inactive) {
2719 kfree(table, M_TEMP);
2726 case PF_RULESET_TABLE:
2727 rs = pf_find_ruleset(ioe->anchor);
2728 if (rs == NULL || !rs->topen || ioe->ticket !=
2730 kfree(table, M_TEMP);
2737 if (ioe->rs_num < 0 || ioe->rs_num >=
2739 kfree(table, M_TEMP);
2744 rs = pf_find_ruleset(ioe->anchor);
2746 !rs->rules[ioe->rs_num].inactive.open ||
2747 rs->rules[ioe->rs_num].inactive.ticket !=
2749 kfree(table, M_TEMP);
2757 /* now do the commit - no errors should happen here */
2758 for (i = 0; i < io->size; i++) {
2759 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2760 kfree(table, M_TEMP);
2765 switch (ioe->rs_num) {
2767 case PF_RULESET_ALTQ:
2768 if ((error = pf_commit_altq(ioe->ticket))) {
2769 kfree(table, M_TEMP);
2771 goto fail; /* really bad */
2775 case PF_RULESET_TABLE:
2776 bzero(table, sizeof(*table));
2777 strlcpy(table->pfrt_anchor, ioe->anchor,
2778 sizeof(table->pfrt_anchor));
2779 if ((error = pfr_ina_commit(table, ioe->ticket,
2781 kfree(table, M_TEMP);
2783 goto fail; /* really bad */
2787 if ((error = pf_commit_rules(ioe->ticket,
2788 ioe->rs_num, ioe->anchor))) {
2789 kfree(table, M_TEMP);
2791 goto fail; /* really bad */
2796 kfree(table, M_TEMP);
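/*
 * DIOCGETSRCNODES: walk the source-tracking tree of every cpu and copy
 * the nodes out to userland, converting creation/expire times to
 * seconds relative to now.
 */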
2801 case DIOCGETSRCNODES: {
2802 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2803 struct pf_src_node *n, *p, *pstore;
2805 int space = psn->psn_len;
2809 for (nn = 0; nn < ncpus; ++nn) {
2810 RB_FOREACH(n, pf_src_tree,
2811 &tree_src_tracking[nn]) {
2815 psn->psn_len = sizeof(struct pf_src_node) * nr;
2819 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2821 p = psn->psn_src_nodes;
2824 * WARNING: We are not switching cpus so we cannot call
2825 * nominal pf.c support routines for cpu-specific
2826 * data.
2827 */
2828 for (nn = 0; nn < ncpus; ++nn) {
2829 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2830 int secs = time_second, diff;
2832 if ((nr + 1) * sizeof(*p) >
2833 (unsigned)psn->psn_len) {
2837 bcopy(n, pstore, sizeof(*pstore));
2838 if (n->rule.ptr != NULL)
2839 pstore->rule.nr = n->rule.ptr->nr;
2840 pstore->creation = secs - pstore->creation;
2841 if (pstore->expire > secs)
2842 pstore->expire -= secs;
2846 /* adjust the connection rate estimate */
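/*
 * The exported count decays linearly over the measurement window:
 * once conn_rate.seconds have elapsed since the last update the
 * estimate reported to userland drops to zero.
 */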
2847 diff = secs - n->conn_rate.last;
2848 if (diff >= n->conn_rate.seconds)
2849 pstore->conn_rate.count = 0;
2851 pstore->conn_rate.count -=
2852 n->conn_rate.count * diff /
2853 n->conn_rate.seconds;
2855 error = copyout(pstore, p, sizeof(*p));
2857 kfree(pstore, M_TEMP);
2864 psn->psn_len = sizeof(struct pf_src_node) * nr;
2865 kfree(pstore, M_TEMP);
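/*
 * DIOCCLRSRCNODES: detach every state from its source node on each
 * cpu, then visit each cpu in turn so the now unreferenced nodes can
 * be expired by the nominal purge code.
 */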
2869 case DIOCCLRSRCNODES: {
2870 struct pf_src_node *n;
2871 struct pf_state *state;
2872 globaldata_t save_gd = mycpu;
2876 * WARNING: We are not switching cpus so we cannot call
2877 * nominal pf.c support routines for cpu-specific
2878 * data.
2879 */
2880 for (nn = 0; nn < ncpus; ++nn) {
2881 RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
2882 state->src_node = NULL;
2883 state->nat_src_node = NULL;
2885 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2892 * WARNING: Must move to the target cpu for nominal calls
2895 for (nn = 0; nn < ncpus; ++nn) {
2896 lwkt_setcpu_self(globaldata_find(nn));
2897 pf_purge_expired_src_nodes(1);
2899 lwkt_setcpu_self(save_gd);
2900 pf_status.src_nodes = 0;
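/*
 * DIOCKILLSRCNODES: expire only the source nodes whose address pair
 * matches the supplied src/dst masks, unlinking any states that still
 * reference them first and reporting the number killed.
 */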
2904 case DIOCKILLSRCNODES: {
2905 struct pf_src_node *sn;
2907 struct pfioc_src_node_kill *psnk =
2908 (struct pfioc_src_node_kill *)addr;
2910 globaldata_t save_gd = mycpu;
2914 * WARNING: We are not switching cpus so we cannot call
2915 * nominal pf.c support routines for cpu-specific
2916 * data.
2917 */
2918 for (nn = 0; nn < ncpus; ++nn) {
2919 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking[nn]) {
2920 if (PF_MATCHA(psnk->psnk_src.neg,
2921 &psnk->psnk_src.addr.v.a.addr,
2922 &psnk->psnk_src.addr.v.a.mask,
2923 &sn->addr, sn->af) &&
2924 PF_MATCHA(psnk->psnk_dst.neg,
2925 &psnk->psnk_dst.addr.v.a.addr,
2926 &psnk->psnk_dst.addr.v.a.mask,
2927 &sn->raddr, sn->af)) {
2928 /* Handle state to src_node linkage */
2929 if (sn->states != 0) {
2930 RB_FOREACH(s, pf_state_tree_id,
2932 if (s->src_node == sn)
2933 s->src_node = NULL;
2934 if (s->nat_src_node == sn)
2935 s->nat_src_node = NULL;
2945 for (nn = 0; nn < ncpus; ++nn) {
2946 lwkt_setcpu_self(globaldata_find(nn));
2947 pf_purge_expired_src_nodes(1);
2949 lwkt_setcpu_self(save_gd);
2952 psnk->psnk_killed = killed;
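/* DIOCSETHOSTID: a hostid of zero asks the kernel to pick a random one */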
2956 case DIOCSETHOSTID: {
2957 u_int32_t *hostid = (u_int32_t *)addr;
2960 pf_status.hostid = karc4random();
2962 pf_status.hostid = *hostid;
2972 case DIOCIGETIFACES: {
2973 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2975 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2979 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2984 case DIOCSETIFFLAG: {
2985 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2987 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2991 case DIOCCLRIFFLAG: {
2992 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2994 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3003 lwkt_reltoken(&pf_token);
3008 * XXX - Check for version mismatch!!!
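* Flush every state on every cpu; sync_flags is set to PFSTATE_NOSYNC
* so no individual pfsync delete messages are generated.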
3011 pf_clear_states(void)
3013 struct pf_state *s, *nexts;
3014 globaldata_t save_gd = mycpu;
3017 for (nn = 0; nn < ncpus; ++nn) {
3018 lwkt_setcpu_self(globaldata_find(nn));
3019 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]); s; s = nexts) {
3020 nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
3022 /* don't send out individual delete messages */
3023 s->sync_flags = PFSTATE_NOSYNC;
3028 lwkt_setcpu_self(save_gd);
3032 * XXX This is called on module unload; do we want to sync that over? */
3034 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3039 pf_clear_tables(void)
3041 struct pfioc_table io;
3044 bzero(&io, sizeof(io));
3046 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
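/*
 * Detach all states from their source nodes and purge the nodes on
 * every cpu; used by the full flush path further below.
 */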
3053 pf_clear_srcnodes(void)
3055 struct pf_src_node *n;
3056 struct pf_state *state;
3057 globaldata_t save_gd = mycpu;
3060 for (nn = 0; nn < ncpus; ++nn) {
3061 lwkt_setcpu_self(globaldata_find(nn));
3062 RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
3063 state->src_node = NULL;
3064 state->nat_src_node = NULL;
3066 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
3070 pf_purge_expired_src_nodes(0);
3072 lwkt_setcpu_self(save_gd);
3074 pf_status.src_nodes = 0;
3078 * XXX - Check for version mismatch!!!
3082 * Duplicate pfctl -Fa operation to get rid of as much as we can.
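* The sequence below stops packet processing, opens and immediately
* commits empty transactions for every ruleset type, flushes the
* tables and the altq configuration, and finally drops the source
* nodes.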
3092 pf_status.running = 0;
3093 error = dehook_pf();
3095 pf_status.running = 1;
3096 DPFPRINTF(PF_DEBUG_MISC,
3097 ("pf: pfil unregistration failed\n"));
3101 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) {
3102 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3105 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) {
3106 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3107 break; /* XXX: rollback? */
3109 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) {
3110 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3111 break; /* XXX: rollback? */
3113 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3115 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3116 break; /* XXX: rollback? */
3118 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3120 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3121 break; /* XXX: rollback? */
3124 /* XXX: these should always succeed here */
3125 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3126 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3127 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3128 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3129 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3131 if ((error = pf_clear_tables()) != 0)
3134 if ((error = pf_begin_altq(&t[0])) != 0) {
3135 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3138 pf_commit_altq(t[0]);
3141 pf_clear_srcnodes();
3143 /* status does not use malloc'ed memory, so there is nothing to clean up */
3144 /* fingerprints and interfaces have their own cleanup code */
3150 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3153 * DragonFly's version of pf uses FreeBSD's native host byte ordering
3154 * for ip_len/ip_off. This is why we don't have to change byte order
3155 * like the FreeBSD-5 version does.
3159 lwkt_gettoken_shared(&pf_token);
3161 chk = pf_test(PF_IN, ifp, m, NULL, NULL);
3166 lwkt_reltoken(&pf_token);
3171 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3174 * DragonFly's version of pf uses FreeBSD's native host byte ordering
3175 * for ip_len/ip_off. This is why we don't have to change byte order
3176 * like the FreeBSD-5 version does.
3180 lwkt_gettoken_shared(&pf_token);
3182 /* We need a proper CSUM before we start (see OpenBSD ip_output) */
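/*
 * pf may rewrite addresses and ports and fix the checksum up
 * incrementally, which only works once the deferred data checksum has
 * actually been computed.
 */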
3183 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3184 in_delayed_cksum(*m);
3185 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3187 chk = pf_test(PF_OUT, ifp, m, NULL, NULL);
3192 lwkt_reltoken(&pf_token);
3198 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3201 * IPv6 is not affected by ip_len/ip_off byte order changes.
3205 lwkt_gettoken_shared(&pf_token);
3207 chk = pf_test6(PF_IN, ifp, m, NULL, NULL);
3212 lwkt_reltoken(&pf_token);
3217 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3220 * IPv6 is not affected by ip_len/ip_off byte order changes.
3224 lwkt_gettoken_shared(&pf_token);
3226 /* We need a proper CSUM before we start (see OpenBSD ip_output) */
3227 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3228 in_delayed_cksum(*m);
3229 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3231 chk = pf_test6(PF_OUT, ifp, m, NULL, NULL);
3236 lwkt_reltoken(&pf_token);
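/*
 * Attach the pf check functions to the AF_INET and AF_INET6 pfil heads
 * so every packet is run through pf_test()/pf_test6().
 */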
3244 struct pfil_head *pfh_inet;
3246 struct pfil_head *pfh_inet6;
3249 lwkt_gettoken(&pf_token);
3251 if (pf_pfil_hooked) {
3252 lwkt_reltoken(&pf_token);
3256 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3257 if (pfh_inet == NULL) {
3258 lwkt_reltoken(&pf_token);
3261 pfil_add_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3262 pfil_add_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3264 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3265 if (pfh_inet6 == NULL) {
3266 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3267 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3268 lwkt_reltoken(&pf_token);
3271 pfil_add_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
3272 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
3276 lwkt_reltoken(&pf_token);
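/*
 * Detach the pf check functions from the pfil heads again; the
 * counterpart of the hook routine above, used when pf is disabled or
 * unloaded.
 */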
3283 struct pfil_head *pfh_inet;
3285 struct pfil_head *pfh_inet6;
3288 lwkt_gettoken(&pf_token);
3290 if (pf_pfil_hooked == 0) {
3291 lwkt_reltoken(&pf_token);
3295 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3296 if (pfh_inet == NULL) {
3297 lwkt_reltoken(&pf_token);
3300 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3301 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3303 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3304 if (pfh_inet6 == NULL) {
3305 lwkt_reltoken(&pf_token);
3308 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
3309 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
3313 lwkt_reltoken(&pf_token);
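/*
 * Module load path: create the pf device node and initialize the
 * consistency and global state table locks.
 */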
3320 lwkt_gettoken(&pf_token);
3322 pf_dev = make_dev(&pf_ops, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
3324 lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE);
3325 lockinit(&pf_global_statetbl_lock, "pfglstlk", 0, 0);
3326 lwkt_reltoken(&pf_token);
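/*
 * rnh_walktree() callback used at unload time to remove every entry
 * from the pf mask radix tree.
 */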
3331 pf_mask_del(struct radix_node *rn, void *arg)
3333 struct radix_node_head *rnh = arg;
3335 rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
3344 pf_status.running = 0;
3346 lwkt_gettoken(&pf_token);
3348 error = dehook_pf();
3351 * Should not happen!
3352 * XXX Due to error code ESRCH, kldunload will show
3353 * a message like 'No such process'.
3355 kprintf("pfil unregistration fail\n");
3356 lwkt_reltoken(&pf_token);
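/* wake the purge thread and wait for it to acknowledge the shutdown */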
3361 while (pf_end_threads < 2) {
3362 wakeup_one(pf_purge_thread);
3363 tsleep(pf_purge_thread, 0, "pftmo", hz);
3367 dev_ops_remove_all(&pf_ops);
3368 lockuninit(&pf_consistency_lock);
3369 lwkt_reltoken(&pf_token);
3371 if (pf_maskhead != NULL) {
3372 pf_maskhead->rnh_walktree(pf_maskhead,
3373 pf_mask_del, pf_maskhead);
3377 kmalloc_destroy(&pf_state_pl);
3378 kmalloc_destroy(&pf_frent_pl);
3379 kmalloc_destroy(&pf_cent_pl);
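/*
 * Kernel module event handler; unload tears pf down via pf_unload()
 * while holding pf_token.
 */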
3384 pf_modevent(module_t mod, int type, void *data __unused)
3388 lwkt_gettoken(&pf_token);
3396 error = pf_unload();
3402 lwkt_reltoken(&pf_token);
3406 static moduledata_t pf_mod = {
3412 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
3413 MODULE_VERSION(pf, PF_MODVER);