1 /* $OpenBSD: pf_ioctl.c,v 1.209 2008/06/29 08:42:15 mcbride Exp $ */
2 /* add: $OpenBSD: pf_ioctl.c,v 1.212 2009/02/15 20:42:33 mbalmer Exp $ */
5 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
7 * Copyright (c) 2001 Daniel Hartmeier
8 * Copyright (c) 2002,2003 Henning Brauer
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * - Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
35 * Effort sponsored in part by the Defense Advanced Research Projects
36 * Agency (DARPA) and Air Force Research Laboratory, Air Force
37 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 #include "opt_inet6.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/device.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/kernel.h>
54 #include <sys/kthread.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
61 #include <sys/thread2.h>
64 #include <net/if_types.h>
65 #include <net/route.h>
67 #include <netinet/in.h>
68 #include <netinet/in_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_icmp.h>
74 #include <net/pf/pfvar.h>
77 #include <net/pf/if_pflog.h>
78 #include <net/pf/if_pfsync.h>
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
86 #include <net/altq/altq.h>
89 #include <machine/limits.h>
91 #include <sys/mutex.h>
93 u_int rt_numfibs = RT_NUMFIBS;
96 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
97 u_int8_t, u_int8_t, u_int8_t);
99 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
100 void pf_empty_pool(struct pf_palist *);
102 int pf_begin_altq(u_int32_t *);
103 int pf_rollback_altq(u_int32_t);
104 int pf_commit_altq(u_int32_t);
105 int pf_enable_altq(struct pf_altq *);
106 int pf_disable_altq(struct pf_altq *);
108 int pf_begin_rules(u_int32_t *, int, const char *);
109 int pf_rollback_rules(u_int32_t, int, char *);
110 int pf_setup_pfsync_matching(struct pf_ruleset *);
111 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
112 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
113 int pf_commit_rules(u_int32_t, int, char *);
114 int pf_addr_setup(struct pf_ruleset *,
115 struct pf_addr_wrap *, sa_family_t);
116 void pf_addr_copyout(struct pf_addr_wrap *);
118 struct pf_rule pf_default_rule;
119 struct lock pf_consistency_lock;
120 struct lock pf_global_statetbl_lock;
122 static int pf_altq_running;
125 #define TAGID_MAX 50000
126 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
127 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
129 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
130 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
132 u_int16_t tagname2tag(struct pf_tags *, char *);
133 void tag2tagname(struct pf_tags *, u_int16_t, char *);
134 void tag_unref(struct pf_tags *, u_int16_t);
135 int pf_rtlabel_add(struct pf_addr_wrap *);
136 void pf_rtlabel_remove(struct pf_addr_wrap *);
137 void pf_rtlabel_copyout(struct pf_addr_wrap *);
139 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
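/*
 * Illustrative expansion (added, not in the original): the second macro
 * argument carries its own parentheses so that "kprintf x" forms a valid
 * call.  For example,
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 *
 * expands to
 *
 *	if (pf_status.debug >= (PF_DEBUG_MISC)) kprintf ("pf: started\n");
 */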
141 static cdev_t pf_dev;
143 static MALLOC_DEFINE(M_PFRULEPL, "pfrulepl", "pf rule pool list");
144 static MALLOC_DEFINE(M_PFALTQPL, "pfaltqpl", "pf altq pool list");
145 static MALLOC_DEFINE(M_PFPOOLADDRPL, "pfpooladdrpl", "pf pool address pool list");
146 static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list");
150 * XXX - These are new and need to be checked when moving to a new version
152 static void pf_clear_states(void);
153 static int pf_clear_tables(void);
154 static void pf_clear_srcnodes(void);
156 * XXX - These are new and need to be checked when moving to a new version
160 * Wrapper functions for pfil(9) hooks
162 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
164 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
167 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
169 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
173 static int hook_pf(void);
174 static int dehook_pf(void);
175 static int shutdown_pf(void);
176 static int pf_load(void);
177 static int pf_unload(void);
183 static struct dev_ops pf_ops = { /* XXX convert to port model */
190 static volatile int pf_pfil_hooked = 0;
191 int pf_end_threads = 0;
193 int debug_pfugidhack = 0;
194 SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
195 "Enable/disable pf user/group rules mpsafe hack");
200 u_int32_t *my_timeout = pf_default_rule.timeout;
203 if (!rn_inithead((void **)&pf_maskhead, NULL, 0)) {
204 kprintf("pf mask radix tree create failed\n");
207 kmalloc_create(&pf_state_pl, "pf state pool list");
208 kmalloc_raise_limit(pf_state_pl, 0);
209 kmalloc_create(&pf_frent_pl, "pf fragment pool list");
210 kmalloc_raise_limit(pf_frent_pl, 0);
211 kmalloc_create(&pf_cent_pl, "pf cent pool list");
212 kmalloc_raise_limit(pf_cent_pl, 0);
216 pf_osfp_initialize();
218 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
219 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
220 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
221 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
222 if (ctob(physmem) <= 100*1024*1024)
223 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
224 PFR_KENTRY_HIWAT_SMALL;
226 for (nn = 0; nn < ncpus; ++nn) {
227 RB_INIT(&tree_src_tracking[nn]);
228 RB_INIT(&tree_id[nn]);
230 RB_INIT(&pf_anchors);
231 pf_init_ruleset(&pf_main_ruleset);
232 TAILQ_INIT(&pf_altqs[0]);
233 TAILQ_INIT(&pf_altqs[1]);
234 TAILQ_INIT(&pf_pabuf);
235 pf_altqs_active = &pf_altqs[0];
236 pf_altqs_inactive = &pf_altqs[1];
237 for (nn = 0; nn < ncpus; ++nn)
238 TAILQ_INIT(&state_list[nn]);
240 /* default rule should never be garbage collected */
241 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
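/*
 * (Added explanatory note: pf_rm_rule() below only frees a rule once
 * rule->entries.tqe_prev is NULL, i.e. once the rule is off every rule
 * queue.  The self-referential link above keeps tqe_prev permanently
 * non-NULL, so the default rule can never be freed.)
 */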
242 pf_default_rule.action = PF_PASS;
243 pf_default_rule.nr = (uint32_t)(-1);
244 pf_default_rule.rtableid = -1;
246 /* initialize default timeouts */
247 my_timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */
248 my_timeout[PFTM_TCP_OPENING] = 30; /* No response yet */
249 my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */
250 my_timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */
251 my_timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */
252 my_timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */
253 my_timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */
254 my_timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */
255 my_timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */
256 my_timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */
257 my_timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */
258 my_timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */
259 my_timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */
260 my_timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */
261 my_timeout[PFTM_FRAG] = 30; /* Fragment expire */
262 my_timeout[PFTM_INTERVAL] = 10; /* Expire interval */
263 my_timeout[PFTM_SRC_NODE] = 0; /* Source Tracking */
264 my_timeout[PFTM_TS_DIFF] = 30; /* Allowed TS diff */
265 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
266 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
269 bzero(&pf_status, sizeof(pf_status));
270 pf_status.debug = PF_DEBUG_URGENT;
271 /* XXX do our best to avoid a conflict */
272 pf_status.hostid = karc4random();
274 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
275 panic("pfpurge thread");
279 pfopen(struct dev_open_args *ap)
281 lwkt_gettoken(&pf_token);
282 cdev_t dev = ap->a_head.a_dev;
283 if (minor(dev) >= 1) {
284 lwkt_reltoken(&pf_token);
287 lwkt_reltoken(&pf_token);
292 pfclose(struct dev_close_args *ap)
294 lwkt_gettoken(&pf_token);
295 cdev_t dev = ap->a_head.a_dev;
296 if (minor(dev) >= 1) {
297 lwkt_reltoken(&pf_token);
300 lwkt_reltoken(&pf_token);
305 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
306 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
307 u_int8_t check_ticket)
309 struct pf_ruleset *ruleset;
310 struct pf_rule *rule;
313 ruleset = pf_find_ruleset(anchor);
316 rs_num = pf_get_ruleset_number(rule_action);
317 if (rs_num >= PF_RULESET_MAX)
320 if (check_ticket && ticket !=
321 ruleset->rules[rs_num].active.ticket)
324 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
327 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
329 if (check_ticket && ticket !=
330 ruleset->rules[rs_num].inactive.ticket)
333 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
336 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
339 while ((rule != NULL) && (rule->nr != rule_number))
340 rule = TAILQ_NEXT(rule, entries);
345 return (&rule->rpool);
349 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
351 struct pf_pooladdr *mv_pool_pa;
353 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
354 TAILQ_REMOVE(poola, mv_pool_pa, entries);
355 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
360 pf_empty_pool(struct pf_palist *poola)
362 struct pf_pooladdr *empty_pool_pa;
364 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
365 pfi_dynaddr_remove(&empty_pool_pa->addr);
366 pf_tbladdr_remove(&empty_pool_pa->addr);
367 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
368 TAILQ_REMOVE(poola, empty_pool_pa, entries);
369 kfree(empty_pool_pa, M_PFPOOLADDRPL);
374 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
376 if (rulequeue != NULL) {
377 if (rule->states_cur <= 0) {
379 * XXX - we need to remove the table *before* detaching
380 * the rule to make sure the table code does not delete
381 * the anchor under our feet.
383 pf_tbladdr_remove(&rule->src.addr);
384 pf_tbladdr_remove(&rule->dst.addr);
385 if (rule->overload_tbl)
386 pfr_detach_table(rule->overload_tbl);
388 TAILQ_REMOVE(rulequeue, rule, entries);
389 rule->entries.tqe_prev = NULL;
393 if (rule->states_cur > 0 || rule->src_nodes > 0 ||
394 rule->entries.tqe_prev != NULL)
396 pf_tag_unref(rule->tag);
397 pf_tag_unref(rule->match_tag);
399 if (rule->pqid != rule->qid)
400 pf_qid_unref(rule->pqid);
401 pf_qid_unref(rule->qid);
403 pf_rtlabel_remove(&rule->src.addr);
404 pf_rtlabel_remove(&rule->dst.addr);
405 pfi_dynaddr_remove(&rule->src.addr);
406 pfi_dynaddr_remove(&rule->dst.addr);
407 if (rulequeue == NULL) {
408 pf_tbladdr_remove(&rule->src.addr);
409 pf_tbladdr_remove(&rule->dst.addr);
410 if (rule->overload_tbl)
411 pfr_detach_table(rule->overload_tbl);
413 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
414 pf_anchor_remove(rule);
415 pf_empty_pool(&rule->rpool.list);
416 kfree(rule, M_PFRULEPL);
420 tagname2tag(struct pf_tags *head, char *tagname)
422 struct pf_tagname *tag, *p = NULL;
423 u_int16_t new_tagid = 1;
425 TAILQ_FOREACH(tag, head, entries)
426 if (strcmp(tagname, tag->name) == 0) {
432 * to avoid fragmentation, we do a linear search from the beginning
433 * and take the first free slot we find. if there is none or the list
434 * is empty, append a new entry at the end.
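*
* Worked example (added for illustration): with tags {1, 2, 4} already
* allocated, the scan advances new_tagid past 1 and 2 to 3, stops at
* p->tag == 4, and the new entry is inserted before p with tag 3,
* keeping the list sorted by tag.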
438 if (!TAILQ_EMPTY(head))
439 for (p = TAILQ_FIRST(head); p != NULL &&
440 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
441 new_tagid = p->tag + 1;
443 if (new_tagid > TAGID_MAX)
446 /* allocate and fill new struct pf_tagname */
447 tag = kmalloc(sizeof(*tag), M_TEMP, M_WAITOK);
448 strlcpy(tag->name, tagname, sizeof(tag->name));
449 tag->tag = new_tagid;
452 if (p != NULL) /* insert new entry before p */
453 TAILQ_INSERT_BEFORE(p, tag, entries);
454 else /* either list empty or no free slot in between */
455 TAILQ_INSERT_TAIL(head, tag, entries);
461 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
463 struct pf_tagname *tag;
465 TAILQ_FOREACH(tag, head, entries)
466 if (tag->tag == tagid) {
467 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
473 tag_unref(struct pf_tags *head, u_int16_t tag)
475 struct pf_tagname *p, *next;
480 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
481 next = TAILQ_NEXT(p, entries);
484 TAILQ_REMOVE(head, p, entries);
493 pf_tagname2tag(char *tagname)
495 return (tagname2tag(&pf_tags, tagname));
499 pf_tag2tagname(u_int16_t tagid, char *p)
501 tag2tagname(&pf_tags, tagid, p);
505 pf_tag_ref(u_int16_t tag)
507 struct pf_tagname *t;
509 TAILQ_FOREACH(t, &pf_tags, entries)
517 pf_tag_unref(u_int16_t tag)
519 tag_unref(&pf_tags, tag);
523 pf_rtlabel_add(struct pf_addr_wrap *a)
529 pf_rtlabel_remove(struct pf_addr_wrap *a)
534 pf_rtlabel_copyout(struct pf_addr_wrap *a)
536 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
537 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
542 pf_qname2qid(char *qname)
544 return ((u_int32_t)tagname2tag(&pf_qids, qname));
548 pf_qid2qname(u_int32_t qid, char *p)
550 tag2tagname(&pf_qids, (u_int16_t)qid, p);
554 pf_qid_unref(u_int32_t qid)
556 tag_unref(&pf_qids, (u_int16_t)qid);
560 pf_begin_altq(u_int32_t *ticket)
562 struct pf_altq *altq;
565 /* Purge the old altq list */
566 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
567 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
568 if (altq->qname[0] == 0 &&
569 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
570 /* detach and destroy the discipline */
571 error = altq_remove(altq);
573 pf_qid_unref(altq->qid);
574 kfree(altq, M_PFALTQPL);
578 *ticket = ++ticket_altqs_inactive;
579 altqs_inactive_open = 1;
584 pf_rollback_altq(u_int32_t ticket)
586 struct pf_altq *altq;
589 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
591 /* Purge the old altq list */
592 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
593 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
594 if (altq->qname[0] == 0 &&
595 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
596 /* detach and destroy the discipline */
597 error = altq_remove(altq);
599 pf_qid_unref(altq->qid);
600 kfree(altq, M_PFALTQPL);
602 altqs_inactive_open = 0;
607 pf_commit_altq(u_int32_t ticket)
609 struct pf_altqqueue *old_altqs;
610 struct pf_altq *altq;
613 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
616 /* swap altqs, keep the old. */
618 old_altqs = pf_altqs_active;
619 pf_altqs_active = pf_altqs_inactive;
620 pf_altqs_inactive = old_altqs;
621 ticket_altqs_active = ticket_altqs_inactive;
623 /* Attach new disciplines */
624 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
625 if (altq->qname[0] == 0 &&
626 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
627 /* attach the discipline */
628 error = altq_pfattach(altq);
636 /* Purge the old altq list */
637 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
638 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
639 if (altq->qname[0] == 0 &&
640 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
641 /* detach and destroy the discipline */
643 error = pf_disable_altq(altq);
644 err = altq_pfdetach(altq);
645 if (err != 0 && error == 0)
647 err = altq_remove(altq);
648 if (err != 0 && error == 0)
651 pf_qid_unref(altq->qid);
652 kfree(altq, M_PFALTQPL);
656 altqs_inactive_open = 0;
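/*
 * (Added note, sketching the transaction model implemented by the three
 * functions above: userland obtains a ticket from pf_begin_altq() and
 * builds the new queue set on pf_altqs_inactive; pf_rollback_altq()
 * discards that set, while pf_commit_altq() swaps it with
 * pf_altqs_active and purges the old one.)
 */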
661 pf_enable_altq(struct pf_altq *altq)
664 struct tb_profile tb;
668 ifp = ifunit(altq->ifname);
674 if (ifp->if_snd.altq_type != ALTQT_NONE)
675 error = altq_enable(&ifp->if_snd);
677 /* set tokenbucket regulator */
678 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
679 tb.rate = altq->ifbandwidth;
680 tb.depth = altq->tbrsize;
682 error = tbr_set(&ifp->if_snd, &tb);
690 pf_disable_altq(struct pf_altq *altq)
693 struct tb_profile tb;
697 ifp = ifunit(altq->ifname);
704 * if the discipline is no longer referenced, it has been overridden
705 * by a new one; in that case, just return.
707 if (altq->altq_disc != ifp->if_snd.altq_disc)
710 error = altq_disable(&ifp->if_snd);
713 /* clear tokenbucket regulator */
716 error = tbr_set(&ifp->if_snd, &tb);
724 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
727 struct pf_altq *a1, *a2, *a3;
731 /* Interrupt userland queue modifications */
732 if (altqs_inactive_open)
733 pf_rollback_altq(ticket_altqs_inactive);
735 /* Start new altq ruleset */
736 if (pf_begin_altq(&ticket))
739 /* Copy the current active set */
740 TAILQ_FOREACH(a1, pf_altqs_active, entries) {
741 a2 = kmalloc(sizeof(*a2), M_PFALTQPL, M_INTWAIT);
746 bcopy(a1, a2, sizeof(struct pf_altq));
748 if (a2->qname[0] != 0) {
749 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
751 kfree(a2, M_PFALTQPL);
754 a2->altq_disc = NULL;
755 TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
756 if (strncmp(a3->ifname, a2->ifname,
757 IFNAMSIZ) == 0 && a3->qname[0] == 0) {
758 a2->altq_disc = a3->altq_disc;
763 /* Deactivate the interface in question */
764 a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
766 ifp1 = ifunit(a2->ifname);
768 if ((ifp1 == NULL) || (remove && ifp1 == ifp)) {
769 a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
771 error = altq_add(a2);
773 if (ticket != ticket_altqs_inactive)
777 kfree(a2, M_PFALTQPL);
782 TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
786 pf_rollback_altq(ticket);
788 pf_commit_altq(ticket);
793 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
795 struct pf_ruleset *rs;
796 struct pf_rule *rule;
798 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
800 rs = pf_find_or_create_ruleset(anchor);
803 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
804 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
805 rs->rules[rs_num].inactive.rcount--;
807 *ticket = ++rs->rules[rs_num].inactive.ticket;
808 rs->rules[rs_num].inactive.open = 1;
813 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
815 struct pf_ruleset *rs;
816 struct pf_rule *rule;
818 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
820 rs = pf_find_ruleset(anchor);
821 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
822 rs->rules[rs_num].inactive.ticket != ticket)
824 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
825 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
826 rs->rules[rs_num].inactive.rcount--;
828 rs->rules[rs_num].inactive.open = 0;
832 #define PF_MD5_UPD(st, elm) \
833 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
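/*
 * Illustrative expansion (added): PF_MD5_UPD(rule, af) becomes
 *
 *	MD5Update(ctx, (u_int8_t *) &(rule)->af, sizeof((rule)->af))
 *
 * i.e. the field's raw bytes are folded into the ruleset checksum.
 */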
835 #define PF_MD5_UPD_STR(st, elm) \
836 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
838 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
839 (stor) = htonl((st)->elm); \
840 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
843 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
844 (stor) = htons((st)->elm); \
845 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
849 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
851 PF_MD5_UPD(pfr, addr.type);
852 switch (pfr->addr.type) {
853 case PF_ADDR_DYNIFTL:
854 PF_MD5_UPD(pfr, addr.v.ifname);
855 PF_MD5_UPD(pfr, addr.iflags);
858 PF_MD5_UPD(pfr, addr.v.tblname);
860 case PF_ADDR_ADDRMASK:
862 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
863 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
865 case PF_ADDR_RTLABEL:
866 PF_MD5_UPD(pfr, addr.v.rtlabelname);
870 PF_MD5_UPD(pfr, port[0]);
871 PF_MD5_UPD(pfr, port[1]);
872 PF_MD5_UPD(pfr, neg);
873 PF_MD5_UPD(pfr, port_op);
877 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
882 pf_hash_rule_addr(ctx, &rule->src);
883 pf_hash_rule_addr(ctx, &rule->dst);
884 PF_MD5_UPD_STR(rule, label);
885 PF_MD5_UPD_STR(rule, ifname);
886 PF_MD5_UPD_STR(rule, match_tagname);
887 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
888 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
889 PF_MD5_UPD_HTONL(rule, prob, y);
890 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
891 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
892 PF_MD5_UPD(rule, uid.op);
893 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
894 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
895 PF_MD5_UPD(rule, gid.op);
896 PF_MD5_UPD_HTONL(rule, rule_flag, y);
897 PF_MD5_UPD(rule, action);
898 PF_MD5_UPD(rule, direction);
899 PF_MD5_UPD(rule, af);
900 PF_MD5_UPD(rule, quick);
901 PF_MD5_UPD(rule, ifnot);
902 PF_MD5_UPD(rule, match_tag_not);
903 PF_MD5_UPD(rule, natpass);
904 PF_MD5_UPD(rule, keep_state);
905 PF_MD5_UPD(rule, proto);
906 PF_MD5_UPD(rule, type);
907 PF_MD5_UPD(rule, code);
908 PF_MD5_UPD(rule, flags);
909 PF_MD5_UPD(rule, flagset);
910 PF_MD5_UPD(rule, allow_opts);
911 PF_MD5_UPD(rule, rt);
912 PF_MD5_UPD(rule, tos);
916 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
918 struct pf_ruleset *rs;
919 struct pf_rule *rule, **old_array;
920 struct pf_rulequeue *old_rules;
922 u_int32_t old_rcount;
924 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
926 rs = pf_find_ruleset(anchor);
927 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
928 ticket != rs->rules[rs_num].inactive.ticket)
931 /* Calculate checksum for the main ruleset */
932 if (rs == &pf_main_ruleset) {
933 error = pf_setup_pfsync_matching(rs);
938 /* Swap rules, keep the old. */
940 old_rules = rs->rules[rs_num].active.ptr;
941 old_rcount = rs->rules[rs_num].active.rcount;
942 old_array = rs->rules[rs_num].active.ptr_array;
944 rs->rules[rs_num].active.ptr =
945 rs->rules[rs_num].inactive.ptr;
946 rs->rules[rs_num].active.ptr_array =
947 rs->rules[rs_num].inactive.ptr_array;
948 rs->rules[rs_num].active.rcount =
949 rs->rules[rs_num].inactive.rcount;
950 rs->rules[rs_num].inactive.ptr = old_rules;
951 rs->rules[rs_num].inactive.ptr_array = old_array;
952 rs->rules[rs_num].inactive.rcount = old_rcount;
954 rs->rules[rs_num].active.ticket =
955 rs->rules[rs_num].inactive.ticket;
956 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
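/*
 * (Added note: pf_calc_skip_steps() precomputes, for each rule, how many
 * following rules share the same value in key fields such as interface,
 * protocol and addresses, so rule evaluation can skip over them; it must
 * be re-run whenever the active list changes, as after the swap above.)
 */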
959 /* Purge the old rule list. */
960 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
961 pf_rm_rule(old_rules, rule);
962 if (rs->rules[rs_num].inactive.ptr_array)
963 kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
964 rs->rules[rs_num].inactive.ptr_array = NULL;
965 rs->rules[rs_num].inactive.rcount = 0;
966 rs->rules[rs_num].inactive.open = 0;
967 pf_remove_if_empty_ruleset(rs);
973 pf_setup_pfsync_matching(struct pf_ruleset *rs)
976 struct pf_rule *rule;
978 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
981 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
982 /* XXX PF_RULESET_SCRUB as well? */
983 if (rs_cnt == PF_RULESET_SCRUB)
986 if (rs->rules[rs_cnt].inactive.ptr_array)
987 kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
988 rs->rules[rs_cnt].inactive.ptr_array = NULL;
990 if (rs->rules[rs_cnt].inactive.rcount) {
991 rs->rules[rs_cnt].inactive.ptr_array =
992 kmalloc(sizeof(caddr_t) *
993 rs->rules[rs_cnt].inactive.rcount,
997 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
999 pf_hash_rule(&ctx, rule);
1000 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1004 MD5Final(digest, &ctx);
1005 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
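/*
 * (Added note: the MD5 digest computed over all active rules above is
 * the ruleset checksum that pfsync peers compare to verify that both
 * ends are running an identical ruleset.)
 */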
1010 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
1013 if (pfi_dynaddr_setup(addr, af) ||
1014 pf_tbladdr_setup(ruleset, addr))
1021 pf_addr_copyout(struct pf_addr_wrap *addr)
1023 pfi_dynaddr_copyout(addr);
1024 pf_tbladdr_copyout(addr);
1025 pf_rtlabel_copyout(addr);
1029 pfioctl(struct dev_ioctl_args *ap)
1031 u_long cmd = ap->a_cmd;
1032 caddr_t addr = ap->a_data;
1033 struct pf_pooladdr *pa = NULL;
1034 struct pf_pool *pool = NULL;
1037 lwkt_gettoken(&pf_token);
1039 /* XXX keep in sync with switch() below */
1040 if (securelevel > 1) {
1047 case DIOCSETSTATUSIF:
1053 case DIOCGETTIMEOUT:
1054 case DIOCCLRRULECTRS:
1059 case DIOCGETRULESETS:
1060 case DIOCGETRULESET:
1061 case DIOCRGETTABLES:
1062 case DIOCRGETTSTATS:
1063 case DIOCRCLRTSTATS:
1069 case DIOCRGETASTATS:
1070 case DIOCRCLRASTATS:
1073 case DIOCGETSRCNODES:
1074 case DIOCCLRSRCNODES:
1075 case DIOCIGETIFACES:
1080 case DIOCRCLRTABLES:
1081 case DIOCRADDTABLES:
1082 case DIOCRDELTABLES:
1083 case DIOCRSETTFLAGS:
1084 if (((struct pfioc_table *)addr)->pfrio_flags &
1086 break; /* dummy operation ok */
1087 lwkt_reltoken(&pf_token);
1090 lwkt_reltoken(&pf_token);
1095 if (!(ap->a_fflag & FWRITE)) {
1103 case DIOCGETTIMEOUT:
1108 case DIOCGETRULESETS:
1109 case DIOCGETRULESET:
1111 case DIOCRGETTABLES:
1112 case DIOCRGETTSTATS:
1114 case DIOCRGETASTATS:
1117 case DIOCGETSRCNODES:
1118 case DIOCIGETIFACES:
1121 case DIOCRCLRTABLES:
1122 case DIOCRADDTABLES:
1123 case DIOCRDELTABLES:
1124 case DIOCRCLRTSTATS:
1129 case DIOCRSETTFLAGS:
1130 if (((struct pfioc_table *)addr)->pfrio_flags &
1132 break; /* dummy operation ok */
1133 lwkt_reltoken(&pf_token);
1136 if (((struct pfioc_rule *)addr)->action ==
1138 lwkt_reltoken(&pf_token);
1143 lwkt_reltoken(&pf_token);
1150 if (pf_status.running)
1155 DPFPRINTF(PF_DEBUG_MISC,
1156 ("pf: pfil registration fail\n"));
1159 pf_status.running = 1;
1160 pf_status.since = time_second;
1161 if (pf_status.stateid == 0) {
1162 pf_status.stateid = time_second;
1163 pf_status.stateid = pf_status.stateid << 32;
1165 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1170 if (!pf_status.running)
1173 pf_status.running = 0;
1174 error = dehook_pf();
1176 pf_status.running = 1;
1177 DPFPRINTF(PF_DEBUG_MISC,
1178 ("pf: pfil unregistration failed\n"));
1180 pf_status.since = time_second;
1181 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1186 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1187 struct pf_ruleset *ruleset;
1188 struct pf_rule *rule, *tail;
1189 struct pf_pooladdr *pa;
1192 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1193 ruleset = pf_find_ruleset(pr->anchor);
1194 if (ruleset == NULL) {
1198 rs_num = pf_get_ruleset_number(pr->rule.action);
1199 if (rs_num >= PF_RULESET_MAX) {
1203 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1207 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1211 if (pr->pool_ticket != ticket_pabuf) {
1215 rule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK);
1216 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1217 rule->cuid = ap->a_cred->cr_ruid;
1219 rule->anchor = NULL;
1221 TAILQ_INIT(&rule->rpool.list);
1222 /* initialize refcounting */
1223 rule->states_cur = 0;
1224 rule->src_nodes = 0;
1225 rule->entries.tqe_prev = NULL;
1227 if (rule->af == AF_INET) {
1228 kfree(rule, M_PFRULEPL);
1229 error = EAFNOSUPPORT;
1234 if (rule->af == AF_INET6) {
1235 kfree(rule, M_PFRULEPL);
1236 error = EAFNOSUPPORT;
1240 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1243 rule->nr = tail->nr + 1;
1246 if (rule->ifname[0]) {
1247 rule->kif = pfi_kif_get(rule->ifname);
1248 if (rule->kif == NULL) {
1249 kfree(rule, M_PFRULEPL);
1253 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1256 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
1261 if (rule->qname[0] != 0) {
1262 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1264 else if (rule->pqname[0] != 0) {
1266 pf_qname2qid(rule->pqname)) == 0)
1269 rule->pqid = rule->qid;
1272 if (rule->tagname[0])
1273 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1275 if (rule->match_tagname[0])
1276 if ((rule->match_tag =
1277 pf_tagname2tag(rule->match_tagname)) == 0)
1279 if (rule->rt && !rule->direction)
1284 if (rule->logif >= PFLOGIFS_MAX)
1287 if (pf_rtlabel_add(&rule->src.addr) ||
1288 pf_rtlabel_add(&rule->dst.addr))
1290 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1292 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1294 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1296 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1297 if (pf_tbladdr_setup(ruleset, &pa->addr))
1300 if (rule->overload_tblname[0]) {
1301 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1302 rule->overload_tblname)) == NULL)
1305 rule->overload_tbl->pfrkt_flags |=
1309 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1310 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1311 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1312 (rule->rt > PF_FASTROUTE)) &&
1313 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1317 pf_rm_rule(NULL, rule);
1320 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1321 rule->evaluations = rule->packets[0] = rule->packets[1] =
1322 rule->bytes[0] = rule->bytes[1] = 0;
1323 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1325 ruleset->rules[rs_num].inactive.rcount++;
1329 case DIOCGETRULES: {
1330 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1331 struct pf_ruleset *ruleset;
1332 struct pf_rule *tail;
1335 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1336 ruleset = pf_find_ruleset(pr->anchor);
1337 if (ruleset == NULL) {
1341 rs_num = pf_get_ruleset_number(pr->rule.action);
1342 if (rs_num >= PF_RULESET_MAX) {
1346 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1349 pr->nr = tail->nr + 1;
1352 pr->ticket = ruleset->rules[rs_num].active.ticket;
1357 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1358 struct pf_ruleset *ruleset;
1359 struct pf_rule *rule;
1362 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1363 ruleset = pf_find_ruleset(pr->anchor);
1364 if (ruleset == NULL) {
1368 rs_num = pf_get_ruleset_number(pr->rule.action);
1369 if (rs_num >= PF_RULESET_MAX) {
1373 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1377 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1378 while ((rule != NULL) && (rule->nr != pr->nr))
1379 rule = TAILQ_NEXT(rule, entries);
1384 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1385 if (pf_anchor_copyout(ruleset, rule, pr)) {
1389 pf_addr_copyout(&pr->rule.src.addr);
1390 pf_addr_copyout(&pr->rule.dst.addr);
1391 for (i = 0; i < PF_SKIP_COUNT; ++i)
1392 if (rule->skip[i].ptr == NULL)
1393 pr->rule.skip[i].nr = (uint32_t)(-1);
1395 pr->rule.skip[i].nr =
1396 rule->skip[i].ptr->nr;
1398 if (pr->action == PF_GET_CLR_CNTR) {
1399 rule->evaluations = 0;
1400 rule->packets[0] = rule->packets[1] = 0;
1401 rule->bytes[0] = rule->bytes[1] = 0;
1402 rule->states_tot = 0;
1407 case DIOCCHANGERULE: {
1408 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1409 struct pf_ruleset *ruleset;
1410 struct pf_rule *oldrule = NULL, *newrule = NULL;
1414 if (!(pcr->action == PF_CHANGE_REMOVE ||
1415 pcr->action == PF_CHANGE_GET_TICKET) &&
1416 pcr->pool_ticket != ticket_pabuf) {
1421 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1422 pcr->action > PF_CHANGE_GET_TICKET) {
1426 ruleset = pf_find_ruleset(pcr->anchor);
1427 if (ruleset == NULL) {
1431 rs_num = pf_get_ruleset_number(pcr->rule.action);
1432 if (rs_num >= PF_RULESET_MAX) {
1437 if (pcr->action == PF_CHANGE_GET_TICKET) {
1438 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1442 ruleset->rules[rs_num].active.ticket) {
1446 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1452 if (pcr->action != PF_CHANGE_REMOVE) {
1453 newrule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK|M_NULLOK);
1454 if (newrule == NULL) {
1458 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1459 newrule->cuid = ap->a_cred->cr_ruid;
1461 TAILQ_INIT(&newrule->rpool.list);
1462 /* initialize refcounting */
1463 newrule->states_cur = 0;
1464 newrule->entries.tqe_prev = NULL;
1466 if (newrule->af == AF_INET) {
1467 kfree(newrule, M_PFRULEPL);
1468 error = EAFNOSUPPORT;
1473 if (newrule->af == AF_INET6) {
1474 kfree(newrule, M_PFRULEPL);
1475 error = EAFNOSUPPORT;
1479 if (newrule->ifname[0]) {
1480 newrule->kif = pfi_kif_get(newrule->ifname);
1481 if (newrule->kif == NULL) {
1482 kfree(newrule, M_PFRULEPL);
1486 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1488 newrule->kif = NULL;
1490 if (newrule->rtableid > 0 &&
1491 newrule->rtableid > rt_numfibs)
1496 if (newrule->qname[0] != 0) {
1498 pf_qname2qid(newrule->qname)) == 0)
1500 else if (newrule->pqname[0] != 0) {
1501 if ((newrule->pqid =
1502 pf_qname2qid(newrule->pqname)) == 0)
1505 newrule->pqid = newrule->qid;
1508 if (newrule->tagname[0])
1510 pf_tagname2tag(newrule->tagname)) == 0)
1512 if (newrule->match_tagname[0])
1513 if ((newrule->match_tag = pf_tagname2tag(
1514 newrule->match_tagname)) == 0)
1516 if (newrule->rt && !newrule->direction)
1521 if (newrule->logif >= PFLOGIFS_MAX)
1524 if (pf_rtlabel_add(&newrule->src.addr) ||
1525 pf_rtlabel_add(&newrule->dst.addr))
1527 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1529 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1531 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1533 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1534 if (pf_tbladdr_setup(ruleset, &pa->addr))
1537 if (newrule->overload_tblname[0]) {
1538 if ((newrule->overload_tbl = pfr_attach_table(
1539 ruleset, newrule->overload_tblname)) ==
1543 newrule->overload_tbl->pfrkt_flags |=
1547 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1548 if (((((newrule->action == PF_NAT) ||
1549 (newrule->action == PF_RDR) ||
1550 (newrule->action == PF_BINAT) ||
1551 (newrule->rt > PF_FASTROUTE)) &&
1552 !newrule->anchor)) &&
1553 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1557 pf_rm_rule(NULL, newrule);
1560 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1561 newrule->evaluations = 0;
1562 newrule->packets[0] = newrule->packets[1] = 0;
1563 newrule->bytes[0] = newrule->bytes[1] = 0;
1565 pf_empty_pool(&pf_pabuf);
1567 if (pcr->action == PF_CHANGE_ADD_HEAD)
1568 oldrule = TAILQ_FIRST(
1569 ruleset->rules[rs_num].active.ptr);
1570 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1571 oldrule = TAILQ_LAST(
1572 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1574 oldrule = TAILQ_FIRST(
1575 ruleset->rules[rs_num].active.ptr);
1576 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1577 oldrule = TAILQ_NEXT(oldrule, entries);
1578 if (oldrule == NULL) {
1579 if (newrule != NULL)
1580 pf_rm_rule(NULL, newrule);
1586 if (pcr->action == PF_CHANGE_REMOVE) {
1587 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1588 ruleset->rules[rs_num].active.rcount--;
1590 if (oldrule == NULL)
1592 ruleset->rules[rs_num].active.ptr,
1594 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1595 pcr->action == PF_CHANGE_ADD_BEFORE)
1596 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1599 ruleset->rules[rs_num].active.ptr,
1600 oldrule, newrule, entries);
1601 ruleset->rules[rs_num].active.rcount++;
1605 TAILQ_FOREACH(oldrule,
1606 ruleset->rules[rs_num].active.ptr, entries)
1609 ruleset->rules[rs_num].active.ticket++;
1611 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1612 pf_remove_if_empty_ruleset(ruleset);
1617 case DIOCCLRSTATES: {
1618 struct pf_state *s, *nexts;
1619 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1621 globaldata_t save_gd = mycpu;
1624 for (nn = 0; nn < ncpus; ++nn) {
1625 lwkt_setcpu_self(globaldata_find(nn));
1626 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1628 nexts = RB_NEXT(pf_state_tree_id,
1631 if (!psk->psk_ifname[0] ||
1632 !strcmp(psk->psk_ifname,
1633 s->kif->pfik_name)) {
1635 * don't send out individual
1638 s->sync_flags = PFSTATE_NOSYNC;
1644 lwkt_setcpu_self(save_gd);
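/*
 * (Added note: DragonFly keeps the state tables per-cpu, so the loop
 * above migrates this thread onto each cpu in turn via
 * lwkt_setcpu_self() to walk that cpu's tree_id[] tree, then returns
 * to the original cpu saved in save_gd.)
 */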
1645 psk->psk_killed = killed;
1646 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1650 case DIOCKILLSTATES: {
1651 struct pf_state *s, *nexts;
1652 struct pf_state_key *sk;
1653 struct pf_addr *srcaddr, *dstaddr;
1654 u_int16_t srcport, dstport;
1655 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1657 globaldata_t save_gd = mycpu;
1660 if (psk->psk_pfcmp.id) {
1661 if (psk->psk_pfcmp.creatorid == 0)
1662 psk->psk_pfcmp.creatorid = pf_status.hostid;
1663 for (nn = 0; nn < ncpus; ++nn) {
1664 lwkt_setcpu_self(globaldata_find(nn));
1665 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1666 /* send immediate delete of state */
1667 pfsync_delete_state(s);
1668 s->sync_flags |= PFSTATE_NOSYNC;
1673 lwkt_setcpu_self(save_gd);
1677 for (nn = 0; nn < ncpus; ++nn) {
1678 lwkt_setcpu_self(globaldata_find(nn));
1679 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1681 nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
1682 sk = s->key[PF_SK_WIRE];
1684 if (s->direction == PF_OUT) {
1685 srcaddr = &sk->addr[1];
1686 dstaddr = &sk->addr[0];
1687 srcport = sk->port[1];
1688 dstport = sk->port[0];
1690 srcaddr = &sk->addr[0];
1691 dstaddr = &sk->addr[1];
1692 srcport = sk->port[0];
1693 dstport = sk->port[1];
1695 if ((!psk->psk_af || sk->af == psk->psk_af)
1696 && (!psk->psk_proto || psk->psk_proto ==
1698 PF_MATCHA(psk->psk_src.neg,
1699 &psk->psk_src.addr.v.a.addr,
1700 &psk->psk_src.addr.v.a.mask,
1702 PF_MATCHA(psk->psk_dst.neg,
1703 &psk->psk_dst.addr.v.a.addr,
1704 &psk->psk_dst.addr.v.a.mask,
1706 (psk->psk_src.port_op == 0 ||
1707 pf_match_port(psk->psk_src.port_op,
1708 psk->psk_src.port[0],
1709 psk->psk_src.port[1],
1711 (psk->psk_dst.port_op == 0 ||
1712 pf_match_port(psk->psk_dst.port_op,
1713 psk->psk_dst.port[0],
1714 psk->psk_dst.port[1],
1716 (!psk->psk_label[0] ||
1717 (s->rule.ptr->label[0] &&
1718 !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1719 (!psk->psk_ifname[0] ||
1720 !strcmp(psk->psk_ifname, s->kif->pfik_name))) {
1721 /* send immediate delete of state */
1722 pfsync_delete_state(s);
1723 s->sync_flags |= PFSTATE_NOSYNC;
1729 lwkt_setcpu_self(save_gd);
1730 psk->psk_killed = killed;
1734 case DIOCADDSTATE: {
1735 struct pfioc_state *ps = (struct pfioc_state *)addr;
1736 struct pfsync_state *sp = &ps->state;
1738 if (sp->timeout >= PFTM_MAX &&
1739 sp->timeout != PFTM_UNTIL_PACKET) {
1743 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1747 case DIOCGETSTATE: {
1748 struct pfioc_state *ps = (struct pfioc_state *)addr;
1750 struct pf_state_cmp id_key;
1751 globaldata_t save_gd = mycpu;
1754 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
1755 id_key.creatorid = ps->state.creatorid;
1757 for (nn = 0; nn < ncpus; ++nn) {
1758 lwkt_setcpu_self(globaldata_find(nn));
1759 s = pf_find_state_byid(&id_key);
1764 pfsync_state_export(&ps->state, s);
1768 lwkt_setcpu_self(save_gd);
1772 case DIOCGETSTATES: {
1773 struct pfioc_states *ps = (struct pfioc_states *)addr;
1774 struct pf_state *state;
1775 struct pfsync_state *p, *pstore;
1777 globaldata_t save_gd = mycpu;
1780 if (ps->ps_len == 0) {
1781 nr = pf_status.states;
1782 ps->ps_len = sizeof(struct pfsync_state) * nr;
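/*
 * (Added note: this is the usual two-call sizing convention.  A caller
 * passing ps_len == 0 is told how large a buffer all states require;
 * it can then allocate that much and repeat the ioctl to fetch them.)
 */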
1786 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1790 for (nn = 0; nn < ncpus; ++nn) {
1791 lwkt_setcpu_self(globaldata_find(nn));
1792 state = TAILQ_FIRST(&state_list[nn]);
1794 if (state->timeout != PFTM_UNLINKED) {
1795 if ((nr + 1) * sizeof(*p) >
1796 (unsigned)ps->ps_len) {
1799 pfsync_state_export(pstore, state);
1800 error = copyout(pstore, p, sizeof(*p));
1802 kfree(pstore, M_TEMP);
1803 lwkt_setcpu_self(save_gd);
1809 state = TAILQ_NEXT(state, entry_list);
1812 lwkt_setcpu_self(save_gd);
1813 ps->ps_len = sizeof(struct pfsync_state) * nr;
1814 kfree(pstore, M_TEMP);
1818 case DIOCGETSTATUS: {
1819 struct pf_status *s = (struct pf_status *)addr;
1820 bcopy(&pf_status, s, sizeof(struct pf_status));
1821 pfi_update_status(s->ifname, s);
1825 case DIOCSETSTATUSIF: {
1826 struct pfioc_if *pi = (struct pfioc_if *)addr;
1828 if (pi->ifname[0] == 0) {
1829 bzero(pf_status.ifname, IFNAMSIZ);
1832 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1836 case DIOCCLRSTATUS: {
1837 bzero(pf_status.counters, sizeof(pf_status.counters));
1838 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1839 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1840 pf_status.since = time_second;
1841 if (*pf_status.ifname)
1842 pfi_update_status(pf_status.ifname, NULL);
1847 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1848 struct pf_state_key *sk;
1849 struct pf_state *state;
1850 struct pf_state_key_cmp key;
1851 int m = 0, direction = pnl->direction;
1853 globaldata_t save_gd = mycpu;
1856 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1857 sidx = (direction == PF_IN) ? 1 : 0;
1858 didx = (direction == PF_IN) ? 0 : 1;
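/*
 * (Added example: for a PF_IN lookup the caller's saddr is stored at
 * key.addr[1] and its daddr at key.addr[0], since sidx == 1 and
 * didx == 0 above; for PF_OUT the straight 0/1 mapping is used.)
 */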
1861 PF_AZERO(&pnl->saddr, pnl->af) ||
1862 PF_AZERO(&pnl->daddr, pnl->af) ||
1863 ((pnl->proto == IPPROTO_TCP ||
1864 pnl->proto == IPPROTO_UDP) &&
1865 (!pnl->dport || !pnl->sport)))
1869 key.proto = pnl->proto;
1870 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1871 key.port[sidx] = pnl->sport;
1872 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1873 key.port[didx] = pnl->dport;
1876 for (nn = 0; nn < ncpus; ++nn) {
1877 lwkt_setcpu_self(globaldata_find(nn));
1878 state = pf_find_state_all(&key, direction, &m);
1885 error = E2BIG; /* more than one state */
1886 } else if (state != NULL) {
1887 sk = state->key[sidx];
1888 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1889 pnl->rsport = sk->port[sidx];
1890 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1891 pnl->rdport = sk->port[didx];
1895 lwkt_setcpu_self(save_gd);
1900 case DIOCSETTIMEOUT: {
1901 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1904 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1909 old = pf_default_rule.timeout[pt->timeout];
1910 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1912 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1913 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1914 wakeup(pf_purge_thread);
1919 case DIOCGETTIMEOUT: {
1920 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1922 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1926 pt->seconds = pf_default_rule.timeout[pt->timeout];
1930 case DIOCGETLIMIT: {
1931 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1933 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1937 pl->limit = pf_pool_limits[pl->index].limit;
1941 case DIOCSETLIMIT: {
1942 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1945 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1946 pf_pool_limits[pl->index].pp == NULL) {
1951 /* XXX Get an API to set limits on the zone/pool */
1952 old_limit = pf_pool_limits[pl->index].limit;
1953 pf_pool_limits[pl->index].limit = pl->limit;
1954 pl->limit = old_limit;
1958 case DIOCSETDEBUG: {
1959 u_int32_t *level = (u_int32_t *)addr;
1961 pf_status.debug = *level;
1965 case DIOCCLRRULECTRS: {
1966 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1967 struct pf_ruleset *ruleset = &pf_main_ruleset;
1968 struct pf_rule *rule;
1971 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1972 rule->evaluations = 0;
1973 rule->packets[0] = rule->packets[1] = 0;
1974 rule->bytes[0] = rule->bytes[1] = 0;
1979 case DIOCGIFSPEED: {
1980 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
1981 struct pf_ifspeed ps;
1984 if (psp->ifname[0] != 0) {
1985 /* Can we completely trust user-land? */
1986 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1988 ifp = ifunit(ps.ifname);
1991 psp->baudrate = ifp->if_baudrate;
1999 case DIOCSTARTALTQ: {
2000 struct pf_altq *altq;
2002 /* enable all altq interfaces on active list */
2004 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2005 if (altq->qname[0] == 0 && (altq->local_flags &
2006 PFALTQ_FLAG_IF_REMOVED) == 0) {
2007 error = pf_enable_altq(altq);
2013 pf_altq_running = 1;
2014 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2018 case DIOCSTOPALTQ: {
2019 struct pf_altq *altq;
2021 /* disable all altq interfaces on active list */
2022 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2023 if (altq->qname[0] == 0 && (altq->local_flags &
2024 PFALTQ_FLAG_IF_REMOVED) == 0) {
2025 error = pf_disable_altq(altq);
2031 pf_altq_running = 0;
2032 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2037 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2038 struct pf_altq *altq, *a;
2041 if (pa->ticket != ticket_altqs_inactive) {
2045 altq = kmalloc(sizeof(struct pf_altq), M_PFALTQPL, M_WAITOK|M_NULLOK);
2050 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2053 * if this is for a queue, find the discipline and
2054 * copy the necessary fields
2056 if (altq->qname[0] != 0) {
2057 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2059 kfree(altq, M_PFALTQPL);
2062 altq->altq_disc = NULL;
2063 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2064 if (strncmp(a->ifname, altq->ifname,
2065 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2066 altq->altq_disc = a->altq_disc;
2073 ifp = ifunit(altq->ifname);
2076 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
2078 error = altq_add(altq);
2081 kfree(altq, M_PFALTQPL);
2085 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2086 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2090 case DIOCGETALTQS: {
2091 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2092 struct pf_altq *altq;
2095 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2097 pa->ticket = ticket_altqs_active;
2102 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2103 struct pf_altq *altq;
2106 if (pa->ticket != ticket_altqs_active) {
2111 altq = TAILQ_FIRST(pf_altqs_active);
2112 while ((altq != NULL) && (nr < pa->nr)) {
2113 altq = TAILQ_NEXT(altq, entries);
2120 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2124 case DIOCCHANGEALTQ:
2125 /* CHANGEALTQ not supported yet! */
2129 case DIOCGETQSTATS: {
2130 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2131 struct pf_altq *altq;
2135 if (pq->ticket != ticket_altqs_active) {
2139 nbytes = pq->nbytes;
2141 altq = TAILQ_FIRST(pf_altqs_active);
2142 while ((altq != NULL) && (nr < pq->nr)) {
2143 altq = TAILQ_NEXT(altq, entries);
2150 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
2154 error = altq_getqstats(altq, pq->buf, &nbytes);
2156 pq->scheduler = altq->scheduler;
2157 pq->nbytes = nbytes;
2163 case DIOCBEGINADDRS: {
2164 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2166 pf_empty_pool(&pf_pabuf);
2167 pp->ticket = ++ticket_pabuf;
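/*
 * (Added note, sketching the address-pool workflow: DIOCBEGINADDRS
 * empties pf_pabuf and hands out a ticket; subsequent DIOCADDADDR calls
 * append to pf_pabuf under that ticket, and a later DIOCADDRULE with a
 * matching pool_ticket moves the buffered addresses into the new rule's
 * pool via pf_mv_pool().)
 */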
2172 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2174 if (pp->ticket != ticket_pabuf) {
2179 if (pp->af == AF_INET) {
2180 error = EAFNOSUPPORT;
2185 if (pp->af == AF_INET6) {
2186 error = EAFNOSUPPORT;
2190 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2191 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2192 pp->addr.addr.type != PF_ADDR_TABLE) {
2196 pa = kmalloc(sizeof(struct pf_pooladdr), M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2201 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2202 if (pa->ifname[0]) {
2203 pa->kif = pfi_kif_get(pa->ifname);
2204 if (pa->kif == NULL) {
2205 kfree(pa, M_PFPOOLADDRPL);
2209 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2211 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2212 pfi_dynaddr_remove(&pa->addr);
2213 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2214 kfree(pa, M_PFPOOLADDRPL);
2218 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2222 case DIOCGETADDRS: {
2223 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2226 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2227 pp->r_num, 0, 1, 0);
2232 TAILQ_FOREACH(pa, &pool->list, entries)
2238 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2241 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2242 pp->r_num, 0, 1, 1);
2247 pa = TAILQ_FIRST(&pool->list);
2248 while ((pa != NULL) && (nr < pp->nr)) {
2249 pa = TAILQ_NEXT(pa, entries);
2256 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2257 pf_addr_copyout(&pp->addr.addr);
2261 case DIOCCHANGEADDR: {
2262 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2263 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2264 struct pf_ruleset *ruleset;
2266 if (pca->action < PF_CHANGE_ADD_HEAD ||
2267 pca->action > PF_CHANGE_REMOVE) {
2271 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2272 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2273 pca->addr.addr.type != PF_ADDR_TABLE) {
2278 ruleset = pf_find_ruleset(pca->anchor);
2279 if (ruleset == NULL) {
2283 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2284 pca->r_num, pca->r_last, 1, 1);
2289 if (pca->action != PF_CHANGE_REMOVE) {
2290 newpa = kmalloc(sizeof(struct pf_pooladdr),
2291 M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2292 if (newpa == NULL) {
2296 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2298 if (pca->af == AF_INET) {
2299 kfree(newpa, M_PFPOOLADDRPL);
2300 error = EAFNOSUPPORT;
2305 if (pca->af == AF_INET6) {
2306 kfree(newpa, M_PFPOOLADDRPL);
2307 error = EAFNOSUPPORT;
2311 if (newpa->ifname[0]) {
2312 newpa->kif = pfi_kif_get(newpa->ifname);
2313 if (newpa->kif == NULL) {
2314 kfree(newpa, M_PFPOOLADDRPL);
2318 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2321 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2322 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2323 pfi_dynaddr_remove(&newpa->addr);
2324 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2325 kfree(newpa, M_PFPOOLADDRPL);
2331 if (pca->action == PF_CHANGE_ADD_HEAD)
2332 oldpa = TAILQ_FIRST(&pool->list);
2333 else if (pca->action == PF_CHANGE_ADD_TAIL)
2334 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2338 oldpa = TAILQ_FIRST(&pool->list);
2339 while ((oldpa != NULL) && (i < pca->nr)) {
2340 oldpa = TAILQ_NEXT(oldpa, entries);
2343 if (oldpa == NULL) {
2349 if (pca->action == PF_CHANGE_REMOVE) {
2350 TAILQ_REMOVE(&pool->list, oldpa, entries);
2351 pfi_dynaddr_remove(&oldpa->addr);
2352 pf_tbladdr_remove(&oldpa->addr);
2353 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2354 kfree(oldpa, M_PFPOOLADDRPL);
2357 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2358 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2359 pca->action == PF_CHANGE_ADD_BEFORE)
2360 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2362 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2366 pool->cur = TAILQ_FIRST(&pool->list);
2367 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2372 case DIOCGETRULESETS: {
2373 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2374 struct pf_ruleset *ruleset;
2375 struct pf_anchor *anchor;
2377 pr->path[sizeof(pr->path) - 1] = 0;
2378 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2383 if (ruleset->anchor == NULL) {
2384 /* XXX kludge for pf_main_ruleset */
2385 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2386 if (anchor->parent == NULL)
2389 RB_FOREACH(anchor, pf_anchor_node,
2390 &ruleset->anchor->children)
2396 case DIOCGETRULESET: {
2397 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2398 struct pf_ruleset *ruleset;
2399 struct pf_anchor *anchor;
2402 pr->path[sizeof(pr->path) - 1] = 0;
2403 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2408 if (ruleset->anchor == NULL) {
2409 /* XXX kludge for pf_main_ruleset */
2410 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2411 if (anchor->parent == NULL && nr++ == pr->nr) {
2412 strlcpy(pr->name, anchor->name,
2417 RB_FOREACH(anchor, pf_anchor_node,
2418 &ruleset->anchor->children)
2419 if (nr++ == pr->nr) {
2420 strlcpy(pr->name, anchor->name,
2430 case DIOCRCLRTABLES: {
2431 struct pfioc_table *io = (struct pfioc_table *)addr;
2433 if (io->pfrio_esize != 0) {
2437 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2438 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2442 case DIOCRADDTABLES: {
2443 struct pfioc_table *io = (struct pfioc_table *)addr;
2445 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2449 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2450 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2454 case DIOCRDELTABLES: {
2455 struct pfioc_table *io = (struct pfioc_table *)addr;
2457 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2461 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2462 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2466 case DIOCRGETTABLES: {
2467 struct pfioc_table *io = (struct pfioc_table *)addr;
2469 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2473 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2474 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2478 case DIOCRGETTSTATS: {
2479 struct pfioc_table *io = (struct pfioc_table *)addr;
2481 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2485 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2486 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2490 case DIOCRCLRTSTATS: {
2491 struct pfioc_table *io = (struct pfioc_table *)addr;
2493 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2497 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2498 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2502 case DIOCRSETTFLAGS: {
2503 struct pfioc_table *io = (struct pfioc_table *)addr;
2505 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2509 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2510 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2511 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2515 case DIOCRCLRADDRS: {
2516 struct pfioc_table *io = (struct pfioc_table *)addr;
2518 if (io->pfrio_esize != 0) {
2522 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2523 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2527 case DIOCRADDADDRS: {
2528 struct pfioc_table *io = (struct pfioc_table *)addr;
2530 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2534 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2535 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2536 PFR_FLAG_USERIOCTL);
2540 case DIOCRDELADDRS: {
2541 struct pfioc_table *io = (struct pfioc_table *)addr;
2543 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2547 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2548 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2549 PFR_FLAG_USERIOCTL);
2553 case DIOCRSETADDRS: {
2554 struct pfioc_table *io = (struct pfioc_table *)addr;
2556 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2560 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2561 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2562 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2563 PFR_FLAG_USERIOCTL, 0);
2567 case DIOCRGETADDRS: {
2568 struct pfioc_table *io = (struct pfioc_table *)addr;
2570 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2574 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2575 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2579 case DIOCRGETASTATS: {
2580 struct pfioc_table *io = (struct pfioc_table *)addr;
2582 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2586 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2587 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2591 case DIOCRCLRASTATS: {
2592 struct pfioc_table *io = (struct pfioc_table *)addr;
2594 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2598 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2599 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2600 PFR_FLAG_USERIOCTL);
2604 case DIOCRTSTADDRS: {
2605 struct pfioc_table *io = (struct pfioc_table *)addr;
2607 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2611 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2612 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2613 PFR_FLAG_USERIOCTL);
2617 case DIOCRINADEFINE: {
2618 struct pfioc_table *io = (struct pfioc_table *)addr;
2620 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2624 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2625 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2626 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2631 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2632 error = pf_osfp_add(io);
2637 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2638 error = pf_osfp_get(io);
2643 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2644 struct pfioc_trans_e *ioe;
2645 struct pfr_table *table;
2648 if (io->esize != sizeof(*ioe)) {
2652 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2653 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2654 for (i = 0; i < io->size; i++) {
2655 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2656 kfree(table, M_TEMP);
2661 switch (ioe->rs_num) {
2663 case PF_RULESET_ALTQ:
2664 if (ioe->anchor[0]) {
2665 kfree(table, M_TEMP);
2670 if ((error = pf_begin_altq(&ioe->ticket))) {
2671 kfree(table, M_TEMP);
2677 case PF_RULESET_TABLE:
2678 bzero(table, sizeof(*table));
2679 strlcpy(table->pfrt_anchor, ioe->anchor,
2680 sizeof(table->pfrt_anchor));
2681 if ((error = pfr_ina_begin(table,
2682 &ioe->ticket, NULL, 0))) {
2683 kfree(table, M_TEMP);
2689 if ((error = pf_begin_rules(&ioe->ticket,
2690 ioe->rs_num, ioe->anchor))) {
2691 kfree(table, M_TEMP);
2697 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2698 kfree(table, M_TEMP);
2704 kfree(table, M_TEMP);
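/*
 * (Added note: DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement an
 * all-or-nothing transaction over an array of pfioc_trans_e elements,
 * opening one ticket per ruleset, table or altq set so that pfctl can
 * replace the entire configuration atomically.)
 */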
	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				kfree(table, M_TEMP);
				kfree(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree(table, M_TEMP);
		kfree(ioe, M_TEMP);
		break;
	}
	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
		/* first, make sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				kfree(table, M_TEMP);
				kfree(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				kfree(table, M_TEMP);
				kfree(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree(table, M_TEMP);
					kfree(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree(table, M_TEMP);
		kfree(ioe, M_TEMP);
		break;
	}
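	/*
	 * DIOCGETSRCNODES is a two-pass interface: called with
	 * psn_len == 0 it only reports the buffer size required,
	 * otherwise it copies out as many source-node entries as fit
	 * into the supplied buffer.
	 */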
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;
		int nn;

		if (space == 0) {
			for (nn = 0; nn < ncpus; ++nn) {
				RB_FOREACH(n, pf_src_tree,
				    &tree_src_tracking[nn]) {
					nr++;
				}
			}
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;

		/*
		 * WARNING: We are not switching cpus so we cannot call
		 *	    nominal pf.c support routines for cpu-specific
		 *	    data.
		 */
		for (nn = 0; nn < ncpus; ++nn) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
				int secs = time_second, diff;

				if ((nr + 1) * sizeof(*p) >
				    (unsigned)psn->psn_len) {
					break;
				}

				bcopy(n, pstore, sizeof(*pstore));
				if (n->rule.ptr != NULL)
					pstore->rule.nr = n->rule.ptr->nr;
				pstore->creation = secs - pstore->creation;
				if (pstore->expire > secs)
					pstore->expire -= secs;
				else
					pstore->expire = 0;

				/* adjust the connection rate estimate */
				diff = secs - n->conn_rate.last;
				if (diff >= n->conn_rate.seconds)
					pstore->conn_rate.count = 0;
				else
					pstore->conn_rate.count -=
					    n->conn_rate.count * diff /
					    n->conn_rate.seconds;

				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					kfree(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		kfree(pstore, M_TEMP);
		break;
	}
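	/*
	 * The two source-node removal paths below first detach any
	 * state->src_node pointers and mark the nodes expired, then hop
	 * to each cpu so pf_purge_expired_src_nodes() can run in its
	 * nominal per-cpu context.
	 */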
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;
		globaldata_t save_gd = mycpu;
		int nn;

		/*
		 * WARNING: We are not switching cpus so we cannot call
		 *	    nominal pf.c support routines for cpu-specific
		 *	    data.
		 */
		for (nn = 0; nn < ncpus; ++nn) {
			RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
				state->src_node = NULL;
				state->nat_src_node = NULL;
			}
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
				n->expire = 1;
				n->states = 0;
			}
		}

		/*
		 * WARNING: Must move to the target cpu for nominal calls
		 *	    into pf.c
		 */
		for (nn = 0; nn < ncpus; ++nn) {
			lwkt_setcpu_self(globaldata_find(nn));
			pf_purge_expired_src_nodes(1);
		}
		lwkt_setcpu_self(save_gd);
		pf_status.src_nodes = 0;
		break;
	}
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int killed = 0;
		globaldata_t save_gd = mycpu;
		int nn;

		/*
		 * WARNING: We are not switching cpus so we cannot call
		 *	    nominal pf.c support routines for cpu-specific
		 *	    data.
		 */
		for (nn = 0; nn < ncpus; ++nn) {
			RB_FOREACH(sn, pf_src_tree, &tree_src_tracking[nn]) {
				if (PF_MATCHA(psnk->psnk_src.neg,
				    &psnk->psnk_src.addr.v.a.addr,
				    &psnk->psnk_src.addr.v.a.mask,
				    &sn->addr, sn->af) &&
				    PF_MATCHA(psnk->psnk_dst.neg,
				    &psnk->psnk_dst.addr.v.a.addr,
				    &psnk->psnk_dst.addr.v.a.mask,
				    &sn->raddr, sn->af)) {
					/* Handle state to src_node linkage */
					if (sn->states != 0) {
						RB_FOREACH(s, pf_state_tree_id,
						    &tree_id[nn]) {
							if (s->src_node == sn)
								s->src_node = NULL;
							if (s->nat_src_node == sn)
								s->nat_src_node = NULL;
						}
						sn->states = 0;
					}
					sn->expire = 1;
					killed++;
				}
			}
		}

		for (nn = 0; nn < ncpus; ++nn) {
			lwkt_setcpu_self(globaldata_find(nn));
			pf_purge_expired_src_nodes(1);
		}
		lwkt_setcpu_self(save_gd);

		psnk->psnk_killed = killed;
		break;
	}
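	/* A hostid of 0 asks the kernel to generate a random one. */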
	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0)
			pf_status.hostid = karc4random();
		else
			pf_status.hostid = *hostid;
		break;
	}
	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}
	default:
		error = ENODEV;
		break;
	}
fail:
	lwkt_reltoken(&pf_token);
	return (error);
}
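/*
 * The pf_clear_*() helpers below mirror the "pfctl -Fa" flush
 * semantics and are reused by shutdown_pf() on module unload.
 */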
/*
 * XXX - Check for version mismatch!!!
 */
void
pf_clear_states(void)
{
	struct pf_state *s, *nexts;
	globaldata_t save_gd = mycpu;
	int nn;
	for (nn = 0; nn < ncpus; ++nn) {
		lwkt_setcpu_self(globaldata_find(nn));
		for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);

			/* don't send out individual delete messages */
			s->sync_flags = PFSTATE_NOSYNC;
			pf_unlink_state(s);
		}
	}
	lwkt_setcpu_self(save_gd);

#if 0 /* PFSYNC */
	/*
	 * XXX This is called on module unload; we do not want to sync
	 * that over?
	 */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
}
int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}
void
pf_clear_srcnodes(void)
{
	struct pf_src_node *n;
	struct pf_state *state;
	globaldata_t save_gd = mycpu;
	int nn;

	for (nn = 0; nn < ncpus; ++nn) {
		lwkt_setcpu_self(globaldata_find(nn));
		RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(0);
	}
	lwkt_setcpu_self(save_gd);

	pf_status.src_nodes = 0;
}
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';
	pf_status.running = 0;
	error = dehook_pf();
	if (error) {
		pf_status.running = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: pfil unregistration failed\n"));
	}

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break; /* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break; /* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break; /* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break; /* XXX: rollback? */
		}
		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif
		pf_clear_states();
		pf_clear_srcnodes();

		/* status does not use malloc'ed mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}
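/*
 * pfil(9) hooks: the four wrappers below are attached to the AF_INET
 * and AF_INET6 pfil heads and funnel every packet through pf_test()
 * or pf_test6() while holding pf_token shared.
 */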
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	int chk;

	/*
	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
	 * for ip_len/ip_off.  This is why we don't have to change byte order
	 * like the FreeBSD-5 version does.
	 */
	lwkt_gettoken_shared(&pf_token);

	chk = pf_test(PF_IN, ifp, m, NULL, NULL);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	lwkt_reltoken(&pf_token);
	return chk;
}
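/*
 * On the output path, a checksum left for the hardware
 * (CSUM_DELAY_DATA) must be finalized in software first, because pf
 * may need to inspect or rewrite the payload before it reaches the
 * driver.
 */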
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	int chk;

	/*
	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
	 * for ip_len/ip_off.  This is why we don't have to change byte order
	 * like the FreeBSD-5 version does.
	 */
	lwkt_gettoken_shared(&pf_token);

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, NULL);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	lwkt_reltoken(&pf_token);
	return chk;
}
#ifdef INET6
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	int chk;

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	lwkt_gettoken_shared(&pf_token);

	chk = pf_test6(PF_IN, ifp, m, NULL, NULL);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	lwkt_reltoken(&pf_token);
	return chk;
}
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	int chk;

	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	lwkt_gettoken_shared(&pf_token);

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, NULL);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	lwkt_reltoken(&pf_token);
	return chk;
}
#endif /* INET6 */
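/*
 * hook_pf()/dehook_pf() attach and detach the pf_check* wrappers on
 * the pfil heads; pf_pfil_hooked guards against double registration.
 */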
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	lwkt_gettoken(&pf_token);

	if (pf_pfil_hooked) {
		lwkt_reltoken(&pf_token);
		return (0);
	}

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL) {
		lwkt_reltoken(&pf_token);
		return (ESRCH);
	}
	pfil_add_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
		lwkt_reltoken(&pf_token);
		return (ESRCH);
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	lwkt_reltoken(&pf_token);
	return (0);
}
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	lwkt_gettoken(&pf_token);

	if (pf_pfil_hooked == 0) {
		lwkt_reltoken(&pf_token);
		return (0);
	}

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL) {
		lwkt_reltoken(&pf_token);
		return (ESRCH);
	}
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		lwkt_reltoken(&pf_token);
		return (ESRCH);
	}
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	lwkt_reltoken(&pf_token);
	return (0);
}
static int
pf_load(void)
{
	lwkt_gettoken(&pf_token);

	pf_dev = make_dev(&pf_ops, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	pfattach();
	lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE);
	lockinit(&pf_global_statetbl_lock, "pfglstlk", 0, 0);
	lwkt_reltoken(&pf_token);
	return (0);
}
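/*
 * rnh_walktree() callback: remove one mask entry from the radix tree
 * and free it.
 */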
static int
pf_mask_del(struct radix_node *rn, void *arg)
{
	struct radix_node_head *rnh = arg;

	rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
	Free(rn);
	return 0;
}
static int
pf_unload(void)
{
	int error;

	pf_status.running = 0;

	lwkt_gettoken(&pf_token);

	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		kprintf("pfil unregistration failed\n");
		lwkt_reltoken(&pf_token);
		return error;
	}
	shutdown_pf();
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		tsleep(pf_purge_thread, 0, "pftmo", hz);
	}
	pfi_cleanup();
	pf_osfp_flush();
	dev_ops_remove_all(&pf_ops);
	lockuninit(&pf_consistency_lock);
	lwkt_reltoken(&pf_token);

	if (pf_maskhead != NULL) {
		pf_maskhead->rnh_walktree(pf_maskhead,
		    pf_mask_del, pf_maskhead);
		Free(pf_maskhead);
		pf_maskhead = NULL;
	}
	kmalloc_destroy(&pf_state_pl);
	kmalloc_destroy(&pf_frent_pl);
	kmalloc_destroy(&pf_cent_pl);
	return 0;
}
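/*
 * Module glue: dispatch kldload/kldunload events to pf_load() and
 * pf_unload().
 */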
static int
pf_modevent(module_t mod, int type, void *data __unused)
{
	int error = 0;

	lwkt_gettoken(&pf_token);

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&pf_token);
	return error;
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);