1 /* $OpenBSD: pf_ioctl.c,v 1.209 2008/06/29 08:42:15 mcbride Exp $ */
2 /*add $OpenBSD: pf_ioctl.c,v 1.212 2009/02/15 20:42:33 mbalmer Exp $ */
5 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
7 * Copyright (c) 2001 Daniel Hartmeier
8 * Copyright (c) 2002,2003 Henning Brauer
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * - Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
35 * Effort sponsored in part by the Defense Advanced Research Projects
36 * Agency (DARPA) and Air Force Research Laboratory, Air Force
37 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 #include "opt_inet6.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/device.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/kernel.h>
54 #include <sys/kthread.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
61 #include <sys/thread2.h>
64 #include <net/if_types.h>
65 #include <net/route.h>
67 #include <netinet/in.h>
68 #include <netinet/in_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_icmp.h>
74 #include <net/pf/pfvar.h>
77 #include <net/pf/if_pflog.h>
78 #include <net/pf/if_pfsync.h>
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
86 #include <net/altq/altq.h>
89 #include <machine/limits.h>
91 #include <sys/mutex.h>
93 u_int rt_numfibs = RT_NUMFIBS;
96 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
97 u_int8_t, u_int8_t, u_int8_t);
99 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
100 void pf_empty_pool(struct pf_palist *);
102 int pf_begin_altq(u_int32_t *);
103 int pf_rollback_altq(u_int32_t);
104 int pf_commit_altq(u_int32_t);
105 int pf_enable_altq(struct pf_altq *);
106 int pf_disable_altq(struct pf_altq *);
108 int pf_begin_rules(u_int32_t *, int, const char *);
109 int pf_rollback_rules(u_int32_t, int, char *);
110 int pf_setup_pfsync_matching(struct pf_ruleset *);
111 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
112 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
113 int pf_commit_rules(u_int32_t, int, char *);
114 int pf_addr_setup(struct pf_ruleset *,
115 struct pf_addr_wrap *, sa_family_t);
116 void pf_addr_copyout(struct pf_addr_wrap *);
118 struct pf_rule pf_default_rule;
119 struct lock pf_consistency_lock;
120 struct lock pf_global_statetbl_lock;
122 static int pf_altq_running;
125 #define TAGID_MAX 50000
126 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
127 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
129 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
130 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
132 u_int16_t tagname2tag(struct pf_tags *, char *);
133 void tag2tagname(struct pf_tags *, u_int16_t, char *);
134 void tag_unref(struct pf_tags *, u_int16_t);
135 int pf_rtlabel_add(struct pf_addr_wrap *);
136 void pf_rtlabel_remove(struct pf_addr_wrap *);
137 void pf_rtlabel_copyout(struct pf_addr_wrap *);
139 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
141 static cdev_t pf_dev;
143 static MALLOC_DEFINE(M_PFRULEPL, "pfrulepl", "pf rule pool list");
144 static MALLOC_DEFINE(M_PFALTQPL, "pfaltqpl", "pf altq pool list");
145 static MALLOC_DEFINE(M_PFPOOLADDRPL, "pfpooladdrpl", "pf pool address pool list");
146 static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list");
150  * XXX - These are new and need to be checked when moving to a new version
152 static void pf_clear_states(void);
153 static int pf_clear_tables(void);
154 static void pf_clear_srcnodes(void);
156  * XXX - These are new and need to be checked when moving to a new version
160 * Wrapper functions for pfil(9) hooks
162 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
164 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
167 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
169 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
173 static int hook_pf(void);
174 static int dehook_pf(void);
175 static int shutdown_pf(void);
176 static int pf_load(void);
177 static int pf_unload(void);
183 static struct dev_ops pf_ops = { /* XXX convert to port model */
190 static volatile int pf_pfil_hooked = 0;
191 int pf_end_threads = 0;
193 int debug_pfugidhack = 0;
194 SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
195 "Enable/disable pf user/group rules mpsafe hack");
200 u_int32_t *my_timeout = pf_default_rule.timeout;
203 if (!rn_inithead((void **)&pf_maskhead, NULL, 0)) {
204 kprintf("pf mask radix tree create failed\n");
207 kmalloc_create(&pf_state_pl, "pf state pool list");
208 kmalloc_raise_limit(pf_state_pl, 0);
209 kmalloc_create(&pf_frent_pl, "pf fragment pool list");
210 kmalloc_raise_limit(pf_frent_pl, 0);
211 kmalloc_create(&pf_cent_pl, "pf cent pool list");
212 kmalloc_raise_limit(pf_cent_pl, 0);
216 pf_osfp_initialize();
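	/* set default pool limits; the table-entry limit is reduced below on machines with <= 100MB of RAM */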
218 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
219 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
220 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
221 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
222 if (ctob(physmem) <= 100*1024*1024)
223 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
224 PFR_KENTRY_HIWAT_SMALL;
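	/* initialize the per-CPU source-tracking and state-id trees */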
226 for (nn = 0; nn < ncpus; ++nn) {
227 RB_INIT(&tree_src_tracking[nn]);
228 RB_INIT(&tree_id[nn]);
230 RB_INIT(&pf_anchors);
231 pf_init_ruleset(&pf_main_ruleset);
232 TAILQ_INIT(&pf_altqs[0]);
233 TAILQ_INIT(&pf_altqs[1]);
234 TAILQ_INIT(&pf_pabuf);
235 pf_altqs_active = &pf_altqs[0];
236 pf_altqs_inactive = &pf_altqs[1];
237 for (nn = 0; nn < ncpus; ++nn)
238 TAILQ_INIT(&state_list[nn]);
240 /* default rule should never be garbage collected */
241 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
242 pf_default_rule.action = PF_PASS;
243 pf_default_rule.nr = (uint32_t)(-1);
244 pf_default_rule.rtableid = -1;
246 /* initialize default timeouts */
247 my_timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */
248 my_timeout[PFTM_TCP_OPENING] = 30; /* No response yet */
249 my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */
250 my_timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */
251 my_timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */
252 my_timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */
253 my_timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */
254 my_timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */
255 my_timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */
256 my_timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */
257 my_timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */
258 my_timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */
259 my_timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */
260 my_timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */
261 my_timeout[PFTM_FRAG] = 30; /* Fragment expire */
262 my_timeout[PFTM_INTERVAL] = 10; /* Expire interval */
263 my_timeout[PFTM_SRC_NODE] = 0; /* Source Tracking */
264 my_timeout[PFTM_TS_DIFF] = 30; /* Allowed TS diff */
265 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
266 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
269 bzero(&pf_status, sizeof(pf_status));
270 pf_status.debug = PF_DEBUG_URGENT;
271 /* XXX do our best to avoid a conflict */
272 pf_status.hostid = karc4random();
274 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
275 panic("pfpurge thread");
279 pfopen(struct dev_open_args *ap)
281 lwkt_gettoken(&pf_token);
282 cdev_t dev = ap->a_head.a_dev;
283 if (minor(dev) >= 1) {
284 lwkt_reltoken(&pf_token);
287 lwkt_reltoken(&pf_token);
292 pfclose(struct dev_close_args *ap)
294 lwkt_gettoken(&pf_token);
295 cdev_t dev = ap->a_head.a_dev;
296 if (minor(dev) >= 1) {
297 lwkt_reltoken(&pf_token);
300 lwkt_reltoken(&pf_token);
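/*
 * Look up the address pool of the rule identified by anchor, rule action,
 * rule number and (optionally) ticket, in either the active or the
 * inactive ruleset.
 */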
305 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
306 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
307 u_int8_t check_ticket)
309 struct pf_ruleset *ruleset;
310 struct pf_rule *rule;
313 ruleset = pf_find_ruleset(anchor);
316 rs_num = pf_get_ruleset_number(rule_action);
317 if (rs_num >= PF_RULESET_MAX)
320 if (check_ticket && ticket !=
321 ruleset->rules[rs_num].active.ticket)
324 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
327 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
329 if (check_ticket && ticket !=
330 ruleset->rules[rs_num].inactive.ticket)
333 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
336 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
339 while ((rule != NULL) && (rule->nr != rule_number))
340 rule = TAILQ_NEXT(rule, entries);
345 return (&rule->rpool);
349 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
351 struct pf_pooladdr *mv_pool_pa;
353 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
354 TAILQ_REMOVE(poola, mv_pool_pa, entries);
355 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
360 pf_empty_pool(struct pf_palist *poola)
362 struct pf_pooladdr *empty_pool_pa;
364 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
365 pfi_dynaddr_remove(&empty_pool_pa->addr);
366 pf_tbladdr_remove(&empty_pool_pa->addr);
367 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
368 TAILQ_REMOVE(poola, empty_pool_pa, entries);
369 kfree(empty_pool_pa, M_PFPOOLADDRPL);
374 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
376 if (rulequeue != NULL) {
377 if (rule->states_cur <= 0) {
379 * XXX - we need to remove the table *before* detaching
380 * the rule to make sure the table code does not delete
381 * the anchor under our feet.
383 pf_tbladdr_remove(&rule->src.addr);
384 pf_tbladdr_remove(&rule->dst.addr);
385 if (rule->overload_tbl)
386 pfr_detach_table(rule->overload_tbl);
388 TAILQ_REMOVE(rulequeue, rule, entries);
389 rule->entries.tqe_prev = NULL;
393 if (rule->states_cur > 0 || rule->src_nodes > 0 ||
394 rule->entries.tqe_prev != NULL)
396 pf_tag_unref(rule->tag);
397 pf_tag_unref(rule->match_tag);
399 if (rule->pqid != rule->qid)
400 pf_qid_unref(rule->pqid);
401 pf_qid_unref(rule->qid);
403 pf_rtlabel_remove(&rule->src.addr);
404 pf_rtlabel_remove(&rule->dst.addr);
405 pfi_dynaddr_remove(&rule->src.addr);
406 pfi_dynaddr_remove(&rule->dst.addr);
407 if (rulequeue == NULL) {
408 pf_tbladdr_remove(&rule->src.addr);
409 pf_tbladdr_remove(&rule->dst.addr);
410 if (rule->overload_tbl)
411 pfr_detach_table(rule->overload_tbl);
413 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
414 pf_anchor_remove(rule);
415 pf_empty_pool(&rule->rpool.list);
416 kfree(rule, M_PFRULEPL);
420 tagname2tag(struct pf_tags *head, char *tagname)
422 struct pf_tagname *tag, *p = NULL;
423 u_int16_t new_tagid = 1;
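	/* if the tag name is already in use, reuse the existing entry */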
425 TAILQ_FOREACH(tag, head, entries)
426 if (strcmp(tagname, tag->name) == 0) {
432 * to avoid fragmentation, we do a linear search from the beginning
433 * and take the first free slot we find. if there is none or the list
434 * is empty, append a new entry at the end.
438 if (!TAILQ_EMPTY(head))
439 for (p = TAILQ_FIRST(head); p != NULL &&
440 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
441 new_tagid = p->tag + 1;
443 if (new_tagid > TAGID_MAX)
446 /* allocate and fill new struct pf_tagname */
447 tag = kmalloc(sizeof(*tag), M_TEMP, M_WAITOK);
448 strlcpy(tag->name, tagname, sizeof(tag->name));
449 tag->tag = new_tagid;
452 if (p != NULL) /* insert new entry before p */
453 TAILQ_INSERT_BEFORE(p, tag, entries);
454 else /* either list empty or no free slot in between */
455 TAILQ_INSERT_TAIL(head, tag, entries);
461 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
463 struct pf_tagname *tag;
465 TAILQ_FOREACH(tag, head, entries)
466 if (tag->tag == tagid) {
467 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
473 tag_unref(struct pf_tags *head, u_int16_t tag)
475 struct pf_tagname *p, *next;
480 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
481 next = TAILQ_NEXT(p, entries);
484 TAILQ_REMOVE(head, p, entries);
493 pf_tagname2tag(char *tagname)
495 return (tagname2tag(&pf_tags, tagname));
499 pf_tag2tagname(u_int16_t tagid, char *p)
501 tag2tagname(&pf_tags, tagid, p);
505 pf_tag_ref(u_int16_t tag)
507 struct pf_tagname *t;
509 TAILQ_FOREACH(t, &pf_tags, entries)
517 pf_tag_unref(u_int16_t tag)
519 tag_unref(&pf_tags, tag);
523 pf_rtlabel_add(struct pf_addr_wrap *a)
529 pf_rtlabel_remove(struct pf_addr_wrap *a)
534 pf_rtlabel_copyout(struct pf_addr_wrap *a)
536 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
537 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
542 pf_qname2qid(char *qname)
544 return ((u_int32_t)tagname2tag(&pf_qids, qname));
548 pf_qid2qname(u_int32_t qid, char *p)
550 tag2tagname(&pf_qids, (u_int16_t)qid, p);
554 pf_qid_unref(u_int32_t qid)
556 tag_unref(&pf_qids, (u_int16_t)qid);
560 pf_begin_altq(u_int32_t *ticket)
562 struct pf_altq *altq;
565 /* Purge the old altq list */
566 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
567 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
568 if (altq->qname[0] == 0) {
569 /* detach and destroy the discipline */
570 error = altq_remove(altq);
572 pf_qid_unref(altq->qid);
573 kfree(altq, M_PFALTQPL);
577 *ticket = ++ticket_altqs_inactive;
578 altqs_inactive_open = 1;
583 pf_rollback_altq(u_int32_t ticket)
585 struct pf_altq *altq;
588 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
590 /* Purge the old altq list */
591 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
592 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
593 if (altq->qname[0] == 0) {
594 /* detach and destroy the discipline */
595 error = altq_remove(altq);
597 pf_qid_unref(altq->qid);
598 kfree(altq, M_PFALTQPL);
600 altqs_inactive_open = 0;
605 pf_commit_altq(u_int32_t ticket)
607 struct pf_altqqueue *old_altqs;
608 struct pf_altq *altq;
611 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
614 /* swap altqs, keep the old. */
616 old_altqs = pf_altqs_active;
617 pf_altqs_active = pf_altqs_inactive;
618 pf_altqs_inactive = old_altqs;
619 ticket_altqs_active = ticket_altqs_inactive;
621 /* Attach new disciplines */
622 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
623 if (altq->qname[0] == 0) {
624 /* attach the discipline */
625 error = altq_pfattach(altq);
633 /* Purge the old altq list */
634 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
635 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
636 if (altq->qname[0] == 0) {
637 /* detach and destroy the discipline */
639 error = pf_disable_altq(altq);
640 err = altq_pfdetach(altq);
641 if (err != 0 && error == 0)
643 err = altq_remove(altq);
644 if (err != 0 && error == 0)
647 pf_qid_unref(altq->qid);
648 kfree(altq, M_PFALTQPL);
652 altqs_inactive_open = 0;
657 pf_enable_altq(struct pf_altq *altq)
660 struct tb_profile tb;
663 if ((ifp = ifunit(altq->ifname)) == NULL)
666 if (ifp->if_snd.altq_type != ALTQT_NONE)
667 error = altq_enable(&ifp->if_snd);
669 /* set tokenbucket regulator */
670 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
671 tb.rate = altq->ifbandwidth;
672 tb.depth = altq->tbrsize;
674 error = tbr_set(&ifp->if_snd, &tb);
682 pf_disable_altq(struct pf_altq *altq)
685 struct tb_profile tb;
688 if ((ifp = ifunit(altq->ifname)) == NULL)
692	 * if the discipline is no longer referenced, it was overridden by a
693	 * new one; in that case, just return.
695 if (altq->altq_disc != ifp->if_snd.altq_disc)
698 error = altq_disable(&ifp->if_snd);
701 /* clear tokenbucket regulator */
704 error = tbr_set(&ifp->if_snd, &tb);
713 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
715 struct pf_ruleset *rs;
716 struct pf_rule *rule;
718 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
720 rs = pf_find_or_create_ruleset(anchor);
723 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
724 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
725 rs->rules[rs_num].inactive.rcount--;
727 *ticket = ++rs->rules[rs_num].inactive.ticket;
728 rs->rules[rs_num].inactive.open = 1;
733 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
735 struct pf_ruleset *rs;
736 struct pf_rule *rule;
738 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
740 rs = pf_find_ruleset(anchor);
741 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
742 rs->rules[rs_num].inactive.ticket != ticket)
744 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
745 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
746 rs->rules[rs_num].inactive.rcount--;
748 rs->rules[rs_num].inactive.open = 0;
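/*
 * Helpers that feed rule fields (converted to network byte order where
 * needed) into the MD5 context used for the pfsync ruleset checksum.
 */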
752 #define PF_MD5_UPD(st, elm) \
753 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
755 #define PF_MD5_UPD_STR(st, elm) \
756 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
758 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
759 (stor) = htonl((st)->elm); \
760 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
763 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
764 (stor) = htons((st)->elm); \
765 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
769 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
771 PF_MD5_UPD(pfr, addr.type);
772 switch (pfr->addr.type) {
773 case PF_ADDR_DYNIFTL:
774 PF_MD5_UPD(pfr, addr.v.ifname);
775 PF_MD5_UPD(pfr, addr.iflags);
778 PF_MD5_UPD(pfr, addr.v.tblname);
780 case PF_ADDR_ADDRMASK:
782 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
783 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
785 case PF_ADDR_RTLABEL:
786 PF_MD5_UPD(pfr, addr.v.rtlabelname);
790 PF_MD5_UPD(pfr, port[0]);
791 PF_MD5_UPD(pfr, port[1]);
792 PF_MD5_UPD(pfr, neg);
793 PF_MD5_UPD(pfr, port_op);
797 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
802 pf_hash_rule_addr(ctx, &rule->src);
803 pf_hash_rule_addr(ctx, &rule->dst);
804 PF_MD5_UPD_STR(rule, label);
805 PF_MD5_UPD_STR(rule, ifname);
806 PF_MD5_UPD_STR(rule, match_tagname);
807 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
808 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
809 PF_MD5_UPD_HTONL(rule, prob, y);
810 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
811 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
812 PF_MD5_UPD(rule, uid.op);
813 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
814 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
815 PF_MD5_UPD(rule, gid.op);
816 PF_MD5_UPD_HTONL(rule, rule_flag, y);
817 PF_MD5_UPD(rule, action);
818 PF_MD5_UPD(rule, direction);
819 PF_MD5_UPD(rule, af);
820 PF_MD5_UPD(rule, quick);
821 PF_MD5_UPD(rule, ifnot);
822 PF_MD5_UPD(rule, match_tag_not);
823 PF_MD5_UPD(rule, natpass);
824 PF_MD5_UPD(rule, keep_state);
825 PF_MD5_UPD(rule, proto);
826 PF_MD5_UPD(rule, type);
827 PF_MD5_UPD(rule, code);
828 PF_MD5_UPD(rule, flags);
829 PF_MD5_UPD(rule, flagset);
830 PF_MD5_UPD(rule, allow_opts);
831 PF_MD5_UPD(rule, rt);
832 PF_MD5_UPD(rule, tos);
836 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
838 struct pf_ruleset *rs;
839 struct pf_rule *rule, **old_array;
840 struct pf_rulequeue *old_rules;
842 u_int32_t old_rcount;
844 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
846 rs = pf_find_ruleset(anchor);
847 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
848 ticket != rs->rules[rs_num].inactive.ticket)
851 /* Calculate checksum for the main ruleset */
852 if (rs == &pf_main_ruleset) {
853 error = pf_setup_pfsync_matching(rs);
858 /* Swap rules, keep the old. */
860 old_rules = rs->rules[rs_num].active.ptr;
861 old_rcount = rs->rules[rs_num].active.rcount;
862 old_array = rs->rules[rs_num].active.ptr_array;
864 rs->rules[rs_num].active.ptr =
865 rs->rules[rs_num].inactive.ptr;
866 rs->rules[rs_num].active.ptr_array =
867 rs->rules[rs_num].inactive.ptr_array;
868 rs->rules[rs_num].active.rcount =
869 rs->rules[rs_num].inactive.rcount;
870 rs->rules[rs_num].inactive.ptr = old_rules;
871 rs->rules[rs_num].inactive.ptr_array = old_array;
872 rs->rules[rs_num].inactive.rcount = old_rcount;
874 rs->rules[rs_num].active.ticket =
875 rs->rules[rs_num].inactive.ticket;
876 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
879 /* Purge the old rule list. */
880 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
881 pf_rm_rule(old_rules, rule);
882 if (rs->rules[rs_num].inactive.ptr_array)
883 kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
884 rs->rules[rs_num].inactive.ptr_array = NULL;
885 rs->rules[rs_num].inactive.rcount = 0;
886 rs->rules[rs_num].inactive.open = 0;
887 pf_remove_if_empty_ruleset(rs);
893 pf_setup_pfsync_matching(struct pf_ruleset *rs)
896 struct pf_rule *rule;
898 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
901 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
902 /* XXX PF_RULESET_SCRUB as well? */
903 if (rs_cnt == PF_RULESET_SCRUB)
906 if (rs->rules[rs_cnt].inactive.ptr_array)
907 kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
908 rs->rules[rs_cnt].inactive.ptr_array = NULL;
910 if (rs->rules[rs_cnt].inactive.rcount) {
911 rs->rules[rs_cnt].inactive.ptr_array =
912 kmalloc(sizeof(caddr_t) *
913 rs->rules[rs_cnt].inactive.rcount,
917 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
919 pf_hash_rule(&ctx, rule);
920 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
924 MD5Final(digest, &ctx);
925 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
930 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
933 if (pfi_dynaddr_setup(addr, af) ||
934 pf_tbladdr_setup(ruleset, addr))
941 pf_addr_copyout(struct pf_addr_wrap *addr)
943 pfi_dynaddr_copyout(addr);
944 pf_tbladdr_copyout(addr);
945 pf_rtlabel_copyout(addr);
949 pfioctl(struct dev_ioctl_args *ap)
951 u_long cmd = ap->a_cmd;
952 caddr_t addr = ap->a_data;
953 struct pf_pooladdr *pa = NULL;
954 struct pf_pool *pool = NULL;
957 lwkt_gettoken(&pf_token);
959 /* XXX keep in sync with switch() below */
960 if (securelevel > 1) {
967 case DIOCSETSTATUSIF:
974 case DIOCCLRRULECTRS:
979 case DIOCGETRULESETS:
993 case DIOCGETSRCNODES:
994 case DIOCCLRSRCNODES:
1000 case DIOCRCLRTABLES:
1001 case DIOCRADDTABLES:
1002 case DIOCRDELTABLES:
1003 case DIOCRSETTFLAGS:
1004 if (((struct pfioc_table *)addr)->pfrio_flags &
1006 break; /* dummy operation ok */
1007 lwkt_reltoken(&pf_token);
1010 lwkt_reltoken(&pf_token);
1015 if (!(ap->a_fflag & FWRITE)) {
1023 case DIOCGETTIMEOUT:
1028 case DIOCGETRULESETS:
1029 case DIOCGETRULESET:
1031 case DIOCRGETTABLES:
1032 case DIOCRGETTSTATS:
1034 case DIOCRGETASTATS:
1037 case DIOCGETSRCNODES:
1038 case DIOCIGETIFACES:
1041 case DIOCRCLRTABLES:
1042 case DIOCRADDTABLES:
1043 case DIOCRDELTABLES:
1044 case DIOCRCLRTSTATS:
1049 case DIOCRSETTFLAGS:
1050 if (((struct pfioc_table *)addr)->pfrio_flags &
1052 break; /* dummy operation ok */
1053 lwkt_reltoken(&pf_token);
1056 if (((struct pfioc_rule *)addr)->action ==
1058 lwkt_reltoken(&pf_token);
1063 lwkt_reltoken(&pf_token);
1070 if (pf_status.running)
1075 DPFPRINTF(PF_DEBUG_MISC,
1076 ("pf: pfil registration fail\n"));
1079 pf_status.running = 1;
1080 pf_status.since = time_second;
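		/* seed the 64-bit state id counter with the boot time in its upper 32 bits */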
1081 if (pf_status.stateid == 0) {
1082 pf_status.stateid = time_second;
1083 pf_status.stateid = pf_status.stateid << 32;
1085 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1090 if (!pf_status.running)
1093 pf_status.running = 0;
1094 error = dehook_pf();
1096 pf_status.running = 1;
1097 DPFPRINTF(PF_DEBUG_MISC,
1098 ("pf: pfil unregistration failed\n"));
1100 pf_status.since = time_second;
1101 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1106 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1107 struct pf_ruleset *ruleset;
1108 struct pf_rule *rule, *tail;
1109 struct pf_pooladdr *pa;
1112 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1113 ruleset = pf_find_ruleset(pr->anchor);
1114 if (ruleset == NULL) {
1118 rs_num = pf_get_ruleset_number(pr->rule.action);
1119 if (rs_num >= PF_RULESET_MAX) {
1123 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1127 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1131 if (pr->pool_ticket != ticket_pabuf) {
1135 rule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK);
1136 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1137 rule->cuid = ap->a_cred->cr_ruid;
1139 rule->anchor = NULL;
1141 TAILQ_INIT(&rule->rpool.list);
1142 /* initialize refcounting */
1143 rule->states_cur = 0;
1144 rule->src_nodes = 0;
1145 rule->entries.tqe_prev = NULL;
1147 if (rule->af == AF_INET) {
1148 kfree(rule, M_PFRULEPL);
1149 error = EAFNOSUPPORT;
1154 if (rule->af == AF_INET6) {
1155 kfree(rule, M_PFRULEPL);
1156 error = EAFNOSUPPORT;
1160 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1163 rule->nr = tail->nr + 1;
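		/* resolve interface, queue, tag, route label and table references before linking the rule in */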
1166 if (rule->ifname[0]) {
1167 rule->kif = pfi_kif_get(rule->ifname);
1168 if (rule->kif == NULL) {
1169 kfree(rule, M_PFRULEPL);
1173 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1176 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
1181 if (rule->qname[0] != 0) {
1182 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1184 else if (rule->pqname[0] != 0) {
1186 pf_qname2qid(rule->pqname)) == 0)
1189 rule->pqid = rule->qid;
1192 if (rule->tagname[0])
1193 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1195 if (rule->match_tagname[0])
1196 if ((rule->match_tag =
1197 pf_tagname2tag(rule->match_tagname)) == 0)
1199 if (rule->rt && !rule->direction)
1204 if (rule->logif >= PFLOGIFS_MAX)
1207 if (pf_rtlabel_add(&rule->src.addr) ||
1208 pf_rtlabel_add(&rule->dst.addr))
1210 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1212 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1214 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1216 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1217 if (pf_tbladdr_setup(ruleset, &pa->addr))
1220 if (rule->overload_tblname[0]) {
1221 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1222 rule->overload_tblname)) == NULL)
1225 rule->overload_tbl->pfrkt_flags |=
1229 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1230 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1231 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1232 (rule->rt > PF_FASTROUTE)) &&
1233 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1237 pf_rm_rule(NULL, rule);
1240 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1241 rule->evaluations = rule->packets[0] = rule->packets[1] =
1242 rule->bytes[0] = rule->bytes[1] = 0;
1243 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1245 ruleset->rules[rs_num].inactive.rcount++;
1249 case DIOCGETRULES: {
1250 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1251 struct pf_ruleset *ruleset;
1252 struct pf_rule *tail;
1255 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1256 ruleset = pf_find_ruleset(pr->anchor);
1257 if (ruleset == NULL) {
1261 rs_num = pf_get_ruleset_number(pr->rule.action);
1262 if (rs_num >= PF_RULESET_MAX) {
1266 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1269 pr->nr = tail->nr + 1;
1272 pr->ticket = ruleset->rules[rs_num].active.ticket;
1277 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1278 struct pf_ruleset *ruleset;
1279 struct pf_rule *rule;
1282 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1283 ruleset = pf_find_ruleset(pr->anchor);
1284 if (ruleset == NULL) {
1288 rs_num = pf_get_ruleset_number(pr->rule.action);
1289 if (rs_num >= PF_RULESET_MAX) {
1293 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1297 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1298 while ((rule != NULL) && (rule->nr != pr->nr))
1299 rule = TAILQ_NEXT(rule, entries);
1304 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1305 if (pf_anchor_copyout(ruleset, rule, pr)) {
1309 pf_addr_copyout(&pr->rule.src.addr);
1310 pf_addr_copyout(&pr->rule.dst.addr);
1311 for (i = 0; i < PF_SKIP_COUNT; ++i)
1312 if (rule->skip[i].ptr == NULL)
1313 pr->rule.skip[i].nr = (uint32_t)(-1);
1315 pr->rule.skip[i].nr =
1316 rule->skip[i].ptr->nr;
1318 if (pr->action == PF_GET_CLR_CNTR) {
1319 rule->evaluations = 0;
1320 rule->packets[0] = rule->packets[1] = 0;
1321 rule->bytes[0] = rule->bytes[1] = 0;
1322 rule->states_tot = 0;
1327 case DIOCCHANGERULE: {
1328 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1329 struct pf_ruleset *ruleset;
1330 struct pf_rule *oldrule = NULL, *newrule = NULL;
1334 if (!(pcr->action == PF_CHANGE_REMOVE ||
1335 pcr->action == PF_CHANGE_GET_TICKET) &&
1336 pcr->pool_ticket != ticket_pabuf) {
1341 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1342 pcr->action > PF_CHANGE_GET_TICKET) {
1346 ruleset = pf_find_ruleset(pcr->anchor);
1347 if (ruleset == NULL) {
1351 rs_num = pf_get_ruleset_number(pcr->rule.action);
1352 if (rs_num >= PF_RULESET_MAX) {
1357 if (pcr->action == PF_CHANGE_GET_TICKET) {
1358 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1362 ruleset->rules[rs_num].active.ticket) {
1366 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1372 if (pcr->action != PF_CHANGE_REMOVE) {
1373 newrule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK|M_NULLOK);
1374 if (newrule == NULL) {
1378 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1379 newrule->cuid = ap->a_cred->cr_ruid;
1381 TAILQ_INIT(&newrule->rpool.list);
1382 /* initialize refcounting */
1383 newrule->states_cur = 0;
1384 newrule->entries.tqe_prev = NULL;
1386 if (newrule->af == AF_INET) {
1387 kfree(newrule, M_PFRULEPL);
1388 error = EAFNOSUPPORT;
1393 if (newrule->af == AF_INET6) {
1394 kfree(newrule, M_PFRULEPL);
1395 error = EAFNOSUPPORT;
1399 if (newrule->ifname[0]) {
1400 newrule->kif = pfi_kif_get(newrule->ifname);
1401 if (newrule->kif == NULL) {
1402 kfree(newrule, M_PFRULEPL);
1406 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1408 newrule->kif = NULL;
1410 if (newrule->rtableid > 0 &&
1411 newrule->rtableid > rt_numfibs)
1416 if (newrule->qname[0] != 0) {
1418 pf_qname2qid(newrule->qname)) == 0)
1420 else if (newrule->pqname[0] != 0) {
1421 if ((newrule->pqid =
1422 pf_qname2qid(newrule->pqname)) == 0)
1425 newrule->pqid = newrule->qid;
1428 if (newrule->tagname[0])
1430 pf_tagname2tag(newrule->tagname)) == 0)
1432 if (newrule->match_tagname[0])
1433 if ((newrule->match_tag = pf_tagname2tag(
1434 newrule->match_tagname)) == 0)
1436 if (newrule->rt && !newrule->direction)
1441 if (newrule->logif >= PFLOGIFS_MAX)
1444 if (pf_rtlabel_add(&newrule->src.addr) ||
1445 pf_rtlabel_add(&newrule->dst.addr))
1447 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1449 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1451 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1453 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1454 if (pf_tbladdr_setup(ruleset, &pa->addr))
1457 if (newrule->overload_tblname[0]) {
1458 if ((newrule->overload_tbl = pfr_attach_table(
1459 ruleset, newrule->overload_tblname)) ==
1463 newrule->overload_tbl->pfrkt_flags |=
1467 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1468 if (((((newrule->action == PF_NAT) ||
1469 (newrule->action == PF_RDR) ||
1470 (newrule->action == PF_BINAT) ||
1471 (newrule->rt > PF_FASTROUTE)) &&
1472 !newrule->anchor)) &&
1473 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1477 pf_rm_rule(NULL, newrule);
1480 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1481 newrule->evaluations = 0;
1482 newrule->packets[0] = newrule->packets[1] = 0;
1483 newrule->bytes[0] = newrule->bytes[1] = 0;
1485 pf_empty_pool(&pf_pabuf);
1487 if (pcr->action == PF_CHANGE_ADD_HEAD)
1488 oldrule = TAILQ_FIRST(
1489 ruleset->rules[rs_num].active.ptr);
1490 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1491 oldrule = TAILQ_LAST(
1492 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1494 oldrule = TAILQ_FIRST(
1495 ruleset->rules[rs_num].active.ptr);
1496 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1497 oldrule = TAILQ_NEXT(oldrule, entries);
1498 if (oldrule == NULL) {
1499 if (newrule != NULL)
1500 pf_rm_rule(NULL, newrule);
1506 if (pcr->action == PF_CHANGE_REMOVE) {
1507 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1508 ruleset->rules[rs_num].active.rcount--;
1510 if (oldrule == NULL)
1512 ruleset->rules[rs_num].active.ptr,
1514 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1515 pcr->action == PF_CHANGE_ADD_BEFORE)
1516 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1519 ruleset->rules[rs_num].active.ptr,
1520 oldrule, newrule, entries);
1521 ruleset->rules[rs_num].active.rcount++;
1525 TAILQ_FOREACH(oldrule,
1526 ruleset->rules[rs_num].active.ptr, entries)
1529 ruleset->rules[rs_num].active.ticket++;
1531 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1532 pf_remove_if_empty_ruleset(ruleset);
1537 case DIOCCLRSTATES: {
1538 struct pf_state *s, *nexts;
1539 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1541 globaldata_t save_gd = mycpu;
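		/* states are kept in per-CPU trees; migrate to each CPU in turn to walk them */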
1544 for (nn = 0; nn < ncpus; ++nn) {
1545 lwkt_setcpu_self(globaldata_find(nn));
1546 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1548 nexts = RB_NEXT(pf_state_tree_id,
1551 if (!psk->psk_ifname[0] ||
1552 !strcmp(psk->psk_ifname,
1553 s->kif->pfik_name)) {
1555 * don't send out individual
1558 s->sync_flags = PFSTATE_NOSYNC;
1564 lwkt_setcpu_self(save_gd);
1565 psk->psk_killed = killed;
1566 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1570 case DIOCKILLSTATES: {
1571 struct pf_state *s, *nexts;
1572 struct pf_state_key *sk;
1573 struct pf_addr *srcaddr, *dstaddr;
1574 u_int16_t srcport, dstport;
1575 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1577 globaldata_t save_gd = mycpu;
1580 if (psk->psk_pfcmp.id) {
1581 if (psk->psk_pfcmp.creatorid == 0)
1582 psk->psk_pfcmp.creatorid = pf_status.hostid;
1583 for (nn = 0; nn < ncpus; ++nn) {
1584 lwkt_setcpu_self(globaldata_find(nn));
1585 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1586 /* send immediate delete of state */
1587 pfsync_delete_state(s);
1588 s->sync_flags |= PFSTATE_NOSYNC;
1593 lwkt_setcpu_self(save_gd);
1597 for (nn = 0; nn < ncpus; ++nn) {
1598 lwkt_setcpu_self(globaldata_find(nn));
1599 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1601 nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
1602 sk = s->key[PF_SK_WIRE];
1604 if (s->direction == PF_OUT) {
1605 srcaddr = &sk->addr[1];
1606 dstaddr = &sk->addr[0];
1607 				srcport = sk->port[1];
1608 dstport = sk->port[0];
1610 srcaddr = &sk->addr[0];
1611 dstaddr = &sk->addr[1];
1612 srcport = sk->port[0];
1613 				dstport = sk->port[1];
1615 if ((!psk->psk_af || sk->af == psk->psk_af)
1616 && (!psk->psk_proto || psk->psk_proto ==
1618 PF_MATCHA(psk->psk_src.neg,
1619 &psk->psk_src.addr.v.a.addr,
1620 &psk->psk_src.addr.v.a.mask,
1622 PF_MATCHA(psk->psk_dst.neg,
1623 &psk->psk_dst.addr.v.a.addr,
1624 &psk->psk_dst.addr.v.a.mask,
1626 (psk->psk_src.port_op == 0 ||
1627 pf_match_port(psk->psk_src.port_op,
1628 psk->psk_src.port[0],
1629 psk->psk_src.port[1],
1631 (psk->psk_dst.port_op == 0 ||
1632 pf_match_port(psk->psk_dst.port_op,
1633 psk->psk_dst.port[0],
1634 psk->psk_dst.port[1],
1636 (!psk->psk_label[0] ||
1637 (s->rule.ptr->label[0] &&
1638 !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1639 (!psk->psk_ifname[0] ||
1640 !strcmp(psk->psk_ifname, s->kif->pfik_name))) {
1641 /* send immediate delete of state */
1642 pfsync_delete_state(s);
1643 s->sync_flags |= PFSTATE_NOSYNC;
1649 lwkt_setcpu_self(save_gd);
1650 psk->psk_killed = killed;
1654 case DIOCADDSTATE: {
1655 struct pfioc_state *ps = (struct pfioc_state *)addr;
1656 struct pfsync_state *sp = &ps->state;
1658 if (sp->timeout >= PFTM_MAX &&
1659 sp->timeout != PFTM_UNTIL_PACKET) {
1663 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1667 case DIOCGETSTATE: {
1668 struct pfioc_state *ps = (struct pfioc_state *)addr;
1670 struct pf_state_cmp id_key;
1671 globaldata_t save_gd = mycpu;
1674 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
1675 id_key.creatorid = ps->state.creatorid;
1677 for (nn = 0; nn < ncpus; ++nn) {
1678 lwkt_setcpu_self(globaldata_find(nn));
1679 s = pf_find_state_byid(&id_key);
1684 pfsync_state_export(&ps->state, s);
1688 lwkt_setcpu_self(save_gd);
1692 case DIOCGETSTATES: {
1693 struct pfioc_states *ps = (struct pfioc_states *)addr;
1694 struct pf_state *state;
1695 struct pfsync_state *p, *pstore;
1697 globaldata_t save_gd = mycpu;
1700 if (ps->ps_len == 0) {
1701 nr = pf_status.states;
1702 ps->ps_len = sizeof(struct pfsync_state) * nr;
1706 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
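		/* export one state at a time, stopping when the user-supplied buffer is full */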
1710 for (nn = 0; nn < ncpus; ++nn) {
1711 lwkt_setcpu_self(globaldata_find(nn));
1712 state = TAILQ_FIRST(&state_list[nn]);
1714 if (state->timeout != PFTM_UNLINKED) {
1715 if ((nr + 1) * sizeof(*p) >
1716 (unsigned)ps->ps_len) {
1719 pfsync_state_export(pstore, state);
1720 error = copyout(pstore, p, sizeof(*p));
1722 kfree(pstore, M_TEMP);
1723 lwkt_setcpu_self(save_gd);
1729 state = TAILQ_NEXT(state, entry_list);
1732 lwkt_setcpu_self(save_gd);
1733 ps->ps_len = sizeof(struct pfsync_state) * nr;
1734 kfree(pstore, M_TEMP);
1738 case DIOCGETSTATUS: {
1739 struct pf_status *s = (struct pf_status *)addr;
1740 bcopy(&pf_status, s, sizeof(struct pf_status));
1741 pfi_update_status(s->ifname, s);
1745 case DIOCSETSTATUSIF: {
1746 struct pfioc_if *pi = (struct pfioc_if *)addr;
1748 if (pi->ifname[0] == 0) {
1749 bzero(pf_status.ifname, IFNAMSIZ);
1752 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1756 case DIOCCLRSTATUS: {
1757 bzero(pf_status.counters, sizeof(pf_status.counters));
1758 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1759 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1760 pf_status.since = time_second;
1761 if (*pf_status.ifname)
1762 pfi_update_status(pf_status.ifname, NULL);
1767 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1768 struct pf_state_key *sk;
1769 struct pf_state *state;
1770 struct pf_state_key_cmp key;
1771 int m = 0, direction = pnl->direction;
1773 globaldata_t save_gd = mycpu;
1776 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1777 sidx = (direction == PF_IN) ? 1 : 0;
1778 didx = (direction == PF_IN) ? 0 : 1;
1781 PF_AZERO(&pnl->saddr, pnl->af) ||
1782 PF_AZERO(&pnl->daddr, pnl->af) ||
1783 ((pnl->proto == IPPROTO_TCP ||
1784 pnl->proto == IPPROTO_UDP) &&
1785 (!pnl->dport || !pnl->sport)))
1789 key.proto = pnl->proto;
1790 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1791 key.port[sidx] = pnl->sport;
1792 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1793 key.port[didx] = pnl->dport;
1796 for (nn = 0; nn < ncpus; ++nn) {
1797 lwkt_setcpu_self(globaldata_find(nn));
1798 state = pf_find_state_all(&key, direction, &m);
1805 error = E2BIG; /* more than one state */
1806 } else if (state != NULL) {
1807 sk = state->key[sidx];
1808 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1809 pnl->rsport = sk->port[sidx];
1810 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1811 pnl->rdport = sk->port[didx];
1815 lwkt_setcpu_self(save_gd);
1820 case DIOCSETTIMEOUT: {
1821 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1824 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1829 old = pf_default_rule.timeout[pt->timeout];
1830 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1832 pf_default_rule.timeout[pt->timeout] = pt->seconds;
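		/* wake the purge thread early if the purge interval was shortened */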
1833 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1834 wakeup(pf_purge_thread);
1839 case DIOCGETTIMEOUT: {
1840 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1842 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1846 pt->seconds = pf_default_rule.timeout[pt->timeout];
1850 case DIOCGETLIMIT: {
1851 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1853 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1857 pl->limit = pf_pool_limits[pl->index].limit;
1861 case DIOCSETLIMIT: {
1862 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1865 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1866 pf_pool_limits[pl->index].pp == NULL) {
1871 /* XXX Get an API to set limits on the zone/pool */
1872 old_limit = pf_pool_limits[pl->index].limit;
1873 pf_pool_limits[pl->index].limit = pl->limit;
1874 pl->limit = old_limit;
1878 case DIOCSETDEBUG: {
1879 u_int32_t *level = (u_int32_t *)addr;
1881 pf_status.debug = *level;
1885 case DIOCCLRRULECTRS: {
1886 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1887 struct pf_ruleset *ruleset = &pf_main_ruleset;
1888 struct pf_rule *rule;
1891 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1892 rule->evaluations = 0;
1893 rule->packets[0] = rule->packets[1] = 0;
1894 rule->bytes[0] = rule->bytes[1] = 0;
1899 case DIOCGIFSPEED: {
1900 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
1901 struct pf_ifspeed ps;
1904 if (psp->ifname[0] != 0) {
1905 /* Can we completely trust user-land? */
1906 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1907 ifp = ifunit(ps.ifname);
1909 psp->baudrate = ifp->if_baudrate;
1917 case DIOCSTARTALTQ: {
1918 struct pf_altq *altq;
1920 /* enable all altq interfaces on active list */
1921 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1922 if (altq->qname[0] == 0) {
1923 error = pf_enable_altq(altq);
1929 pf_altq_running = 1;
1930 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1934 case DIOCSTOPALTQ: {
1935 struct pf_altq *altq;
1937 /* disable all altq interfaces on active list */
1938 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1939 if (altq->qname[0] == 0) {
1940 error = pf_disable_altq(altq);
1946 pf_altq_running = 0;
1947 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1952 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1953 struct pf_altq *altq, *a;
1955 if (pa->ticket != ticket_altqs_inactive) {
1959 altq = kmalloc(sizeof(struct pf_altq), M_PFALTQPL, M_WAITOK|M_NULLOK);
1964 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1967 * if this is for a queue, find the discipline and
1968 * copy the necessary fields
1970 if (altq->qname[0] != 0) {
1971 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1973 kfree(altq, M_PFALTQPL);
1976 altq->altq_disc = NULL;
1977 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1978 if (strncmp(a->ifname, altq->ifname,
1979 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1980 altq->altq_disc = a->altq_disc;
1986 error = altq_add(altq);
1988 kfree(altq, M_PFALTQPL);
1992 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1993 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1997 case DIOCGETALTQS: {
1998 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1999 struct pf_altq *altq;
2002 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2004 pa->ticket = ticket_altqs_active;
2009 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2010 struct pf_altq *altq;
2013 if (pa->ticket != ticket_altqs_active) {
2018 altq = TAILQ_FIRST(pf_altqs_active);
2019 while ((altq != NULL) && (nr < pa->nr)) {
2020 altq = TAILQ_NEXT(altq, entries);
2027 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2031 case DIOCCHANGEALTQ:
2032 /* CHANGEALTQ not supported yet! */
2036 case DIOCGETQSTATS: {
2037 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2038 struct pf_altq *altq;
2042 if (pq->ticket != ticket_altqs_active) {
2046 nbytes = pq->nbytes;
2048 altq = TAILQ_FIRST(pf_altqs_active);
2049 while ((altq != NULL) && (nr < pq->nr)) {
2050 altq = TAILQ_NEXT(altq, entries);
2057 error = altq_getqstats(altq, pq->buf, &nbytes);
2059 pq->scheduler = altq->scheduler;
2060 pq->nbytes = nbytes;
2066 case DIOCBEGINADDRS: {
2067 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2069 pf_empty_pool(&pf_pabuf);
2070 pp->ticket = ++ticket_pabuf;
2075 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2077 if (pp->ticket != ticket_pabuf) {
2082 if (pp->af == AF_INET) {
2083 error = EAFNOSUPPORT;
2088 if (pp->af == AF_INET6) {
2089 error = EAFNOSUPPORT;
2093 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2094 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2095 pp->addr.addr.type != PF_ADDR_TABLE) {
2099 		pa = kmalloc(sizeof(struct pf_pooladdr), M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2104 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2105 if (pa->ifname[0]) {
2106 pa->kif = pfi_kif_get(pa->ifname);
2107 if (pa->kif == NULL) {
2108 				kfree(pa, M_PFPOOLADDRPL);
2112 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2114 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2115 pfi_dynaddr_remove(&pa->addr);
2116 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2117 kfree(pa, M_PFPOOLADDRPL);
2121 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2125 case DIOCGETADDRS: {
2126 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2129 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2130 pp->r_num, 0, 1, 0);
2135 TAILQ_FOREACH(pa, &pool->list, entries)
2141 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2144 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2145 pp->r_num, 0, 1, 1);
2150 pa = TAILQ_FIRST(&pool->list);
2151 while ((pa != NULL) && (nr < pp->nr)) {
2152 pa = TAILQ_NEXT(pa, entries);
2159 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2160 pf_addr_copyout(&pp->addr.addr);
2164 case DIOCCHANGEADDR: {
2165 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2166 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2167 struct pf_ruleset *ruleset;
2169 if (pca->action < PF_CHANGE_ADD_HEAD ||
2170 pca->action > PF_CHANGE_REMOVE) {
2174 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2175 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2176 pca->addr.addr.type != PF_ADDR_TABLE) {
2181 ruleset = pf_find_ruleset(pca->anchor);
2182 if (ruleset == NULL) {
2186 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2187 pca->r_num, pca->r_last, 1, 1);
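		/* construct and validate the replacement pool address before modifying the pool list */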
2192 if (pca->action != PF_CHANGE_REMOVE) {
2193 newpa = kmalloc(sizeof(struct pf_pooladdr),
2194 M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2195 if (newpa == NULL) {
2199 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2201 if (pca->af == AF_INET) {
2202 kfree(newpa, M_PFPOOLADDRPL);
2203 error = EAFNOSUPPORT;
2208 if (pca->af == AF_INET6) {
2209 kfree(newpa, M_PFPOOLADDRPL);
2210 error = EAFNOSUPPORT;
2214 if (newpa->ifname[0]) {
2215 newpa->kif = pfi_kif_get(newpa->ifname);
2216 if (newpa->kif == NULL) {
2217 kfree(newpa, M_PFPOOLADDRPL);
2221 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2224 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2225 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2226 pfi_dynaddr_remove(&newpa->addr);
2227 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2228 kfree(newpa, M_PFPOOLADDRPL);
2234 if (pca->action == PF_CHANGE_ADD_HEAD)
2235 oldpa = TAILQ_FIRST(&pool->list);
2236 else if (pca->action == PF_CHANGE_ADD_TAIL)
2237 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2241 oldpa = TAILQ_FIRST(&pool->list);
2242 while ((oldpa != NULL) && (i < pca->nr)) {
2243 oldpa = TAILQ_NEXT(oldpa, entries);
2246 if (oldpa == NULL) {
2252 if (pca->action == PF_CHANGE_REMOVE) {
2253 TAILQ_REMOVE(&pool->list, oldpa, entries);
2254 pfi_dynaddr_remove(&oldpa->addr);
2255 pf_tbladdr_remove(&oldpa->addr);
2256 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2257 kfree(oldpa, M_PFPOOLADDRPL);
2260 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2261 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2262 pca->action == PF_CHANGE_ADD_BEFORE)
2263 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2265 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2269 pool->cur = TAILQ_FIRST(&pool->list);
2270 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2275 case DIOCGETRULESETS: {
2276 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2277 struct pf_ruleset *ruleset;
2278 struct pf_anchor *anchor;
2280 pr->path[sizeof(pr->path) - 1] = 0;
2281 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2286 if (ruleset->anchor == NULL) {
2287 /* XXX kludge for pf_main_ruleset */
2288 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2289 if (anchor->parent == NULL)
2292 RB_FOREACH(anchor, pf_anchor_node,
2293 &ruleset->anchor->children)
2299 case DIOCGETRULESET: {
2300 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2301 struct pf_ruleset *ruleset;
2302 struct pf_anchor *anchor;
2305 pr->path[sizeof(pr->path) - 1] = 0;
2306 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2311 if (ruleset->anchor == NULL) {
2312 /* XXX kludge for pf_main_ruleset */
2313 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2314 if (anchor->parent == NULL && nr++ == pr->nr) {
2315 strlcpy(pr->name, anchor->name,
2320 RB_FOREACH(anchor, pf_anchor_node,
2321 &ruleset->anchor->children)
2322 if (nr++ == pr->nr) {
2323 strlcpy(pr->name, anchor->name,
2333 case DIOCRCLRTABLES: {
2334 struct pfioc_table *io = (struct pfioc_table *)addr;
2336 if (io->pfrio_esize != 0) {
2340 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2341 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2345 case DIOCRADDTABLES: {
2346 struct pfioc_table *io = (struct pfioc_table *)addr;
2348 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2352 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2353 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2357 case DIOCRDELTABLES: {
2358 struct pfioc_table *io = (struct pfioc_table *)addr;
2360 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2364 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2365 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2369 case DIOCRGETTABLES: {
2370 struct pfioc_table *io = (struct pfioc_table *)addr;
2372 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2376 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2377 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2381 case DIOCRGETTSTATS: {
2382 struct pfioc_table *io = (struct pfioc_table *)addr;
2384 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2388 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2389 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2393 case DIOCRCLRTSTATS: {
2394 struct pfioc_table *io = (struct pfioc_table *)addr;
2396 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2400 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2401 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2405 case DIOCRSETTFLAGS: {
2406 struct pfioc_table *io = (struct pfioc_table *)addr;
2408 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2412 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2413 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2414 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2418 case DIOCRCLRADDRS: {
2419 struct pfioc_table *io = (struct pfioc_table *)addr;
2421 if (io->pfrio_esize != 0) {
2425 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2426 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2430 case DIOCRADDADDRS: {
2431 struct pfioc_table *io = (struct pfioc_table *)addr;
2433 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2437 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2438 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2439 PFR_FLAG_USERIOCTL);
2443 case DIOCRDELADDRS: {
2444 struct pfioc_table *io = (struct pfioc_table *)addr;
2446 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2450 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2451 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2452 PFR_FLAG_USERIOCTL);
2456 case DIOCRSETADDRS: {
2457 struct pfioc_table *io = (struct pfioc_table *)addr;
2459 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2463 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2464 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2465 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2466 PFR_FLAG_USERIOCTL, 0);
2470 case DIOCRGETADDRS: {
2471 struct pfioc_table *io = (struct pfioc_table *)addr;
2473 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2477 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2478 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2482 case DIOCRGETASTATS: {
2483 struct pfioc_table *io = (struct pfioc_table *)addr;
2485 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2489 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2490 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2494 case DIOCRCLRASTATS: {
2495 struct pfioc_table *io = (struct pfioc_table *)addr;
2497 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2501 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2502 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2503 PFR_FLAG_USERIOCTL);
2507 case DIOCRTSTADDRS: {
2508 struct pfioc_table *io = (struct pfioc_table *)addr;
2510 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2514 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2515 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2516 PFR_FLAG_USERIOCTL);
2520 case DIOCRINADEFINE: {
2521 struct pfioc_table *io = (struct pfioc_table *)addr;
2523 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2527 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2528 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2529 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2534 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2535 error = pf_osfp_add(io);
2540 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2541 error = pf_osfp_get(io);
2546 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2547 struct pfioc_trans_e *ioe;
2548 struct pfr_table *table;
2551 if (io->esize != sizeof(*ioe)) {
2555 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2556 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
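		/* open an inactive ticket for each ruleset, table or altq element supplied by userland */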
2557 for (i = 0; i < io->size; i++) {
2558 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2559 kfree(table, M_TEMP);
2564 switch (ioe->rs_num) {
2566 case PF_RULESET_ALTQ:
2567 if (ioe->anchor[0]) {
2568 kfree(table, M_TEMP);
2573 if ((error = pf_begin_altq(&ioe->ticket))) {
2574 kfree(table, M_TEMP);
2580 case PF_RULESET_TABLE:
2581 bzero(table, sizeof(*table));
2582 strlcpy(table->pfrt_anchor, ioe->anchor,
2583 sizeof(table->pfrt_anchor));
2584 if ((error = pfr_ina_begin(table,
2585 &ioe->ticket, NULL, 0))) {
2586 kfree(table, M_TEMP);
2592 if ((error = pf_begin_rules(&ioe->ticket,
2593 ioe->rs_num, ioe->anchor))) {
2594 kfree(table, M_TEMP);
2600 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2601 kfree(table, M_TEMP);
2607 kfree(table, M_TEMP);
2612 case DIOCXROLLBACK: {
2613 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2614 struct pfioc_trans_e *ioe;
2615 struct pfr_table *table;
2618 if (io->esize != sizeof(*ioe)) {
2622 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2623 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2624 for (i = 0; i < io->size; i++) {
2625 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2626 kfree(table, M_TEMP);
2631 switch (ioe->rs_num) {
2633 case PF_RULESET_ALTQ:
2634 if (ioe->anchor[0]) {
2635 kfree(table, M_TEMP);
2640 if ((error = pf_rollback_altq(ioe->ticket))) {
2641 kfree(table, M_TEMP);
2643 goto fail; /* really bad */
2647 case PF_RULESET_TABLE:
2648 bzero(table, sizeof(*table));
2649 strlcpy(table->pfrt_anchor, ioe->anchor,
2650 sizeof(table->pfrt_anchor));
2651 if ((error = pfr_ina_rollback(table,
2652 ioe->ticket, NULL, 0))) {
2653 kfree(table, M_TEMP);
2655 goto fail; /* really bad */
2659 if ((error = pf_rollback_rules(ioe->ticket,
2660 ioe->rs_num, ioe->anchor))) {
2661 kfree(table, M_TEMP);
2663 goto fail; /* really bad */
2668 kfree(table, M_TEMP);
2674 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2675 struct pfioc_trans_e *ioe;
2676 struct pfr_table *table;
2677 struct pf_ruleset *rs;
2680 if (io->esize != sizeof(*ioe)) {
2684 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2685 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2686 		/* first make sure everything will succeed */
2687 for (i = 0; i < io->size; i++) {
2688 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2689 kfree(table, M_TEMP);
2694 switch (ioe->rs_num) {
2696 case PF_RULESET_ALTQ:
2697 if (ioe->anchor[0]) {
2698 kfree(table, M_TEMP);
2703 if (!altqs_inactive_open || ioe->ticket !=
2704 ticket_altqs_inactive) {
2705 kfree(table, M_TEMP);
2712 case PF_RULESET_TABLE:
2713 rs = pf_find_ruleset(ioe->anchor);
2714 if (rs == NULL || !rs->topen || ioe->ticket !=
2716 kfree(table, M_TEMP);
2723 if (ioe->rs_num < 0 || ioe->rs_num >=
2725 kfree(table, M_TEMP);
2730 rs = pf_find_ruleset(ioe->anchor);
2732 !rs->rules[ioe->rs_num].inactive.open ||
2733 rs->rules[ioe->rs_num].inactive.ticket !=
2735 kfree(table, M_TEMP);
2743 /* now do the commit - no errors should happen here */
2744 for (i = 0; i < io->size; i++) {
2745 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2746 kfree(table, M_TEMP);
2751 switch (ioe->rs_num) {
2753 case PF_RULESET_ALTQ:
2754 if ((error = pf_commit_altq(ioe->ticket))) {
2755 kfree(table, M_TEMP);
2757 goto fail; /* really bad */
2761 case PF_RULESET_TABLE:
2762 bzero(table, sizeof(*table));
2763 strlcpy(table->pfrt_anchor, ioe->anchor,
2764 sizeof(table->pfrt_anchor));
2765 if ((error = pfr_ina_commit(table, ioe->ticket,
2767 kfree(table, M_TEMP);
2769 goto fail; /* really bad */
2773 if ((error = pf_commit_rules(ioe->ticket,
2774 ioe->rs_num, ioe->anchor))) {
2775 kfree(table, M_TEMP);
2777 goto fail; /* really bad */
2782 kfree(table, M_TEMP);
2787 case DIOCGETSRCNODES: {
2788 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2789 struct pf_src_node *n, *p, *pstore;
2791 int space = psn->psn_len;
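/*
 * If the caller supplied no buffer space, just count the source
 * nodes on every cpu and report the length a full dump would need.
 */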
2795 for (nn = 0; nn < ncpus; ++nn) {
2796 RB_FOREACH(n, pf_src_tree,
2797 &tree_src_tracking[nn]) {
2801 psn->psn_len = sizeof(struct pf_src_node) * nr;
2805 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2807 p = psn->psn_src_nodes;
2810 * WARNING: We are not switching cpus so we cannot call
2811 * nominal pf.c support routines for cpu-specific
2814 for (nn = 0; nn < ncpus; ++nn) {
2815 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2816 int secs = time_second, diff;
2818 if ((nr + 1) * sizeof(*p) >
2819 (unsigned)psn->psn_len) {
2823 bcopy(n, pstore, sizeof(*pstore));
2824 if (n->rule.ptr != NULL)
2825 pstore->rule.nr = n->rule.ptr->nr;
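/* convert creation to an age and expire to the time remaining */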
2826 pstore->creation = secs - pstore->creation;
2827 if (pstore->expire > secs)
2828 pstore->expire -= secs;
2832 /* adjust the connection rate estimate */
2833 diff = secs - n->conn_rate.last;
2834 if (diff >= n->conn_rate.seconds)
2835 pstore->conn_rate.count = 0;
2837 pstore->conn_rate.count -=
2838 n->conn_rate.count * diff /
2839 n->conn_rate.seconds;
2841 error = copyout(pstore, p, sizeof(*p));
2843 kfree(pstore, M_TEMP);
2850 psn->psn_len = sizeof(struct pf_src_node) * nr;
2851 kfree(pstore, M_TEMP);
2855 case DIOCCLRSRCNODES: {
2856 struct pf_src_node *n;
2857 struct pf_state *state;
2858 globaldata_t save_gd = mycpu;
2862 * WARNING: We are not switching cpus so we cannot call
2863 * nominal pf.c support routines for cpu-specific
2866 for (nn = 0; nn < ncpus; ++nn) {
2867 RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
2868 state->src_node = NULL;
2869 state->nat_src_node = NULL;
2871 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2878 * WARNING: Must move to the target cpu for nominal calls
2881 for (nn = 0; nn < ncpus; ++nn) {
2882 lwkt_setcpu_self(globaldata_find(nn));
2883 pf_purge_expired_src_nodes(1);
2885 lwkt_setcpu_self(save_gd);
2886 pf_status.src_nodes = 0;
2890 case DIOCKILLSRCNODES: {
2891 struct pf_src_node *sn;
2893 struct pfioc_src_node_kill *psnk =
2894 (struct pfioc_src_node_kill *)addr;
2896 globaldata_t save_gd = mycpu;
2900 * WARNING: We are not switching cpus so we cannot call
2901 * nominal pf.c support routines for cpu-specific
2904 for (nn = 0; nn < ncpus; ++nn) {
2905 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking[nn]) {
2906 if (PF_MATCHA(psnk->psnk_src.neg,
2907 &psnk->psnk_src.addr.v.a.addr,
2908 &psnk->psnk_src.addr.v.a.mask,
2909 &sn->addr, sn->af) &&
2910 PF_MATCHA(psnk->psnk_dst.neg,
2911 &psnk->psnk_dst.addr.v.a.addr,
2912 &psnk->psnk_dst.addr.v.a.mask,
2913 &sn->raddr, sn->af)) {
2914 /* Handle state to src_node linkage */
2915 if (sn->states != 0) {
2916 RB_FOREACH(s, pf_state_tree_id,
2918 if (s->src_node == sn)
2920 if (s->nat_src_node == sn)
2921 s->nat_src_node = NULL;
2931 for (nn = 0; nn < ncpus; ++nn) {
2932 lwkt_setcpu_self(globaldata_find(nn));
2933 pf_purge_expired_src_nodes(1);
2935 lwkt_setcpu_self(save_gd);
2938 psnk->psnk_killed = killed;
2942 case DIOCSETHOSTID: {
2943 u_int32_t *hostid = (u_int32_t *)addr;
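/* a hostid of zero asks for a randomly generated one */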
2946 pf_status.hostid = karc4random();
2948 pf_status.hostid = *hostid;
2958 case DIOCIGETIFACES: {
2959 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2961 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2965 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2970 case DIOCSETIFFLAG: {
2971 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2973 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2977 case DIOCCLRIFFLAG: {
2978 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2980 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2989 lwkt_reltoken(&pf_token);
2994 * XXX - Check for version mismatch!!!
2997 pf_clear_states(void)
2999 struct pf_state *s, *nexts;
3001 globaldata_t save_gd = mycpu;
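/*
 * Unlike the ioctl paths above, migrate to each cpu in turn so the
 * nominal per-cpu support routines can be used while walking its
 * state tree.
 */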
3004 for (nn = 0; nn < ncpus; ++nn) {
3005 lwkt_setcpu_self(globaldata_find(nn));
3006 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]); s; s = nexts) {
3007 nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
3009 /* don't send out individual delete messages */
3010 s->sync_flags = PFSTATE_NOSYNC;
3016 lwkt_setcpu_self(save_gd);
3020 * XXX This is called on module unload; we do not want to sync that over, do we? */
3022 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3027 pf_clear_tables(void)
3029 struct pfioc_table io;
3032 bzero(&io, sizeof(io));
3034 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3041 pf_clear_srcnodes(void)
3043 struct pf_src_node *n;
3044 struct pf_state *state;
3045 globaldata_t save_gd = mycpu;
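/*
 * Visit each cpu, detach its states from their source nodes and
 * purge the now unreferenced nodes in place.
 */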
3048 for (nn = 0; nn < ncpus; ++nn) {
3049 lwkt_setcpu_self(globaldata_find(nn));
3050 RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
3051 state->src_node = NULL;
3052 state->nat_src_node = NULL;
3054 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
3058 pf_purge_expired_src_nodes(0);
3060 lwkt_setcpu_self(save_gd);
3062 pf_status.src_nodes = 0;
3066 * XXX - Check for version mismatch!!!
3070 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3080 pf_status.running = 0;
3081 error = dehook_pf();
3083 pf_status.running = 1;
3084 DPFPRINTF(PF_DEBUG_MISC,
3085 ("pf: pfil unregistration failed\n"));
3089 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) {
3090 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3093 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) {
3094 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3095 break; /* XXX: rollback? */
3097 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) {
3098 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3099 break; /* XXX: rollback? */
3101 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3103 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3104 break; /* XXX: rollback? */
3106 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3108 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3109 break; /* XXX: rollback? */
3112 /* XXX: these should always succeed here */
3113 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3114 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3115 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3116 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3117 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3119 if ((error = pf_clear_tables()) != 0)
3122 if ((error = pf_begin_altq(&t[0])) != 0) {
3123 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3126 pf_commit_altq(t[0]);
3129 pf_clear_srcnodes();
3131 /* status does not use malloc'ed memory so there is no need to clean it up */
3132 /* fingerprints and interfaces have their own cleanup code */
3138 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3141 * DragonFly's version of pf uses FreeBSD's native host byte ordering
3142 * for ip_len/ip_off. This is why we don't have to change byte order
3143 * like the FreeBSD-5 version does.
3147 lwkt_gettoken_shared(&pf_token);
3149 chk = pf_test(PF_IN, ifp, m, NULL, NULL);
3154 lwkt_reltoken(&pf_token);
3159 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3162 * DragonFly's version of pf uses FreeBSD's native host byte ordering
3163 * for ip_len/ip_off. This is why we don't have to change byte order
3164 * like the FreeBSD-5 version does.
3168 lwkt_gettoken_shared(&pf_token);
3170 /* We need a proper checksum before we start (see OpenBSD ip_output) */
3171 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3172 in_delayed_cksum(*m);
3173 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3175 chk = pf_test(PF_OUT, ifp, m, NULL, NULL);
3180 lwkt_reltoken(&pf_token);
3186 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3189 * IPv6 is not affected by ip_len/ip_off byte order changes.
3193 lwkt_gettoken_shared(&pf_token);
3195 chk = pf_test6(PF_IN, ifp, m, NULL, NULL);
3200 lwkt_reltoken(&pf_token);
3205 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3208 * IPv6 is not affected by ip_len/ip_off byte order changes.
3212 lwkt_gettoken_shared(&pf_token);
3214 /* We need a proper checksum before we start (see OpenBSD ip_output) */
3215 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3216 in_delayed_cksum(*m);
3217 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3219 chk = pf_test6(PF_OUT, ifp, m, NULL, NULL);
3224 lwkt_reltoken(&pf_token);
3232 struct pfil_head *pfh_inet;
3234 struct pfil_head *pfh_inet6;
3237 lwkt_gettoken(&pf_token);
3239 if (pf_pfil_hooked) {
3240 lwkt_reltoken(&pf_token);
3244 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3245 if (pfh_inet == NULL) {
3246 lwkt_reltoken(&pf_token);
3249 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh_inet);
3250 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh_inet);
3252 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3253 if (pfh_inet6 == NULL) {
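/* no inet6 head: undo the inet hooks before giving up */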
3254 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3255 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3256 lwkt_reltoken(&pf_token);
3259 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh_inet6);
3260 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh_inet6);
3264 lwkt_reltoken(&pf_token);
3271 struct pfil_head *pfh_inet;
3273 struct pfil_head *pfh_inet6;
3276 lwkt_gettoken(&pf_token);
3278 if (pf_pfil_hooked == 0) {
3279 lwkt_reltoken(&pf_token);
3283 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3284 if (pfh_inet == NULL) {
3285 lwkt_reltoken(&pf_token);
3288 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3289 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3291 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3292 if (pfh_inet6 == NULL) {
3293 lwkt_reltoken(&pf_token);
3296 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
3297 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
3301 lwkt_reltoken(&pf_token);
3308 lwkt_gettoken(&pf_token);
3310 pf_dev = make_dev(&pf_ops, 0, 0, 0, 0600, PF_NAME);
3312 lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE);
3313 lockinit(&pf_global_statetbl_lock, "pfglstlk", 0, 0);
3314 lwkt_reltoken(&pf_token);
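/*
 * rnh_walktree() callback used at unload time to delete every
 * entry from the pf mask radix tree.
 */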
3319 pf_mask_del(struct radix_node *rn, void *arg)
3321 struct radix_node_head *rnh = arg;
3323 rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
3332 pf_status.running = 0;
3334 lwkt_gettoken(&pf_token);
3336 error = dehook_pf();
3339 * Should not happen!
3340 * XXX Due to error code ESRCH, kldunload will show
3341 * a message like 'No such process'.
3343 kprintf("pfil unregistration failed\n");
3344 lwkt_reltoken(&pf_token);
3349 while (pf_end_threads < 2) {
3350 wakeup_one(pf_purge_thread);
3351 tsleep(pf_purge_thread, 0, "pftmo", hz);
3355 dev_ops_remove_all(&pf_ops);
3356 lockuninit(&pf_consistency_lock);
3357 lwkt_reltoken(&pf_token);
3359 if (pf_maskhead != NULL) {
3360 pf_maskhead->rnh_walktree(pf_maskhead,
3361 pf_mask_del, pf_maskhead);
3365 kmalloc_destroy(&pf_state_pl);
3366 kmalloc_destroy(&pf_frent_pl);
3367 kmalloc_destroy(&pf_cent_pl);
3372 pf_modevent(module_t mod, int type, void *data)
3376 lwkt_gettoken(&pf_token);
3384 error = pf_unload();
3390 lwkt_reltoken(&pf_token);
3394 static moduledata_t pf_mod = {
3399 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
3400 MODULE_VERSION(pf, PF_MODVER);