1 /* $OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $ */
4 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
6 * Copyright (c) 2002 Cedric Berger
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
36 #include "opt_inet6.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
45 #include <vm/vm_zone.h>
48 #include <net/route.h>
49 #include <netinet/in.h>
50 #include <net/pf/pfvar.h>
/*
 * Helper macros for the pf table routines.
 * NOTE(review): this excerpt elides source lines (the embedded numbering
 * is non-contiguous), so several macro bodies below are visibly
 * incomplete here; verify against the full file before editing.
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: user ioctls use
 * copyin/copyout, kernel-internal callers use plain bcopy.
 */
52 #define ACCEPT_FLAGS(flags, oklist) \
54 	if ((flags & ~(oklist)) & \
59 #define COPYIN(from, to, size, flags) \
60 	((flags & PFR_FLAG_USERIOCTL) ? \
61 	copyin((from), (to), (size)) : \
62 	(bcopy((from), (to), (size)), 0))
64 #define COPYOUT(from, to, size, flags) \
65 	((flags & PFR_FLAG_USERIOCTL) ? \
66 	copyout((from), (to), (size)) : \
67 	(bcopy((from), (to), (size)), 0))
69 #define FILLIN_SIN(sin, addr) \
71 	(sin).sin_len = sizeof(sin); \
72 	(sin).sin_family = AF_INET; \
73 	(sin).sin_addr = (addr); \
76 #define FILLIN_SIN6(sin6, addr) \
78 	(sin6).sin6_len = sizeof(sin6); \
79 	(sin6).sin6_family = AF_INET6; \
80 	(sin6).sin6_addr = (addr); \
83 #define SWAP(type, a1, a2) \
/* SUNION2PF: view a sockaddr_union's v4 or v6 address as a pf_addr. */
90 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
91 	(struct pf_addr *)&(su)->sin.sin_addr : \
92 	(struct pf_addr *)&(su)->sin6.sin6_addr)
/* Prefix-length helpers: a "network" entry has fewer bits than the AF max. */
94 #define AF_BITS(af) (((af)==AF_INET)?32:128)
95 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
96 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
97 #define KENTRY_RNF_ROOT(ke) \
98 	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
100 #define NO_ADDRESSES (-1)
101 #define ENQUEUE_UNMARKED_ONLY (1)
102 #define INVERT_NEG_FLAG (1)
/*
 * Walker context passed to pfr_walktree() via rnh_walktree().
 * The union members (accessed through the pfrw_* aliases below) carry
 * per-operation state; pfrw_free doubles as a countdown/index counter
 * (aliased as pfrw_cnt).  NOTE(review): some struct members (op/flags
 * fields referenced elsewhere) are elided from this excerpt.
 */
104 struct pfr_walktree {
115 	struct pfr_addr *pfrw1_addr;
116 	struct pfr_astats *pfrw1_astats;
117 	struct pfr_kentryworkq *pfrw1_workq;
118 	struct pfr_kentry *pfrw1_kentry;
119 	struct pfi_dynaddr *pfrw1_dyn;
/* Convenience aliases for the union members above. */
124 #define pfrw_addr pfrw_1.pfrw1_addr
125 #define pfrw_astats pfrw_1.pfrw1_astats
126 #define pfrw_workq pfrw_1.pfrw1_workq
127 #define pfrw_kentry pfrw_1.pfrw1_kentry
128 #define pfrw_dyn pfrw_1.pfrw1_dyn
129 #define pfrw_cnt pfrw_free
/* senderr: record an error code and jump to the common cleanup label. */
131 #define senderr(e) do { rv = (e); goto _bad; } while (0)
/*
 * Allocation zones for tables, entries (normal and interrupt-safe
 * variants) and per-entry counters, plus prebuilt sockaddr templates
 * used when probing the radix trees.
 */
133 vm_zone_t pfr_ktable_pl;
134 vm_zone_t pfr_kentry_pl;
135 vm_zone_t pfr_kentry_pl2;
136 vm_zone_t pfr_kcounters_pl;
137 struct sockaddr_in pfr_sin;
138 struct sockaddr_in6 pfr_sin6;
139 union sockaddr_union pfr_mask;
140 struct pf_addr pfr_ffaddr;
142 void pfr_copyout_addr(struct pfr_addr *,
143 struct pfr_kentry *ke);
144 int pfr_validate_addr(struct pfr_addr *);
145 void pfr_enqueue_addrs(struct pfr_ktable *,
146 struct pfr_kentryworkq *, int *, int);
147 void pfr_mark_addrs(struct pfr_ktable *);
148 struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
149 struct pfr_addr *, int);
150 struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
151 void pfr_destroy_kentries(struct pfr_kentryworkq *);
152 void pfr_destroy_kentry(struct pfr_kentry *);
153 void pfr_insert_kentries(struct pfr_ktable *,
154 struct pfr_kentryworkq *, long);
155 void pfr_remove_kentries(struct pfr_ktable *,
156 struct pfr_kentryworkq *);
157 void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
159 void pfr_reset_feedback(struct pfr_addr *, int, int);
160 void pfr_prepare_network(union sockaddr_union *, int, int);
161 int pfr_route_kentry(struct pfr_ktable *,
162 struct pfr_kentry *);
163 int pfr_unroute_kentry(struct pfr_ktable *,
164 struct pfr_kentry *);
165 int pfr_walktree(struct radix_node *, void *);
166 int pfr_validate_table(struct pfr_table *, int, int);
167 int pfr_fix_anchor(char *);
168 void pfr_commit_ktable(struct pfr_ktable *, long);
169 void pfr_insert_ktables(struct pfr_ktableworkq *);
170 void pfr_insert_ktable(struct pfr_ktable *);
171 void pfr_setflags_ktables(struct pfr_ktableworkq *);
172 void pfr_setflags_ktable(struct pfr_ktable *, int);
173 void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
175 void pfr_clstats_ktable(struct pfr_ktable *, long, int);
176 struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int);
177 void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
178 void pfr_destroy_ktable(struct pfr_ktable *, int);
179 int pfr_ktable_compare(struct pfr_ktable *,
180 struct pfr_ktable *);
181 struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
182 void pfr_clean_node_mask(struct pfr_ktable *,
183 struct pfr_kentryworkq *);
184 int pfr_table_count(struct pfr_table *, int);
185 int pfr_skip_table(struct pfr_table *,
186 struct pfr_ktable *, int);
187 struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
189 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
192 struct pfr_ktablehead pfr_ktables;
193 struct pfr_table pfr_nulltable;
/*
 * NOTE(review): the enclosing function header is elided from this
 * excerpt; these statements presumably belong to pfr_initialize() —
 * they pre-fill the static sockaddr templates and the all-ones address
 * used by the lookup/route helpers.  Confirm against the full source.
 */
199 	pfr_sin.sin_len = sizeof(pfr_sin);
200 	pfr_sin.sin_family = AF_INET;
201 	pfr_sin6.sin6_len = sizeof(pfr_sin6);
202 	pfr_sin6.sin6_family = AF_INET6;
204 	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
/*
 * Remove every address from table 'tbl'; *ndel receives the count.
 * Rejects CONST tables; PFR_FLAG_DUMMY only computes the count.
 * NOTE(review): several lines (returns, crit_enter/exit, closing
 * braces) are elided from this excerpt.
 */
208 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
210 	struct pfr_ktable *kt;
211 	struct pfr_kentryworkq workq;
213 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
214 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
216 	kt = pfr_lookup_table(tbl);
217 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
219 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Collect all entries of the table into workq (sweep=0 → enqueue all). */
221 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
223 	if (!(flags & PFR_FLAG_DUMMY)) {
224 		if (flags & PFR_FLAG_ATOMIC)
226 		pfr_remove_kentries(kt, &workq);
227 		if (flags & PFR_FLAG_ATOMIC)
230 			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
/*
 * Add 'size' addresses from 'addr' to table 'tbl'; *nadd receives the
 * number actually added.  A scratch table (tmpkt) detects duplicates
 * within the input batch; PFR_FLAG_FEEDBACK writes a per-address
 * result code back to userland.  NOTE(review): excerpt elides several
 * statements (returns, senderr branches, closing braces).
 */
239 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
240 int *nadd, int flags)
242 	struct pfr_ktable *kt, *tmpkt;
243 	struct pfr_kentryworkq workq;
244 	struct pfr_kentry *p, *q;
247 	long tzero = time_second;
249 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
251 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
253 	kt = pfr_lookup_table(tbl);
254 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
256 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Scratch table used only to detect duplicates inside this request. */
258 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
262 	for (i = 0; i < size; i++) {
263 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
265 		if (pfr_validate_addr(&ad))
267 		p = pfr_lookup_addr(kt, &ad, 1);
268 		q = pfr_lookup_addr(tmpkt, &ad, 1);
269 		if (flags & PFR_FLAG_FEEDBACK) {
271 				ad.pfra_fback = PFR_FB_DUPLICATE;
273 				ad.pfra_fback = PFR_FB_ADDED;
274 			else if (p->pfrke_not != ad.pfra_not)
275 				ad.pfra_fback = PFR_FB_CONFLICT;
277 				ad.pfra_fback = PFR_FB_NONE;
/* Genuinely new address: create an entry and stage it on workq. */
279 		if (p == NULL && q == NULL) {
280 			p = pfr_create_kentry(&ad,
281 			!(flags & PFR_FLAG_USERIOCTL));
284 			if (pfr_route_kentry(tmpkt, p)) {
285 				pfr_destroy_kentry(p);
286 				ad.pfra_fback = PFR_FB_NONE;
288 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
292 		if (flags & PFR_FLAG_FEEDBACK)
293 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
/* Strip scratch-table radix masks before committing to the real table. */
296 	pfr_clean_node_mask(tmpkt, &workq);
297 	if (!(flags & PFR_FLAG_DUMMY)) {
298 		if (flags & PFR_FLAG_ATOMIC)
300 		pfr_insert_kentries(kt, &workq, tzero);
301 		if (flags & PFR_FLAG_ATOMIC)
304 		pfr_destroy_kentries(&workq);
307 	pfr_destroy_ktable(tmpkt, 0);
/* Error path: undo staged entries and reset any feedback already written. */
310 	pfr_clean_node_mask(tmpkt, &workq);
311 	pfr_destroy_kentries(&workq);
312 	if (flags & PFR_FLAG_FEEDBACK)
313 		pfr_reset_feedback(addr, size, flags);
314 	pfr_destroy_ktable(tmpkt, 0);
/*
 * Delete 'size' addresses from table 'tbl'; *ndel receives the count.
 * Chooses between a full-table mark/scan (O(N), good for large input)
 * and per-address lookups (O(n log N)) based on size vs table count.
 * NOTE(review): excerpt elides several statements and the mark-based
 * branch bodies.
 */
319 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
320 int *ndel, int flags)
322 	struct pfr_ktable *kt;
323 	struct pfr_kentryworkq workq;
324 	struct pfr_kentry *p;
326 	int i, rv, xdel = 0, log = 1;
328 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
330 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
332 	kt = pfr_lookup_table(tbl);
333 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
335 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
338 	 * there are two algorithms to choose from here.
340 	 * n: number of addresses to delete
341 	 * N: number of addresses in the table
343 	 * one is O(N) and is better for large 'n'
344 	 * one is O(n*LOG(N)) and is better for small 'n'
346 	 * following code try to decide which one is best.
/* Compute log2(N) by repeated halving. */
348 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
350 	if (size > kt->pfrkt_cnt/log) {
351 		/* full table scan */
354 		/* iterate over addresses to delete */
355 		for (i = 0; i < size; i++) {
356 			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
358 			if (pfr_validate_addr(&ad))
360 			p = pfr_lookup_addr(kt, &ad, 1);
/* Second pass: classify each requested address and stage deletions. */
366 	for (i = 0; i < size; i++) {
367 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
369 		if (pfr_validate_addr(&ad))
371 		p = pfr_lookup_addr(kt, &ad, 1);
372 		if (flags & PFR_FLAG_FEEDBACK) {
374 				ad.pfra_fback = PFR_FB_NONE;
375 			else if (p->pfrke_not != ad.pfra_not)
376 				ad.pfra_fback = PFR_FB_CONFLICT;
377 			else if (p->pfrke_mark)
378 				ad.pfra_fback = PFR_FB_DUPLICATE;
380 				ad.pfra_fback = PFR_FB_DELETED;
382 		if (p != NULL && p->pfrke_not == ad.pfra_not &&
385 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
388 		if (flags & PFR_FLAG_FEEDBACK)
389 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
392 	if (!(flags & PFR_FLAG_DUMMY)) {
393 		if (flags & PFR_FLAG_ATOMIC)
395 		pfr_remove_kentries(kt, &workq);
396 		if (flags & PFR_FLAG_ATOMIC)
/* Error path: clear any feedback codes already copied out. */
403 	if (flags & PFR_FLAG_FEEDBACK)
404 		pfr_reset_feedback(addr, size, flags);
/*
 * Replace the table contents with exactly the given address list:
 * computes add (addq), delete (delq) and negation-flip (changeq) work
 * queues by marking existing entries and diffing against the input.
 * With PFR_FLAG_FEEDBACK and *size2, deleted addresses are appended
 * to the user buffer after the input.  NOTE(review): excerpt elides
 * several statements (returns, senderr branches, closing braces).
 */
409 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
410 int *size2, int *nadd, int *ndel, int *nchange, int flags,
411 u_int32_t ignore_pfrt_flags)
413 	struct pfr_ktable *kt, *tmpkt;
414 	struct pfr_kentryworkq addq, delq, changeq;
415 	struct pfr_kentry *p, *q;
417 	int i, rv, xadd = 0, xdel = 0, xchange = 0;
418 	long tzero = time_second;
420 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
422 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
425 	kt = pfr_lookup_table(tbl);
426 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
428 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Scratch table detects duplicates within the input batch. */
430 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
436 	SLIST_INIT(&changeq);
437 	for (i = 0; i < size; i++) {
438 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
440 		if (pfr_validate_addr(&ad))
442 		ad.pfra_fback = PFR_FB_NONE;
443 		p = pfr_lookup_addr(kt, &ad, 1);
446 				ad.pfra_fback = PFR_FB_DUPLICATE;
/* Existing entry with opposite negation flag: queue a flip. */
450 			if (p->pfrke_not != ad.pfra_not) {
451 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
452 				ad.pfra_fback = PFR_FB_CHANGED;
456 			q = pfr_lookup_addr(tmpkt, &ad, 1);
458 				ad.pfra_fback = PFR_FB_DUPLICATE;
461 			p = pfr_create_kentry(&ad,
462 			!(flags & PFR_FLAG_USERIOCTL));
465 			if (pfr_route_kentry(tmpkt, p)) {
466 				pfr_destroy_kentry(p);
467 				ad.pfra_fback = PFR_FB_NONE;
469 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
470 				ad.pfra_fback = PFR_FB_ADDED;
475 		if (flags & PFR_FLAG_FEEDBACK)
476 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
/* Everything still unmarked was not in the input → delete it. */
479 	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
480 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
481 		if (*size2 < size+xdel) {
/* Report deleted entries after the caller's input slots. */
486 		SLIST_FOREACH(p, &delq, pfrke_workq) {
487 			pfr_copyout_addr(&ad, p);
488 			ad.pfra_fback = PFR_FB_DELETED;
489 			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
494 	pfr_clean_node_mask(tmpkt, &addq);
495 	if (!(flags & PFR_FLAG_DUMMY)) {
496 		if (flags & PFR_FLAG_ATOMIC)
498 		pfr_insert_kentries(kt, &addq, tzero);
499 		pfr_remove_kentries(kt, &delq);
500 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
501 		if (flags & PFR_FLAG_ATOMIC)
504 		pfr_destroy_kentries(&addq);
511 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
513 	pfr_destroy_ktable(tmpkt, 0);
/* Error path: undo staged adds and reset feedback. */
516 	pfr_clean_node_mask(tmpkt, &addq);
517 	pfr_destroy_kentries(&addq);
518 	if (flags & PFR_FLAG_FEEDBACK)
519 		pfr_reset_feedback(addr, size, flags);
520 	pfr_destroy_ktable(tmpkt, 0);
/*
 * Test 'size' addresses against table 'tbl' without modifying it;
 * *nmatch receives the number of positive matches.  Each address gets
 * a MATCH/NOTMATCH/NONE feedback code; PFR_FLAG_REPLACE copies the
 * matching entry back over the input address.  NOTE(review): excerpt
 * elides several statements.
 */
525 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
526 int *nmatch, int flags)
528 	struct pfr_ktable *kt;
529 	struct pfr_kentry *p;
533 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
534 	if (pfr_validate_table(tbl, 0, 0))
536 	kt = pfr_lookup_table(tbl);
537 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
540 	for (i = 0; i < size; i++) {
541 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
543 		if (pfr_validate_addr(&ad))
/* Only host addresses may be tested, not networks. */
545 		if (ADDR_NETWORK(&ad))
547 		p = pfr_lookup_addr(kt, &ad, 0);
548 		if (flags & PFR_FLAG_REPLACE)
549 			pfr_copyout_addr(&ad, p);
550 		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
551 		(p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
552 		if (p != NULL && !p->pfrke_not)
554 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
/*
 * Copy all addresses of table 'tbl' to the user buffer via a
 * PFRW_GET_ADDRS tree walk.  If the buffer is too small, only *size
 * is updated so the caller can retry.  NOTE(review): excerpt elides
 * several statements.
 */
563 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
566 	struct pfr_ktable *kt;
567 	struct pfr_walktree w;
570 	ACCEPT_FLAGS(flags, 0);
571 	if (pfr_validate_table(tbl, 0, 0))
573 	kt = pfr_lookup_table(tbl);
574 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
576 	if (kt->pfrkt_cnt > *size) {
577 		*size = kt->pfrkt_cnt;
581 	bzero(&w, sizeof(w));
582 	w.pfrw_op = PFRW_GET_ADDRS;
584 	w.pfrw_free = kt->pfrkt_cnt;
585 	w.pfrw_flags = flags;
/* Walk both radix trees (v4 then v6); the walker copies entries out. */
586 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
588 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
/* pfrw_free should hit exactly zero if the count was consistent. */
593 		kprintf("pfr_get_addrs: corruption detected (%d).\n",
597 	*size = kt->pfrkt_cnt;
/*
 * Like pfr_get_addrs() but copies out pfr_astats (address + counters +
 * tzero) via PFRW_GET_ASTATS, optionally clearing statistics afterward
 * (PFR_FLAG_CLSTATS — currently disabled in ACCEPT_FLAGS below).
 * NOTE(review): excerpt elides several statements.
 */
602 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
605 	struct pfr_ktable *kt;
606 	struct pfr_walktree w;
607 	struct pfr_kentryworkq workq;
609 	long tzero = time_second;
611 	/* XXX PFR_FLAG_CLSTATS disabled */
612 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
613 	if (pfr_validate_table(tbl, 0, 0))
615 	kt = pfr_lookup_table(tbl);
616 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
618 	if (kt->pfrkt_cnt > *size) {
619 		*size = kt->pfrkt_cnt;
623 	bzero(&w, sizeof(w));
624 	w.pfrw_op = PFRW_GET_ASTATS;
625 	w.pfrw_astats = addr;
626 	w.pfrw_free = kt->pfrkt_cnt;
627 	w.pfrw_flags = flags;
628 	if (flags & PFR_FLAG_ATOMIC)
630 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
632 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
633 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
634 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
635 		pfr_clstats_kentries(&workq, tzero, 0);
637 	if (flags & PFR_FLAG_ATOMIC)
643 		kprintf("pfr_get_astats: corruption detected (%d).\n",
647 	*size = kt->pfrkt_cnt;
/*
 * Clear the statistics of the listed addresses in table 'tbl';
 * *nzero receives the number of entries cleared.  NOTE(review):
 * excerpt elides several statements.
 */
652 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
653 int *nzero, int flags)
655 	struct pfr_ktable *kt;
656 	struct pfr_kentryworkq workq;
657 	struct pfr_kentry *p;
659 	int i, rv, xzero = 0;
661 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
663 	if (pfr_validate_table(tbl, 0, 0))
665 	kt = pfr_lookup_table(tbl);
666 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
669 	for (i = 0; i < size; i++) {
670 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
672 		if (pfr_validate_addr(&ad))
674 		p = pfr_lookup_addr(kt, &ad, 1);
675 		if (flags & PFR_FLAG_FEEDBACK) {
676 			ad.pfra_fback = (p != NULL) ?
677 			PFR_FB_CLEARED : PFR_FB_NONE;
678 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
682 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
687 	if (!(flags & PFR_FLAG_DUMMY)) {
688 		if (flags & PFR_FLAG_ATOMIC)
/* tzero = 0: clearing stats only, not restarting the age clock. */
690 		pfr_clstats_kentries(&workq, 0, 0);
691 		if (flags & PFR_FLAG_ATOMIC)
698 	if (flags & PFR_FLAG_FEEDBACK)
699 		pfr_reset_feedback(addr, size, flags);
/*
 * pfr_validate_addr: sanity-check a user-supplied pfr_addr — AF and
 * prefix length in range, no stray bits beyond the prefix, and the
 * 'not' flag strictly 0/1.  NOTE(review): excerpt elides return
 * statements and some checks.
 */
704 pfr_validate_addr(struct pfr_addr *ad)
708 	switch (ad->pfra_af) {
711 		if (ad->pfra_net > 32)
717 		if (ad->pfra_net > 128)
/* Reject host bits set beyond the prefix, byte by byte. */
724 	if (ad->pfra_net < 128 &&
725 	(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
727 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
728 		if (((caddr_t)ad)[i])
730 	if (ad->pfra_not && ad->pfra_not != 1)
/*
 * pfr_enqueue_addrs: walk both radix trees collecting entries onto
 * 'workq' (PFRW_SWEEP when 'sweep', else PFRW_ENQUEUE).
 */
738 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
739 int *naddr, int sweep)
741 	struct pfr_walktree w;
744 	bzero(&w, sizeof(w));
745 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
746 	w.pfrw_workq = workq;
747 	if (kt->pfrkt_ip4 != NULL)
748 		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
749 			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
750 	if (kt->pfrkt_ip6 != NULL)
751 		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
752 			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
/* pfr_mark_addrs: reset the mark flag on every entry (PFRW_MARK walk). */
758 pfr_mark_addrs(struct pfr_ktable *kt)
760 	struct pfr_walktree w;
762 	bzero(&w, sizeof(w));
763 	w.pfrw_op = PFRW_MARK;
764 	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
765 		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
766 	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
767 		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
/*
 * pfr_lookup_addr: find 'ad' in the table's v4/v6 radix tree.
 * Networks use rn_lookup with a prefix mask, hosts use rn_match;
 * 'exact' additionally rejects a network entry matching a host query.
 */
772 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
774 	union sockaddr_union sa, mask;
775 	struct radix_node_head *head = NULL;
776 	struct pfr_kentry *ke;
778 	bzero(&sa, sizeof(sa));
779 	if (ad->pfra_af == AF_INET) {
780 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
781 		head = kt->pfrkt_ip4;
782 	} else if ( ad->pfra_af == AF_INET6 ) {
783 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
784 		head = kt->pfrkt_ip6;
786 	if (ADDR_NETWORK(ad)) {
787 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
788 		crit_enter(); /* rn_lookup makes use of globals */
789 		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
792 		if (ke && KENTRY_RNF_ROOT(ke))
795 		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
796 		if (ke && KENTRY_RNF_ROOT(ke))
798 		if (exact && ke && KENTRY_NETWORK(ke))
/*
 * Allocate and fill a kernel table entry from a user pfr_addr.
 * 'intr' selects the interrupt-safe pool (pfr_kentry_pl2) and is
 * recorded in pfrke_intrpool so pfr_destroy_kentry() frees to the
 * matching pool.  NOTE(review): excerpt elides NULL checks/return.
 */
805 pfr_create_kentry(struct pfr_addr *ad, int intr)
807 	struct pfr_kentry *ke;
810 		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT | PR_ZERO);
812 		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
816 	if (ad->pfra_af == AF_INET)
817 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
818 	else if (ad->pfra_af == AF_INET6)
819 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
820 	ke->pfrke_af = ad->pfra_af;
821 	ke->pfrke_net = ad->pfra_net;
822 	ke->pfrke_not = ad->pfra_not;
823 	ke->pfrke_intrpool = intr;
/* Free every entry on 'workq' (safe traversal: next saved before free). */
828 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
830 	struct pfr_kentry *p, *q;
832 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
833 		q = SLIST_NEXT(p, pfrke_workq);
834 		pfr_destroy_kentry(p);
/*
 * Free one entry: release its counters, then return it to the pool it
 * was allocated from (interrupt pool vs normal pool).
 */
839 pfr_destroy_kentry(struct pfr_kentry *ke)
841 	if (ke->pfrke_counters)
842 		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
843 	if (ke->pfrke_intrpool)
844 		pool_put(&pfr_kentry_pl2, ke);
846 		pool_put(&pfr_kentry_pl, ke);
/*
 * pfr_insert_kentries: route every staged entry into the table's radix
 * trees, stamping each with creation time 'tzero'.  NOTE(review):
 * excerpt elides error handling and the count update.
 */
850 pfr_insert_kentries(struct pfr_ktable *kt,
851 struct pfr_kentryworkq *workq, long tzero)
853 	struct pfr_kentry *p;
856 	SLIST_FOREACH(p, workq, pfrke_workq) {
857 		rv = pfr_route_kentry(kt, p);
859 			kprintf("pfr_insert_kentries: cannot route entry "
863 		p->pfrke_tzero = tzero;
/* pfr_insert_kentry: create-and-route a single address if not present. */
870 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
872 	struct pfr_kentry *p;
875 	p = pfr_lookup_addr(kt, ad, 1);
878 	p = pfr_create_kentry(ad, 1);
882 	rv = pfr_route_kentry(kt, p);
886 	p->pfrke_tzero = tzero;
/* pfr_remove_kentries: unroute each queued entry, then free them all. */
893 pfr_remove_kentries(struct pfr_ktable *kt,
894 struct pfr_kentryworkq *workq)
896 	struct pfr_kentry *p;
899 	SLIST_FOREACH(p, workq, pfrke_workq) {
900 		pfr_unroute_kentry(kt, p);
904 	pfr_destroy_kentries(workq);
/*
 * pfr_clean_node_mask: unroute entries from a scratch table without
 * freeing them (they are about to be routed into the real table).
 */
908 pfr_clean_node_mask(struct pfr_ktable *kt,
909 struct pfr_kentryworkq *workq)
911 	struct pfr_kentry *p;
913 	SLIST_FOREACH(p, workq, pfrke_workq)
914 		pfr_unroute_kentry(kt, p);
/*
 * pfr_clstats_kentries: reset each queued entry's statistics — flip
 * the negation flag when 'negchange', drop the counter record, and
 * restart the age clock at 'tzero'.
 */
918 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
920 	struct pfr_kentry *p;
922 	SLIST_FOREACH(p, workq, pfrke_workq) {
925 			p->pfrke_not = !p->pfrke_not;
926 		if (p->pfrke_counters) {
927 			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
928 			p->pfrke_counters = NULL;
931 		p->pfrke_tzero = tzero;
/*
 * pfr_reset_feedback: rewrite every address's feedback code to
 * PFR_FB_NONE (used on error paths after partial feedback was written).
 */
936 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
941 	for (i = 0; i < size; i++) {
942 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
944 		ad.pfra_fback = PFR_FB_NONE;
945 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
/*
 * pfr_prepare_network: build a netmask sockaddr for 'net' prefix bits
 * of family 'af' into *sa (all-ones words, then a partial word, then
 * zeros for v6).  NOTE(review): excerpt elides some branch lines.
 */
951 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
955 	bzero(sa, sizeof(*sa));
957 	sa->sin.sin_len = sizeof(sa->sin);
958 	sa->sin.sin_family = AF_INET;
959 	sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
960 	} else if (af == AF_INET6) {
961 	sa->sin6.sin6_len = sizeof(sa->sin6);
962 	sa->sin6.sin6_family = AF_INET6;
963 	for (i = 0; i < 4; i++) {
965 	sa->sin6.sin6_addr.s6_addr32[i] =
966 	net ? htonl(-1 << (32-net)) : 0;
969 	sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
/*
 * pfr_route_kentry: insert 'ke' into the table's v4/v6 radix tree —
 * with a prefix mask for network entries, as a host route otherwise.
 * Returns -1 if rn_addroute failed (duplicate), 0 on success.
 */
976 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
978 	union sockaddr_union mask;
979 	struct radix_node *rn;
980 	struct radix_node_head *head = NULL;
982 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
983 	if (ke->pfrke_af == AF_INET)
984 		head = kt->pfrkt_ip4;
985 	else if (ke->pfrke_af == AF_INET6)
986 		head = kt->pfrkt_ip6;
989 	if (KENTRY_NETWORK(ke)) {
990 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
991 		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
994 		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
998 	return (rn == NULL ? -1 : 0);
/*
 * pfr_unroute_kentry: remove 'ke' from the matching radix tree,
 * mirroring pfr_route_kentry's masked/host distinction.
 */
1002 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1004 	union sockaddr_union mask;
1005 	struct radix_node *rn;
1006 	struct radix_node_head *head = NULL;
1008 	if (ke->pfrke_af == AF_INET)
1009 		head = kt->pfrkt_ip4;
1010 	else if (ke->pfrke_af == AF_INET6)
1011 		head = kt->pfrkt_ip6;
1014 	if (KENTRY_NETWORK(ke)) {
1015 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1016 		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
1018 		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
1022 		kprintf("pfr_unroute_kentry: delete failed.\n");
/* pfr_copyout_addr: convert a kernel entry back to user pfr_addr form. */
1029 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1031 	bzero(ad, sizeof(*ad));
1034 	ad->pfra_af = ke->pfrke_af;
1035 	ad->pfra_net = ke->pfrke_net;
1036 	ad->pfra_not = ke->pfrke_not;
1037 	if (ad->pfra_af == AF_INET)
1038 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1039 	else if (ad->pfra_af == AF_INET6)
1040 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
/*
 * Radix-tree walker callback dispatching on w->pfrw_op: enqueue/sweep
 * entries, copy addresses or stats out to userland, pick the Nth
 * positive entry (countdown in pfrw_cnt), or refresh a dynaddr's
 * address/mask.  Returning 1 aborts the walk.  NOTE(review): excerpt
 * elides several case labels, returns and braces.
 */
1044 pfr_walktree(struct radix_node *rn, void *arg)
1046 	struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1047 	struct pfr_walktree *w = arg;
1048 	int flags = w->pfrw_flags;
1050 	switch (w->pfrw_op) {
1059 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1062 	case PFRW_GET_ADDRS:
1063 		if (w->pfrw_free-- > 0) {
1066 			pfr_copyout_addr(&ad, ke);
1067 			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1072 	case PFRW_GET_ASTATS:
1073 		if (w->pfrw_free-- > 0) {
1074 			struct pfr_astats as;
1076 			pfr_copyout_addr(&as.pfras_a, ke);
/* No counter record yet → report zeros and flag PFR_FB_NOCOUNT. */
1079 			if (ke->pfrke_counters) {
1080 				bcopy(ke->pfrke_counters->pfrkc_packets,
1081 				as.pfras_packets, sizeof(as.pfras_packets));
1082 				bcopy(ke->pfrke_counters->pfrkc_bytes,
1083 				as.pfras_bytes, sizeof(as.pfras_bytes));
1085 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
1086 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1087 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1090 			as.pfras_tzero = ke->pfrke_tzero;
1092 			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
1099 		break; /* negative entries are ignored */
1100 		if (!w->pfrw_cnt--) {
1101 			w->pfrw_kentry = ke;
1102 			return (1); /* finish search */
1105 	case PFRW_DYNADDR_UPDATE:
/* Only the first entry of each AF feeds the dynaddr; count the rest. */
1106 		if (ke->pfrke_af == AF_INET) {
1107 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1109 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1110 			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1111 			&ke->pfrke_sa, AF_INET);
1112 			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1113 			&pfr_mask, AF_INET);
1114 		} else if (ke->pfrke_af == AF_INET6){
1115 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1117 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1118 			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1119 			&ke->pfrke_sa, AF_INET6);
1120 			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1121 			&pfr_mask, AF_INET6);
/*
 * Deactivate all tables matching 'filter' (skipping the reserved
 * anchor); *ndel receives the count.  Tables are flagged via
 * pfrkt_nflags and committed by pfr_setflags_ktables().
 * NOTE(review): excerpt elides several statements.
 */
1129 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1131 	struct pfr_ktableworkq workq;
1132 	struct pfr_ktable *p;
1135 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1137 	if (pfr_fix_anchor(filter->pfrt_anchor))
1139 	if (pfr_table_count(filter, flags) < 0)
1143 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1144 		if (pfr_skip_table(filter, p, flags))
1146 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1148 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1150 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1151 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1154 	if (!(flags & PFR_FLAG_DUMMY)) {
1155 		if (flags & PFR_FLAG_ATOMIC)
1157 		pfr_setflags_ktables(&workq);
1158 		if (flags & PFR_FLAG_ATOMIC)
/*
 * Create 'size' tables (or reactivate/update flags on existing ones);
 * *nadd receives the count.  Anchored tables also get a root table
 * found or created on the fly.  New tables go on addq, flag changes on
 * changeq.  NOTE(review): excerpt elides several statements.
 */
1167 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1169 	struct pfr_ktableworkq addq, changeq;
1170 	struct pfr_ktable *p, *q, *r, key;
1171 	int i, rv, xadd = 0;
1172 	long tzero = time_second;
1174 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1176 	SLIST_INIT(&changeq);
1177 	for (i = 0; i < size; i++) {
1178 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1180 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1181 		flags & PFR_FLAG_USERIOCTL))
1183 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1184 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1186 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
/* Skip if the same table was already staged in this request. */
1189 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1190 				if (!pfr_ktable_compare(p, q))
1193 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1195 			if (!key.pfrkt_anchor[0])
1198 			/* find or create root table */
1199 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1200 			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1205 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1206 				if (!pfr_ktable_compare(&key, q)) {
1211 			key.pfrkt_flags = 0;
1212 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1215 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
/* Table exists but inactive: stage a flag update instead of a create. */
1217 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1218 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1219 				if (!pfr_ktable_compare(&key, q))
1221 			p->pfrkt_nflags = (p->pfrkt_flags &
1222 			~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1223 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1229 	if (!(flags & PFR_FLAG_DUMMY)) {
1230 		if (flags & PFR_FLAG_ATOMIC)
1232 		pfr_insert_ktables(&addq);
1233 		pfr_setflags_ktables(&changeq);
1234 		if (flags & PFR_FLAG_ATOMIC)
1237 		pfr_destroy_ktables(&addq, 0);
/* Error path: discard staged creations. */
1242 	pfr_destroy_ktables(&addq, 0);
/*
 * Deactivate the 'size' named tables; *ndel receives the count.
 * Duplicate names within the request are staged only once.
 * NOTE(review): excerpt elides several statements.
 */
1247 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1249 	struct pfr_ktableworkq workq;
1250 	struct pfr_ktable *p, *q, key;
1253 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1255 	for (i = 0; i < size; i++) {
1256 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1258 		if (pfr_validate_table(&key.pfrkt_t, 0,
1259 		flags & PFR_FLAG_USERIOCTL))
1261 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1262 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1263 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1264 				if (!pfr_ktable_compare(p, q))
1266 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1267 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1274 	if (!(flags & PFR_FLAG_DUMMY)) {
1275 		if (flags & PFR_FLAG_ATOMIC)
1277 		pfr_setflags_ktables(&workq);
1278 		if (flags & PFR_FLAG_ATOMIC)
/*
 * Copy out the pfr_table headers of all tables matching 'filter'.
 * If the caller's buffer is too small, only *size is updated.
 * NOTE(review): excerpt elides several statements.
 */
1287 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1290 	struct pfr_ktable *p;
1293 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1294 	if (pfr_fix_anchor(filter->pfrt_anchor))
1296 	n = nn = pfr_table_count(filter, flags);
1303 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1304 		if (pfr_skip_table(filter, p, flags))
1308 		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
/* n should reach exactly zero if the precomputed count was right. */
1312 		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
/*
 * Copy out per-table statistics (pfr_tstats) for tables matching
 * 'filter', optionally clearing them afterward (PFR_FLAG_CLSTATS).
 * NOTE(review): excerpt elides several statements.
 */
1320 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1323 	struct pfr_ktable *p;
1324 	struct pfr_ktableworkq workq;
1326 	long tzero = time_second;
1328 	/* XXX PFR_FLAG_CLSTATS disabled */
1329 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
1330 	if (pfr_fix_anchor(filter->pfrt_anchor))
1332 	n = nn = pfr_table_count(filter, flags);
1340 	if (flags & PFR_FLAG_ATOMIC)
1342 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1343 		if (pfr_skip_table(filter, p, flags))
1347 		if (!(flags & PFR_FLAG_ATOMIC))
1349 		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
1353 		if (!(flags & PFR_FLAG_ATOMIC))
1355 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1357 	if (flags & PFR_FLAG_CLSTATS)
1358 		pfr_clstats_ktables(&workq, tzero,
1359 		flags & PFR_FLAG_ADDRSTOO);
1360 	if (flags & PFR_FLAG_ATOMIC)
1363 		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
/*
 * Clear the statistics of the 'size' named tables; *nzero receives the
 * count.  PFR_FLAG_ADDRSTOO also clears per-address stats.
 * NOTE(review): excerpt elides several statements.
 */
1371 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1373 	struct pfr_ktableworkq workq;
1374 	struct pfr_ktable *p, key;
1376 	long tzero = time_second;
1378 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1381 	for (i = 0; i < size; i++) {
1382 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1384 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1386 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1388 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1392 	if (!(flags & PFR_FLAG_DUMMY)) {
1393 		if (flags & PFR_FLAG_ATOMIC)
1395 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1396 		if (flags & PFR_FLAG_ATOMIC)
/*
 * Set/clear user flags on the 'size' named tables; *nchange counts
 * tables whose flags changed, *ndel those that became deletable
 * (PERSIST cleared on an unreferenced table).  Rejects non-user flags
 * and overlapping set/clear masks.  NOTE(review): excerpt elides
 * several statements.
 */
1405 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1406 int *nchange, int *ndel, int flags)
1408 	struct pfr_ktableworkq workq;
1409 	struct pfr_ktable *p, *q, key;
1410 	int i, xchange = 0, xdel = 0;
1412 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1413 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1414 	(clrflag & ~PFR_TFLAG_USRMASK) ||
1415 	(setflag & clrflag))
1418 	for (i = 0; i < size; i++) {
1419 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1421 		if (pfr_validate_table(&key.pfrkt_t, 0,
1422 		flags & PFR_FLAG_USERIOCTL))
1424 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1425 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1426 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1428 			if (p->pfrkt_nflags == p->pfrkt_flags)
1430 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1431 				if (!pfr_ktable_compare(p, q))
1433 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq)
1434 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1435 			(clrflag & PFR_TFLAG_PERSIST) &&
1436 			!(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1444 	if (!(flags & PFR_FLAG_DUMMY)) {
1445 		if (flags & PFR_FLAG_ATOMIC)
1447 		pfr_setflags_ktables(&workq);
1448 		if (flags & PFR_FLAG_ATOMIC)
1451 	if (nchange != NULL)
/*
 * Begin an inactive-table transaction for anchor 'trs': discard any
 * previously staged inactive tables, hand back a fresh ticket in
 * *ticket, and report the number discarded in *ndel.  NOTE(review):
 * excerpt elides several statements.
 */
1459 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1461 	struct pfr_ktableworkq workq;
1462 	struct pfr_ktable *p;
1463 	struct pf_ruleset *rs;
1466 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1467 	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1471 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1472 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1473 		pfr_skip_table(trs, p, 0))
1475 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1476 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1479 	if (!(flags & PFR_FLAG_DUMMY)) {
1480 		pfr_setflags_ktables(&workq);
/* New ticket invalidates any in-flight pfr_ina_define with the old one. */
1482 		*ticket = ++rs->tticket;
1485 		pf_remove_if_empty_ruleset(rs);
/*
 * Stage a table (and optionally its addresses) into the inactive set of a
 * transaction previously opened with pfr_ina_begin().  The addresses are
 * built into a detached "shadow" ktable that pfr_ina_commit() later swaps
 * in.  Requires the caller's ticket to match the open transaction.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1492 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1493 int *nadd, int *naddr, u_int32_t ticket, int flags)
1495 struct pfr_ktableworkq tableq;
1496 struct pfr_kentryworkq addrq;
1497 struct pfr_ktable *kt, *rt, *shadow, key;
1498 struct pfr_kentry *p;
1500 struct pf_ruleset *rs;
1501 int i, rv, xadd = 0, xaddr = 0;
/* ADDRSTOO must accompany a non-zero address count */
1503 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1504 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1506 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1507 flags & PFR_FLAG_USERIOCTL))
/* transaction must be open and the ticket current */
1509 rs = pf_find_ruleset(tbl->pfrt_anchor);
1510 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1512 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1513 SLIST_INIT(&tableq);
/* struct pfr_ktable begins with a struct pfr_table, hence the cast */
1514 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1516 kt = pfr_create_ktable(tbl, 0, 1);
1519 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
/* anchored tables need a root table of the same name at the top level */
1521 if (!tbl->pfrt_anchor[0])
1524 /* find or create root table */
1525 bzero(&key, sizeof(key));
1526 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1527 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1529 kt->pfrkt_root = rt;
1532 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
/* creation failed: undo the tables queued so far */
1534 pfr_destroy_ktables(&tableq, 0);
1537 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1538 kt->pfrkt_root = rt;
1539 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
/* detached shadow table that will hold the staged addresses */
1542 shadow = pfr_create_ktable(tbl, 0, 0);
1543 if (shadow == NULL) {
1544 pfr_destroy_ktables(&tableq, 0);
/* copy in and route each address; duplicates are silently skipped */
1548 for (i = 0; i < size; i++) {
1549 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1551 if (pfr_validate_addr(&ad))
1553 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1555 p = pfr_create_kentry(&ad, 0);
1558 if (pfr_route_kentry(shadow, p)) {
1559 pfr_destroy_kentry(p);
1562 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1565 if (!(flags & PFR_FLAG_DUMMY)) {
/* replace any previously staged shadow for this table */
1566 if (kt->pfrkt_shadow != NULL)
1567 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1568 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1569 pfr_insert_ktables(&tableq);
/* NO_ADDRESSES marks "table defined without an address list" */
1570 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1571 xaddr : NO_ADDRESSES;
1572 kt->pfrkt_shadow = shadow;
/* DUMMY path: tear everything down again after counting */
1574 pfr_clean_node_mask(shadow, &addrq);
1575 pfr_destroy_ktable(shadow, 0);
1576 pfr_destroy_ktables(&tableq, 0);
1577 pfr_destroy_kentries(&addrq);
/* error path: release shadow, queued tables and staged entries */
1585 pfr_destroy_ktable(shadow, 0);
1586 pfr_destroy_ktables(&tableq, 0);
1587 pfr_destroy_kentries(&addrq);
/*
 * Abort an open table transaction: clear the INACTIVE flag on every table
 * staged under the anchor (which also destroys their shadows via
 * pfr_setflags_ktable()).  Ticket must match the open transaction.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1592 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1594 struct pfr_ktableworkq workq;
1595 struct pfr_ktable *p;
1596 struct pf_ruleset *rs;
1599 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1600 rs = pf_find_ruleset(trs->pfrt_anchor);
1601 if (rs == NULL || !rs->topen || ticket != rs->tticket)
/* collect every INACTIVE table belonging to this anchor */
1604 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1605 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1606 pfr_skip_table(trs, p, 0))
1608 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1609 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1612 if (!(flags & PFR_FLAG_DUMMY)) {
1613 pfr_setflags_ktables(&workq);
1615 pf_remove_if_empty_ruleset(rs);
/*
 * Commit an open table transaction: every INACTIVE table under the anchor
 * has its shadow swapped in by pfr_commit_ktable().  Reports the number of
 * tables added (new) and changed (pre-existing ACTIVE ones).
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1623 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1624 int *nchange, int flags)
1626 struct pfr_ktable *p, *q;
1627 struct pfr_ktableworkq workq;
1628 struct pf_ruleset *rs;
1629 int xadd = 0, xchange = 0;
1630 long tzero = time_second;
1632 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1633 rs = pf_find_ruleset(trs->pfrt_anchor);
1634 if (rs == NULL || !rs->topen || ticket != rs->tticket)
/* queue the staged tables; ACTIVE ones count as changes, not adds */
1638 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1639 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1640 pfr_skip_table(trs, p, 0))
1642 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1643 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1649 if (!(flags & PFR_FLAG_DUMMY)) {
1650 if (flags & PFR_FLAG_ATOMIC)
/* manual iteration: pfr_commit_ktable() may unlink p from the list */
1652 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1653 q = SLIST_NEXT(p, pfrkt_workq);
1654 pfr_commit_ktable(p, tzero);
1656 if (flags & PFR_FLAG_ATOMIC)
1659 pf_remove_if_empty_ruleset(rs);
1663 if (nchange != NULL)
/*
 * Make a staged shadow table live.  Three cases:
 *  - shadow has no address list (NO_ADDRESSES): just (re)activate kt;
 *  - kt is ACTIVE: merge shadow addresses into kt entry by entry,
 *    computing add/change/delete/garbage work queues;
 *  - kt is empty: simply swap the radix heads and counters wholesale.
 * Finally recompute kt's flags and destroy the shadow.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1670 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1672 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1675 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1676 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1677 pfr_clstats_ktable(kt, tzero, 1);
1678 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1679 /* kt might contain addresses */
1680 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1681 struct pfr_kentry *p, *q, *next;
1684 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1687 SLIST_INIT(&changeq);
1689 SLIST_INIT(&garbageq);
1690 pfr_clean_node_mask(shadow, &addrq);
/* classify each shadow entry against the live table */
1691 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1692 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1693 pfr_copyout_addr(&ad, p);
1694 q = pfr_lookup_addr(kt, &ad, 1);
/* already present: only the negation bit may need flipping */
1696 if (q->pfrke_not != p->pfrke_not)
1697 SLIST_INSERT_HEAD(&changeq, q,
1700 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
/* new address: insert into the live table */
1702 p->pfrke_tzero = tzero;
1703 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
/* anything in kt not marked during the scan above gets deleted */
1706 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1707 pfr_insert_kentries(kt, &addq, tzero);
1708 pfr_remove_kentries(kt, &delq);
1709 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1710 pfr_destroy_kentries(&garbageq);
1712 /* kt cannot contain addresses */
/* cheap path: exchange the whole radix trees and counters */
1713 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1715 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1717 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1718 pfr_clstats_ktable(kt, tzero, 1);
/* user flags come from the shadow; ACTIVE set, INACTIVE cleared */
1720 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1721 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1722 & ~PFR_TFLAG_INACTIVE;
1723 pfr_destroy_ktable(shadow, 0);
1724 kt->pfrkt_shadow = NULL;
1725 pfr_setflags_ktable(kt, nflags);
/*
 * Sanity-check a user-supplied pfr_table: non-empty NUL-terminated name
 * with zeroed padding, optionally no reserved anchor, normalized anchor
 * path, and no flags outside `allowedflags`.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1729 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1733 if (!tbl->pfrt_name[0])
1735 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
/* last byte must be NUL so the name fits the fixed-size buffer */
1737 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
/* everything past the NUL terminator must be zero padding */
1739 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1740 if (tbl->pfrt_name[i])
1742 if (pfr_fix_anchor(tbl->pfrt_anchor))
1744 if (tbl->pfrt_flags & ~allowedflags)
1750 * Rewrite anchors referenced by tables to remove slashes
1751 * and check for validity.
/*
 * Normalize a table's anchor path in place: strip leading slashes, then
 * require a NUL-terminated path with zeroed padding out to MAXPATHLEN.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1754 pfr_fix_anchor(char *anchor)
1756 size_t siz = MAXPATHLEN;
1759 if (anchor[0] == '/') {
/* skip over all leading '/' characters */
1765 while (*++path == '/')
/* shift the path left and zero the vacated tail */
1767 bcopy(path, anchor, siz - off);
1768 memset(anchor + siz - off, 0, off);
/* must be NUL-terminated within the buffer */
1770 if (anchor[siz - 1])
/* padding after the terminator must be all zero */
1772 for (i = strlen(anchor); i < siz; i++)
/*
 * Return the number of tables visible through `filter`: all tables when
 * PFR_FLAG_ALLRSETS is set, otherwise the count for the filter's anchor
 * (-1 if that ruleset does not exist) or for the main ruleset.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1779 pfr_table_count(struct pfr_table *filter, int flags)
1781 struct pf_ruleset *rs;
1783 if (flags & PFR_FLAG_ALLRSETS)
1784 return (pfr_ktable_cnt);
1785 if (filter->pfrt_anchor[0]) {
1786 rs = pf_find_ruleset(filter->pfrt_anchor);
1787 return ((rs != NULL) ? rs->tables : -1);
1789 return (pf_main_ruleset.tables);
/*
 * Decide whether a table should be skipped for the given filter: never
 * with PFR_FLAG_ALLRSETS, otherwise when the anchors differ.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1793 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1795 if (flags & PFR_FLAG_ALLRSETS)
1797 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
/* Insert every ktable on the work queue into the global tree. */
1803 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1805 struct pfr_ktable *p;
1807 SLIST_FOREACH(p, workq, pfrkt_workq)
1808 pfr_insert_ktable(p);
/*
 * Insert one ktable into the global RB tree; if it hangs off a root
 * table, take an anchor reference and mark the root REFDANCHOR on the
 * 0 -> 1 transition.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1812 pfr_insert_ktable(struct pfr_ktable *kt)
1814 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1816 if (kt->pfrkt_root != NULL)
1817 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1818 pfr_setflags_ktable(kt->pfrkt_root,
1819 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
/*
 * Apply the pending pfrkt_nflags to each queued ktable.  Manual
 * iteration (not SLIST_FOREACH): pfr_setflags_ktable() may destroy p,
 * so its successor is fetched first.
 */
1823 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1825 struct pfr_ktable *p, *q;
1827 for (p = SLIST_FIRST(workq); p; p = q) {
1828 q = SLIST_NEXT(p, pfrkt_workq);
1829 pfr_setflags_ktable(p, p->pfrkt_nflags);
/*
 * Apply a new flag word to one ktable, enforcing the flag invariants:
 * a table that is neither REFERENCED nor PERSIST cannot stay ACTIVE;
 * an inactive table loses its user flags; a table with no SETMASK flags
 * left is removed from the tree and destroyed (dropping its root's
 * anchor refcount on the way).  Deactivation flushes the addresses,
 * and clearing INACTIVE destroys any staged shadow.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1834 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1836 struct pfr_kentryworkq addrq;
1838 if (!(newf & PFR_TFLAG_REFERENCED) &&
1839 !(newf & PFR_TFLAG_PERSIST))
1840 newf &= ~PFR_TFLAG_ACTIVE;
1841 if (!(newf & PFR_TFLAG_ACTIVE))
1842 newf &= ~PFR_TFLAG_USRMASK;
/* no remaining reason to keep the table: unlink and destroy */
1843 if (!(newf & PFR_TFLAG_SETMASK)) {
1844 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
/* drop anchor ref; clear REFDANCHOR on the root at 1 -> 0 */
1845 if (kt->pfrkt_root != NULL)
1846 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1847 pfr_setflags_ktable(kt->pfrkt_root,
1848 kt->pfrkt_root->pfrkt_flags &
1849 ~PFR_TFLAG_REFDANCHOR);
1850 pfr_destroy_ktable(kt, 1);
/* table going inactive: flush its address entries */
1854 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1855 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1856 pfr_remove_kentries(kt, &addrq);
/* INACTIVE bit cleared: any staged shadow is now stale */
1858 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1859 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1860 kt->pfrkt_shadow = NULL;
1862 kt->pfrkt_flags = newf;
/* Clear statistics on every queued ktable. */
1866 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1868 struct pfr_ktable *p;
1870 SLIST_FOREACH(p, workq, pfrkt_workq)
1871 pfr_clstats_ktable(p, tzero, recurse);
/*
 * Reset one table's counters (packets/bytes/match/nomatch) and stamp its
 * zero time; with `recurse` the per-address entry stats are cleared too.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1875 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1877 struct pfr_kentryworkq addrq;
1880 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1881 pfr_clstats_kentries(&addrq, tzero, 0);
1884 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1885 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1886 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1888 kt->pfrkt_tzero = tzero;
/*
 * Allocate and initialize a ktable: zeroed pool allocation, optional
 * attachment to (or creation of) the anchor's ruleset, and one radix
 * head each for IPv4 and IPv6 keyed on the full address width.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1892 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1894 struct pfr_ktable *kt;
1895 struct pf_ruleset *rs;
/* PR_LIMITFAIL: fail rather than block when the pool limit is hit */
1897 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT| PR_ZERO | PR_LIMITFAIL);
1902 if (attachruleset) {
1903 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1905 pfr_destroy_ktable(kt, 0);
1912 KKASSERT(pf_maskhead != NULL);
/* key length is the address size in bits for each family */
1913 if (!rn_inithead((void **)&kt->pfrkt_ip4, pf_maskhead,
1914 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1915 !rn_inithead((void **)&kt->pfrkt_ip6, pf_maskhead,
1916 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1917 pfr_destroy_ktable(kt, 0);
1920 kt->pfrkt_tzero = tzero;
/*
 * Destroy every queued ktable.  Successor fetched before each destroy
 * since the node is freed.
 */
1926 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1928 struct pfr_ktable *p, *q;
1930 for (p = SLIST_FIRST(workq); p; p = q) {
1931 q = SLIST_NEXT(p, pfrkt_workq);
1932 pfr_destroy_ktable(p, flushaddr);
/*
 * Free one ktable: optionally flush its address entries, free both radix
 * heads, recursively destroy any shadow, drop the ruleset's table count
 * (removing an emptied ruleset) and return the node to the pool.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1937 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1939 struct pfr_kentryworkq addrq;
1942 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1943 pfr_clean_node_mask(kt, &addrq);
1944 pfr_destroy_kentries(&addrq);
1946 if (kt->pfrkt_ip4 != NULL)
1947 kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1949 if (kt->pfrkt_ip6 != NULL)
1950 kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1951 if (kt->pfrkt_shadow != NULL)
1952 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1953 if (kt->pfrkt_rs != NULL) {
1954 kt->pfrkt_rs->tables--;
1955 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1957 pool_put(&pfr_ktable_pl, kt);
/* RB-tree ordering: by table name first, then by anchor path. */
1961 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1965 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1967 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
/* Look up a ktable by the user-visible pfr_table key. */
1971 pfr_lookup_table(struct pfr_table *tbl)
1973 /* struct pfr_ktable start like a struct pfr_table */
1974 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1975 (struct pfr_ktable *)tbl));
/*
 * Test whether address `a` matches table `kt` (falling back to the root
 * table when kt itself is not ACTIVE).  A radix hit on the RNF_ROOT
 * sentinel counts as no match; a hit on a negated entry is a non-match.
 * Uses the static pfr_sin/pfr_sin6 scratch sockaddrs as lookup keys.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
1979 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1981 struct pfr_kentry *ke = NULL;
1984 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1985 kt = kt->pfrkt_root;
1986 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1992 pfr_sin.sin_addr.s_addr = a->addr32[0];
1993 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
1995 if (ke && KENTRY_RNF_ROOT(ke))
2001 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2002 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2004 if (ke && KENTRY_RNF_ROOT(ke))
/* negated entries ("!addr") invert the sense of a hit */
2009 match = (ke && !ke->pfrke_not);
2013 kt->pfrkt_nomatch++;
/*
 * Account a packet of `len` bytes against table `kt` (or its root if kt
 * is not ACTIVE): bump the table's direction/op counters and, when the
 * table collects per-address counters, the matched entry's counters too.
 * A mismatch between the lookup result and `notrule` is downgraded to
 * PFR_OP_XPASS (with a console warning if the op was not PASS).
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
2018 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2019 u_int64_t len, int dir_out, int op_pass, int notrule)
2021 struct pfr_kentry *ke = NULL;
2023 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2024 kt = kt->pfrkt_root;
2025 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2031 pfr_sin.sin_addr.s_addr = a->addr32[0];
2032 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2034 if (ke && KENTRY_RNF_ROOT(ke))
2040 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2041 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2043 if (ke && KENTRY_RNF_ROOT(ke))
/* lookup disagrees with the rule's expectation: count as XPASS */
2050 if ((ke == NULL || ke->pfrke_not) != notrule) {
2051 if (op_pass != PFR_OP_PASS)
2052 kprintf("pfr_update_stats: assertion failed.\n");
2053 op_pass = PFR_OP_XPASS;
2055 kt->pfrkt_packets[dir_out][op_pass]++;
2056 kt->pfrkt_bytes[dir_out][op_pass] += len;
2057 if (ke != NULL && op_pass != PFR_OP_XPASS &&
2058 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
/* per-entry counters are allocated lazily on first use */
2059 if (ke->pfrke_counters == NULL)
2060 ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2061 PR_NOWAIT | PR_ZERO);
2062 if (ke->pfrke_counters != NULL) {
2063 ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2064 ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
/*
 * Attach a rule to the named table in the given ruleset, creating the
 * table (and, for anchored rulesets, its top-level root table) on
 * demand.  Takes a RULE reference and marks the table REFERENCED on the
 * 0 -> 1 transition.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
2070 pfr_attach_table(struct pf_ruleset *rs, char *name)
2072 struct pfr_ktable *kt, *rt;
2073 struct pfr_table tbl;
2074 struct pf_anchor *ac = rs->anchor;
2076 bzero(&tbl, sizeof(tbl));
2077 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2079 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2080 kt = pfr_lookup_table(&tbl);
2082 kt = pfr_create_ktable(&tbl, time_second, 1);
/* anchored table: ensure the same-named root table exists at top level */
2086 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2087 rt = pfr_lookup_table(&tbl);
2089 rt = pfr_create_ktable(&tbl, 0, 1);
2091 pfr_destroy_ktable(kt, 0);
2094 pfr_insert_ktable(rt);
2096 kt->pfrkt_root = rt;
2098 pfr_insert_ktable(kt);
2100 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2101 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
/*
 * Drop a rule's reference on a table; on the last reference the
 * REFERENCED flag is cleared (which may destroy the table via
 * pfr_setflags_ktable()).  An already-zero refcount is logged, not
 * panicked on.
 */
2106 pfr_detach_table(struct pfr_ktable *kt)
2108 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2109 kprintf("pfr_detach_table: refcount = %d.\n",
2110 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2111 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2112 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
/*
 * round-robin address-pool lookup: pick the next address from the table
 * starting at index *pidx, resuming from *counter when it still falls
 * inside the current block.  Skips nested (more-specific) blocks by
 * advancing the counter past them.  Returns the chosen block via
 * raddr/rmask and updates *pidx/*counter for the next call.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
2116 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2117 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2119 struct pfr_kentry *ke, *ke2 = NULL;
2120 struct pf_addr *addr = NULL;
2121 union sockaddr_union mask;
2122 int idx = -1, use_counter = 0;
/* scratch sockaddrs double as the working address for each family */
2125 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2126 else if (af == AF_INET6)
2127 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2128 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2129 kt = kt->pfrkt_root;
2130 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2135 if (counter != NULL && idx >= 0)
2141 ke = pfr_kentry_byidx(kt, idx, af);
2143 kt->pfrkt_nomatch++;
2146 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2147 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2148 *rmask = SUNION2PF(&pfr_mask, af);
2151 /* is supplied address within block? */
2152 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2153 /* no, go to next block in table */
2158 PF_ACPY(addr, counter, af);
2160 /* use first address of block */
2161 PF_ACPY(addr, *raddr, af);
2164 if (!KENTRY_NETWORK(ke)) {
2165 /* this is a single IP address - no possible nested block */
2166 PF_ACPY(counter, addr, af);
2172 /* we don't want to use a nested block */
2174 ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2176 else if (af == AF_INET6)
2177 ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2179 /* no need to check KENTRY_RNF_ROOT() here */
2181 /* lookup return the same block - perfect */
2182 PF_ACPY(counter, addr, af);
2188 /* we need to increase the counter past the nested block */
/*
 * NOTE(review): hard-coded AF_INET looks wrong when af == AF_INET6;
 * later OpenBSD passes `af` here.  Confirm against upstream before
 * changing -- left as-is since this extract is incomplete.
 */
2189 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2190 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2192 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2193 /* ok, we reached the end of our main block */
2194 /* go to next block in table */
/*
 * Return the idx-th entry of the table for the given family by walking
 * the corresponding radix tree with a PFRW_POOL_GET walker.
 * NOTE(review): interior lines elided in this extract; code left as found.
 */
2203 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2205 struct pfr_walktree w;
2207 bzero(&w, sizeof(w));
2208 w.pfrw_op = PFRW_POOL_GET;
2214 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2215 return (w.pfrw_kentry);
2219 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2220 return (w.pfrw_kentry);
2228 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2230 struct pfr_walktree w;
2232 bzero(&w, sizeof(w));
2233 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2237 dyn->pfid_acnt4 = 0;
2238 dyn->pfid_acnt6 = 0;
2239 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2240 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2241 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2242 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);