1 /* $FreeBSD: src/sys/contrib/pf/net/pf_table.c,v 1.5 2004/07/28 06:14:44 kan Exp $ */
2 /* $OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $ */
3 /* $DragonFly: src/sys/net/pf/pf_table.c,v 1.1 2004/09/19 22:32:47 joerg Exp $ */
6 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
8 * Copyright (c) 2002 Cedric Berger
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * - Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
38 #include "opt_inet6.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/socket.h>
44 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <vm/vm_zone.h>
49 #include <net/route.h>
50 #include <netinet/in.h>
51 #include <net/pf/pfvar.h>
/*
 * Ioctl helper macros.
 * NOTE(review): the embedded line numbering jumps throughout this listing,
 * so several macro bodies below are visibly incomplete (ACCEPT_FLAGS,
 * FILLIN_SIN/FILLIN_SIN6, SWAP) -- confirm against the original pf_table.c.
 */
/* Fail the ioctl if caller passed any flag outside 'oklist' (body truncated). */
53 #define ACCEPT_FLAGS(oklist) \
55 	if ((flags & ~(oklist)) & \
/* Copy from userland via copyin() on user ioctls, plain bcopy() in-kernel. */
60 #define COPYIN(from, to, size) \
61 ((flags & PFR_FLAG_USERIOCTL) ? \
62 copyin((from), (to), (size)) : \
63 (bcopy((from), (to), (size)), 0))
/* Mirror of COPYIN for the kernel-to-caller direction. */
65 #define COPYOUT(from, to, size) \
66 ((flags & PFR_FLAG_USERIOCTL) ? \
67 copyout((from), (to), (size)) : \
68 (bcopy((from), (to), (size)), 0))
/* Populate a sockaddr_in from a pf IPv4 address (wrapper lines elided). */
70 #define FILLIN_SIN(sin, addr) \
72 (sin).sin_len = sizeof(sin); \
73 (sin).sin_family = AF_INET; \
74 (sin).sin_addr = (addr); \
/* Populate a sockaddr_in6 from a pf IPv6 address (wrapper lines elided). */
77 #define FILLIN_SIN6(sin6, addr) \
79 (sin6).sin6_len = sizeof(sin6); \
80 (sin6).sin6_family = AF_INET6; \
81 (sin6).sin6_addr = (addr); \
/* Exchange two values of 'type' (body elided in this listing). */
84 #define SWAP(type, a1, a2) \
/* Extract the pf_addr inside a sockaddr_union for the given family. */
91 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
92 (struct pf_addr *)&(su)->sin.sin_addr : \
93 (struct pf_addr *)&(su)->sin6.sin6_addr)
/* Address-family width and network-vs-host classification helpers. */
95 #define AF_BITS(af) (((af)==AF_INET)?32:128)
96 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
97 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True if the radix node is one of the tree's internal root nodes. */
98 #define KENTRY_RNF_ROOT(ke) \
99 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
/* Sentinel/engine constants used by the walker and commit paths. */
101 #define NO_ADDRESSES (-1)
102 #define ENQUEUE_UNMARKED_ONLY (1)
103 #define INVERT_NEG_FLAG (1)
/*
 * Walker argument passed to pfr_walktree() during radix-tree traversals.
 * NOTE(review): the members between the struct head and this union arm
 * list (the op/flags/free fields referenced later as pfrw_op, pfrw_flags,
 * pfrw_free) are elided from this listing.
 */
105 struct pfr_walktree {
116 struct pfr_addr *pfrw1_addr;
117 struct pfr_astats *pfrw1_astats;
118 struct pfr_kentryworkq *pfrw1_workq;
119 struct pfr_kentry *pfrw1_kentry;
120 struct pfi_dynaddr *pfrw1_dyn;
/* Convenience accessors for the pfrw_1 union members. */
125 #define pfrw_addr pfrw_1.pfrw1_addr
126 #define pfrw_astats pfrw_1.pfrw1_astats
127 #define pfrw_workq pfrw_1.pfrw1_workq
128 #define pfrw_kentry pfrw_1.pfrw1_kentry
129 #define pfrw_dyn pfrw_1.pfrw1_dyn
130 #define pfrw_cnt pfrw_free
/* Record an error code and jump to the function's common error exit. */
132 #define senderr(e) do { rv = (e); goto _bad; } while (0)
/* Zone allocators plus pre-initialized sockaddr/mask/all-ones templates. */
134 vm_zone_t pfr_ktable_pl;
135 vm_zone_t pfr_kentry_pl;
136 struct sockaddr_in pfr_sin;
137 struct sockaddr_in6 pfr_sin6;
138 union sockaddr_union pfr_mask;
139 struct pf_addr pfr_ffaddr;
/*
 * Forward declarations for the file-local helpers, the red-black tree of
 * tables keyed by pfr_ktable_compare(), and the global table head.
 * NOTE(review): a few prototypes are split across elided lines (e.g. the
 * trailing "int" parameter of pfr_clstats_kentries/pfr_clstats_ktables).
 */
141 void pfr_copyout_addr(struct pfr_addr *,
142 struct pfr_kentry *ke);
143 int pfr_validate_addr(struct pfr_addr *);
144 void pfr_enqueue_addrs(struct pfr_ktable *,
145 struct pfr_kentryworkq *, int *, int);
146 void pfr_mark_addrs(struct pfr_ktable *);
147 struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
148 struct pfr_addr *, int);
149 struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
150 void pfr_destroy_kentries(struct pfr_kentryworkq *);
151 void pfr_destroy_kentry(struct pfr_kentry *);
152 void pfr_insert_kentries(struct pfr_ktable *,
153 struct pfr_kentryworkq *, long);
154 void pfr_remove_kentries(struct pfr_ktable *,
155 struct pfr_kentryworkq *);
156 void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
158 void pfr_reset_feedback(struct pfr_addr *, int, int);
159 void pfr_prepare_network(union sockaddr_union *, int, int);
160 int pfr_route_kentry(struct pfr_ktable *,
161 struct pfr_kentry *);
162 int pfr_unroute_kentry(struct pfr_ktable *,
163 struct pfr_kentry *);
164 int pfr_walktree(struct radix_node *, void *);
165 int pfr_validate_table(struct pfr_table *, int, int);
166 void pfr_commit_ktable(struct pfr_ktable *, long);
167 void pfr_insert_ktables(struct pfr_ktableworkq *);
168 void pfr_insert_ktable(struct pfr_ktable *);
169 void pfr_setflags_ktables(struct pfr_ktableworkq *);
170 void pfr_setflags_ktable(struct pfr_ktable *, int);
171 void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173 void pfr_clstats_ktable(struct pfr_ktable *, long, int);
174 struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int);
175 void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
176 void pfr_destroy_ktable(struct pfr_ktable *, int);
177 int pfr_ktable_compare(struct pfr_ktable *,
178 struct pfr_ktable *);
179 struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
180 void pfr_clean_node_mask(struct pfr_ktable *,
181 struct pfr_kentryworkq *);
182 int pfr_table_count(struct pfr_table *, int);
183 int pfr_skip_table(struct pfr_table *,
184 struct pfr_ktable *, int);
185 struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
/* Generate the RB-tree operations for the global table of kernel tables. */
187 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
188 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190 struct pfr_ktablehead pfr_ktables;
191 struct pfr_table pfr_nulltable;
/*
 * NOTE(review): the function header for this block is elided from the
 * listing (presumably pfr_initialize()). The visible statements pre-fill
 * the shared IPv4/IPv6 sockaddr templates and the all-ones address.
 */
197 pfr_sin.sin_len = sizeof(pfr_sin);
198 pfr_sin.sin_family = AF_INET;
199 pfr_sin6.sin6_len = sizeof(pfr_sin6);
200 pfr_sin6.sin6_family = AF_INET6;
202 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
/*
 * Remove every address from table 'tbl'; *ndel receives the count.
 * Rejects const tables. PFR_FLAG_DUMMY performs a dry run.
 * NOTE(review): error returns, splsoftnet()/splx() pairs and the closing
 * brace are elided from this listing.
 */
206 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
208 struct pfr_ktable *kt;
209 struct pfr_kentryworkq workq;
212 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
213 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
215 kt = pfr_lookup_table(tbl);
216 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
218 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Collect all entries into a work queue, then unlink them in one pass. */
220 pfr_enqueue_addrs(kt, &workq, ndel, 0);
222 if (!(flags & PFR_FLAG_DUMMY)) {
223 if (flags & PFR_FLAG_ATOMIC)
225 pfr_remove_kentries(kt, &workq);
226 if (flags & PFR_FLAG_ATOMIC)
229 printf("pfr_clr_addrs: corruption detected (%d).\n",
/*
 * Add 'size' addresses from user array 'addr' to table 'tbl'.
 * Duplicates within the input are detected via a scratch table 'tmpkt';
 * per-address feedback codes are written back when PFR_FLAG_FEEDBACK is
 * set. *nadd receives the number actually added.
 * NOTE(review): several branch/return lines are elided from this listing.
 */
238 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
239 int *nadd, int flags)
241 struct pfr_ktable *kt, *tmpkt;
242 struct pfr_kentryworkq workq;
243 struct pfr_kentry *p, *q;
245 int i, rv, s = 0, xadd = 0;
246 long tzero = time_second;
248 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
249 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
251 kt = pfr_lookup_table(tbl);
252 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
254 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Scratch table used only to detect duplicates inside this request. */
256 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
260 for (i = 0; i < size; i++) {
261 if (COPYIN(addr+i, &ad, sizeof(ad)))
263 if (pfr_validate_addr(&ad))
265 p = pfr_lookup_addr(kt, &ad, 1);
266 q = pfr_lookup_addr(tmpkt, &ad, 1);
267 if (flags & PFR_FLAG_FEEDBACK) {
269 ad.pfra_fback = PFR_FB_DUPLICATE;
271 ad.pfra_fback = PFR_FB_ADDED;
272 else if (p->pfrke_not != ad.pfra_not)
273 ad.pfra_fback = PFR_FB_CONFLICT;
275 ad.pfra_fback = PFR_FB_NONE;
277 if (p == NULL && q == NULL) {
278 p = pfr_create_kentry(&ad);
281 if (pfr_route_kentry(tmpkt, p)) {
282 pfr_destroy_kentry(p);
283 ad.pfra_fback = PFR_FB_NONE;
285 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
289 if (flags & PFR_FLAG_FEEDBACK)
290 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/* Detach work entries from the scratch tree before the real insert. */
293 pfr_clean_node_mask(tmpkt, &workq);
294 if (!(flags & PFR_FLAG_DUMMY)) {
295 if (flags & PFR_FLAG_ATOMIC)
297 pfr_insert_kentries(kt, &workq, tzero);
298 if (flags & PFR_FLAG_ATOMIC)
/* Dry run: discard the prepared entries instead of inserting. */
301 pfr_destroy_kentries(&workq);
304 pfr_destroy_ktable(tmpkt, 0);
/* Error path: undo scratch routing, free entries, clear feedback. */
307 pfr_clean_node_mask(tmpkt, &workq);
308 pfr_destroy_kentries(&workq);
309 if (flags & PFR_FLAG_FEEDBACK)
310 pfr_reset_feedback(addr, size, flags);
311 pfr_destroy_ktable(tmpkt, 0);
/*
 * Delete 'size' addresses listed in 'addr' from table 'tbl'.
 * Matching entries are collected on a work queue and removed together;
 * *ndel receives the count, feedback codes are returned when requested.
 * NOTE(review): several branch/return lines are elided from this listing.
 */
316 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
317 int *ndel, int flags)
319 struct pfr_ktable *kt;
320 struct pfr_kentryworkq workq;
321 struct pfr_kentry *p;
323 int i, rv, s = 0, xdel = 0;
325 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
326 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
328 kt = pfr_lookup_table(tbl);
329 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
331 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
335 for (i = 0; i < size; i++) {
336 if (COPYIN(addr+i, &ad, sizeof(ad)))
338 if (pfr_validate_addr(&ad))
340 p = pfr_lookup_addr(kt, &ad, 1);
341 if (flags & PFR_FLAG_FEEDBACK) {
343 ad.pfra_fback = PFR_FB_NONE;
344 else if (p->pfrke_not != ad.pfra_not)
345 ad.pfra_fback = PFR_FB_CONFLICT;
/* pfrke_mark guards against deleting the same entry twice per request. */
346 else if (p->pfrke_mark)
347 ad.pfra_fback = PFR_FB_DUPLICATE;
349 ad.pfra_fback = PFR_FB_DELETED;
351 if (p != NULL && p->pfrke_not == ad.pfra_not &&
354 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
357 if (flags & PFR_FLAG_FEEDBACK)
358 if (COPYOUT(&ad, addr+i, sizeof(ad)))
361 if (!(flags & PFR_FLAG_DUMMY)) {
362 if (flags & PFR_FLAG_ATOMIC)
364 pfr_remove_kentries(kt, &workq);
365 if (flags & PFR_FLAG_ATOMIC)
/* Error path: clear any feedback codes already written back. */
372 if (flags & PFR_FLAG_FEEDBACK)
373 pfr_reset_feedback(addr, size, flags);
/*
 * Replace the contents of 'tbl' with exactly the 'size' addresses in
 * 'addr': new ones are added (addq), absent ones deleted (delq), entries
 * whose negation flag differs are toggled (changeq). Counters are
 * returned in *nadd/*ndel/*nchange; deleted entries can be reported back
 * through the extra *size2 slots when PFR_FLAG_FEEDBACK is set.
 * NOTE(review): pfr_mark_addrs() call, several branches and returns are
 * elided from this listing.
 */
378 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
379 int *size2, int *nadd, int *ndel, int *nchange, int flags)
381 struct pfr_ktable *kt, *tmpkt;
382 struct pfr_kentryworkq addq, delq, changeq;
383 struct pfr_kentry *p, *q;
385 int i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
386 long tzero = time_second;
388 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
389 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
391 kt = pfr_lookup_table(tbl);
392 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
394 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* Scratch table used to detect duplicates within the request. */
396 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
402 SLIST_INIT(&changeq);
403 for (i = 0; i < size; i++) {
404 if (COPYIN(addr+i, &ad, sizeof(ad)))
406 if (pfr_validate_addr(&ad))
408 ad.pfra_fback = PFR_FB_NONE;
409 p = pfr_lookup_addr(kt, &ad, 1);
412 ad.pfra_fback = PFR_FB_DUPLICATE;
416 if (p->pfrke_not != ad.pfra_not) {
417 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
418 ad.pfra_fback = PFR_FB_CHANGED;
422 q = pfr_lookup_addr(tmpkt, &ad, 1);
424 ad.pfra_fback = PFR_FB_DUPLICATE;
427 p = pfr_create_kentry(&ad);
430 if (pfr_route_kentry(tmpkt, p)) {
431 pfr_destroy_kentry(p);
432 ad.pfra_fback = PFR_FB_NONE;
434 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
435 ad.pfra_fback = PFR_FB_ADDED;
440 if (flags & PFR_FLAG_FEEDBACK)
441 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/* Entries left unmarked by the loop above are scheduled for deletion. */
444 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
445 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
446 if (*size2 < size+xdel) {
451 SLIST_FOREACH(p, &delq, pfrke_workq) {
452 pfr_copyout_addr(&ad, p);
453 ad.pfra_fback = PFR_FB_DELETED;
454 if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
459 pfr_clean_node_mask(tmpkt, &addq);
460 if (!(flags & PFR_FLAG_DUMMY)) {
461 if (flags & PFR_FLAG_ATOMIC)
463 pfr_insert_kentries(kt, &addq, tzero);
464 pfr_remove_kentries(kt, &delq);
/* Toggle the negation flag and reset stats on changed entries. */
465 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
466 if (flags & PFR_FLAG_ATOMIC)
469 pfr_destroy_kentries(&addq);
476 if ((flags & PFR_FLAG_FEEDBACK) && size2)
478 pfr_destroy_ktable(tmpkt, 0);
/* Error path: undo scratch routing and clear feedback codes. */
481 pfr_clean_node_mask(tmpkt, &addq);
482 pfr_destroy_kentries(&addq);
483 if (flags & PFR_FLAG_FEEDBACK)
484 pfr_reset_feedback(addr, size, flags);
485 pfr_destroy_ktable(tmpkt, 0);
/*
 * Test which of the 'size' host addresses in 'addr' match table 'tbl'.
 * Each entry's pfra_fback is set to MATCH/NOTMATCH/NONE; *nmatch counts
 * positive matches. PFR_FLAG_REPLACE rewrites each address with the
 * matching table entry. Network (non-host) inputs are rejected.
 * NOTE(review): returns/increments on elided lines are not visible here.
 */
490 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
491 int *nmatch, int flags)
493 struct pfr_ktable *kt;
494 struct pfr_kentry *p;
498 ACCEPT_FLAGS(PFR_FLAG_REPLACE);
499 if (pfr_validate_table(tbl, 0, 0))
501 kt = pfr_lookup_table(tbl);
502 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
505 for (i = 0; i < size; i++) {
506 if (COPYIN(addr+i, &ad, sizeof(ad)))
508 if (pfr_validate_addr(&ad))
510 if (ADDR_NETWORK(&ad))
/* Non-exact lookup: best (longest-prefix) match for the host address. */
512 p = pfr_lookup_addr(kt, &ad, 0);
513 if (flags & PFR_FLAG_REPLACE)
514 pfr_copyout_addr(&ad, p);
515 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
516 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
517 if (p != NULL && !p->pfrke_not)
519 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/*
 * Copy all addresses of 'tbl' out to user array 'addr'. If the supplied
 * buffer (*size) is too small, only the required size is reported back.
 * Walks both the IPv4 and IPv6 radix trees with a PFRW_GET_ADDRS walker.
 * NOTE(review): returns and the w.pfrw_addr assignment are on elided lines.
 */
528 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
531 struct pfr_ktable *kt;
532 struct pfr_walktree w;
536 if (pfr_validate_table(tbl, 0, 0))
538 kt = pfr_lookup_table(tbl);
539 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
541 if (kt->pfrkt_cnt > *size) {
542 *size = kt->pfrkt_cnt;
546 bzero(&w, sizeof(w));
547 w.pfrw_op = PFRW_GET_ADDRS;
549 w.pfrw_free = kt->pfrkt_cnt;
550 w.pfrw_flags = flags;
551 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
553 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
/* pfrw_free underflow here means the tree and pfrkt_cnt disagree. */
558 printf("pfr_get_addrs: corruption detected (%d).\n",
562 *size = kt->pfrkt_cnt;
/*
 * Copy per-address statistics of 'tbl' out to 'addr', optionally
 * clearing them afterwards (PFR_FLAG_CLSTATS) -- the flag is currently
 * not accepted (see the XXX below). Same size-negotiation protocol as
 * pfr_get_addrs().
 * NOTE(review): returns and splsoftnet()/splx() pairs are on elided lines.
 */
567 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
570 struct pfr_ktable *kt;
571 struct pfr_walktree w;
572 struct pfr_kentryworkq workq;
574 long tzero = time_second;
576 ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
577 if (pfr_validate_table(tbl, 0, 0))
579 kt = pfr_lookup_table(tbl);
580 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
582 if (kt->pfrkt_cnt > *size) {
583 *size = kt->pfrkt_cnt;
587 bzero(&w, sizeof(w));
588 w.pfrw_op = PFRW_GET_ASTATS;
589 w.pfrw_astats = addr;
590 w.pfrw_free = kt->pfrkt_cnt;
591 w.pfrw_flags = flags;
592 if (flags & PFR_FLAG_ATOMIC)
594 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
596 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
597 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
598 pfr_enqueue_addrs(kt, &workq, NULL, 0);
599 pfr_clstats_kentries(&workq, tzero, 0);
601 if (flags & PFR_FLAG_ATOMIC)
607 printf("pfr_get_astats: corruption detected (%d).\n",
611 *size = kt->pfrkt_cnt;
/*
 * Zero the statistics of the listed addresses in 'tbl'; *nzero receives
 * the number of entries cleared, feedback codes are returned on request.
 * NOTE(review): several branch/return lines are elided from this listing.
 */
616 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
617 int *nzero, int flags)
619 struct pfr_ktable *kt;
620 struct pfr_kentryworkq workq;
621 struct pfr_kentry *p;
623 int i, rv, s = 0, xzero = 0;
625 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
626 if (pfr_validate_table(tbl, 0, 0))
628 kt = pfr_lookup_table(tbl);
629 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
632 for (i = 0; i < size; i++) {
633 if (COPYIN(addr+i, &ad, sizeof(ad)))
635 if (pfr_validate_addr(&ad))
637 p = pfr_lookup_addr(kt, &ad, 1);
638 if (flags & PFR_FLAG_FEEDBACK) {
639 ad.pfra_fback = (p != NULL) ?
640 PFR_FB_CLEARED : PFR_FB_NONE;
641 if (COPYOUT(&ad, addr+i, sizeof(ad)))
645 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
650 if (!(flags & PFR_FLAG_DUMMY)) {
651 if (flags & PFR_FLAG_ATOMIC)
/* tzero=0: entries keep no creation timestamp after a manual clear. */
653 pfr_clstats_kentries(&workq, 0, 0);
654 if (flags & PFR_FLAG_ATOMIC)
661 if (flags & PFR_FLAG_FEEDBACK)
662 pfr_reset_feedback(addr, size, flags);
/*
 * pfr_validate_addr: sanity-check a user-supplied pfr_addr -- prefix
 * length within the family's bounds, no stray bits set beyond the prefix,
 * and pfra_not restricted to 0/1. Returns non-zero on bad input.
 * NOTE(review): the AF_INET bit-check arm and returns are elided here;
 * only the AF_INET6-side checks are visible.
 */
667 pfr_validate_addr(struct pfr_addr *ad)
671 switch (ad->pfra_af) {
673 if (ad->pfra_net > 32)
677 if (ad->pfra_net > 128)
/* Reject set bits inside the partial byte just past the prefix... */
683 if (ad->pfra_net < 128 &&
684 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
/* ...and any set byte after it. */
686 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
687 if (((caddr_t)ad)[i])
689 if (ad->pfra_not && ad->pfra_not != 1)
/*
 * pfr_enqueue_addrs: walk both radix trees and collect entries onto
 * 'workq'; 'sweep' selects PFRW_SWEEP over plain PFRW_ENQUEUE.
 * NOTE(review): the *naddr write-back is on an elided line.
 */
697 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
698 int *naddr, int sweep)
700 struct pfr_walktree w;
703 bzero(&w, sizeof(w));
704 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
705 w.pfrw_workq = workq;
706 if (kt->pfrkt_ip4 != NULL)
707 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
708 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
709 if (kt->pfrkt_ip6 != NULL)
710 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
711 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
/*
 * pfr_mark_addrs: clear the per-entry mark flag on every entry of both
 * trees (PFRW_MARK walker), preparing for a set/replace operation.
 */
717 pfr_mark_addrs(struct pfr_ktable *kt)
719 struct pfr_walktree w;
721 bzero(&w, sizeof(w));
722 w.pfrw_op = PFRW_MARK;
723 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
724 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
725 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
726 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
/*
 * Look up 'ad' in the appropriate radix tree of 'kt'. Network addresses
 * use rn_lookup() with an explicit mask; host addresses use rn_match()
 * (longest prefix). With 'exact' set, a network entry matching a host
 * query is rejected. Internal radix root nodes are filtered out.
 * NOTE(review): return statements and splx() are on elided lines.
 */
731 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
733 union sockaddr_union sa, mask;
734 struct radix_node_head *head;
735 struct pfr_kentry *ke;
738 bzero(&sa, sizeof(sa));
739 if (ad->pfra_af == AF_INET) {
740 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
741 head = kt->pfrkt_ip4;
743 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
744 head = kt->pfrkt_ip6;
746 if (ADDR_NETWORK(ad)) {
747 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
748 s = splsoftnet(); /* rn_lookup makes use of globals */
749 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
751 if (ke && KENTRY_RNF_ROOT(ke))
754 ke = (struct pfr_kentry *)rn_match(&sa, head);
755 if (ke && KENTRY_RNF_ROOT(ke))
757 if (exact && ke && KENTRY_NETWORK(ke))
/*
 * pfr_create_kentry: allocate a zeroed kernel table entry from the
 * kentry zone (PR_NOWAIT -- may fail) and fill in address, family,
 * prefix length and negation flag from 'ad'.
 * NOTE(review): the NULL-check and return are on elided lines.
 */
764 pfr_create_kentry(struct pfr_addr *ad)
766 struct pfr_kentry *ke;
768 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
771 bzero(ke, sizeof(*ke));
773 if (ad->pfra_af == AF_INET)
774 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
776 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
777 ke->pfrke_af = ad->pfra_af;
778 ke->pfrke_net = ad->pfra_net;
779 ke->pfrke_not = ad->pfra_not;
/*
 * pfr_destroy_kentries: free every entry on 'workq'; the next pointer is
 * fetched before each free so the list can be torn down safely.
 */
784 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
786 struct pfr_kentry *p, *q;
788 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
789 q = SLIST_NEXT(p, pfrke_workq);
790 pfr_destroy_kentry(p);
/* pfr_destroy_kentry: return a single entry to its zone. */
795 pfr_destroy_kentry(struct pfr_kentry *ke)
797 pool_put(&pfr_kentry_pl, ke);
/*
 * pfr_insert_kentries: route every work-queue entry into the table's
 * radix trees and stamp it with creation time 'tzero'.
 * NOTE(review): the error handling after a failed route and the
 * pfrkt_cnt update are on elided lines.
 */
801 pfr_insert_kentries(struct pfr_ktable *kt,
802 struct pfr_kentryworkq *workq, long tzero)
804 struct pfr_kentry *p;
807 SLIST_FOREACH(p, workq, pfrke_workq) {
808 rv = pfr_route_kentry(kt, p);
810 printf("pfr_insert_kentries: cannot route entry "
814 p->pfrke_tzero = tzero;
/*
 * pfr_remove_kentries: unroute all work-queue entries from the table,
 * then free them.
 */
821 pfr_remove_kentries(struct pfr_ktable *kt,
822 struct pfr_kentryworkq *workq)
824 struct pfr_kentry *p;
827 SLIST_FOREACH(p, workq, pfrke_workq) {
828 pfr_unroute_kentry(kt, p);
832 pfr_destroy_kentries(workq);
/*
 * pfr_clean_node_mask: unroute entries from a (scratch) table without
 * freeing them -- used to detach work queues from temporary trees.
 */
836 pfr_clean_node_mask(struct pfr_ktable *kt,
837 struct pfr_kentryworkq *workq)
839 struct pfr_kentry *p;
841 SLIST_FOREACH(p, workq, pfrke_workq)
842 pfr_unroute_kentry(kt, p);
/*
 * pfr_clstats_kentries: zero packet/byte counters and reset tzero on all
 * queued entries; with 'negchange' also flip the negation flag.
 */
846 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
848 struct pfr_kentry *p;
851 SLIST_FOREACH(p, workq, pfrke_workq) {
854 p->pfrke_not = !p->pfrke_not;
855 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
856 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
858 p->pfrke_tzero = tzero;
/*
 * pfr_reset_feedback: rewrite pfra_fback to PFR_FB_NONE in every
 * user-visible address slot after a failed operation.
 */
863 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
868 for (i = 0; i < size; i++) {
869 if (COPYIN(addr+i, &ad, sizeof(ad)))
871 ad.pfra_fback = PFR_FB_NONE;
872 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/*
 * pfr_prepare_network: build a netmask sockaddr for 'net' prefix bits of
 * family 'af'. IPv6 fills 32-bit words: all-ones before the boundary
 * word, a partial htonl(-1 << (32-net)) at it.
 * NOTE(review): the per-word net decrement/branch lines are elided.
 */
878 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
882 bzero(sa, sizeof(*sa));
884 sa->sin.sin_len = sizeof(sa->sin);
885 sa->sin.sin_family = AF_INET;
886 sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
888 sa->sin6.sin6_len = sizeof(sa->sin6);
889 sa->sin6.sin6_family = AF_INET6;
890 for (i = 0; i < 4; i++) {
892 sa->sin6.sin6_addr.s6_addr32[i] =
893 htonl(-1 << (32-net));
896 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
/*
 * pfr_route_kentry: insert 'ke' into the table's radix tree for its
 * family; network entries carry an explicit mask. Returns -1 when
 * rn_addroute() fails (e.g. duplicate), 0 on success.
 */
903 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
905 union sockaddr_union mask;
906 struct radix_node *rn;
907 struct radix_node_head *head;
910 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
911 if (ke->pfrke_af == AF_INET)
912 head = kt->pfrkt_ip4;
914 head = kt->pfrkt_ip6;
917 if (KENTRY_NETWORK(ke)) {
918 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
919 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
921 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
924 return (rn == NULL ? -1 : 0);
/*
 * pfr_unroute_kentry: remove 'ke' from its radix tree; logs on failure.
 * NOTE(review): the return statements are on elided lines.
 */
928 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
930 union sockaddr_union mask;
931 struct radix_node *rn;
932 struct radix_node_head *head;
935 if (ke->pfrke_af == AF_INET)
936 head = kt->pfrkt_ip4;
938 head = kt->pfrkt_ip6;
941 if (KENTRY_NETWORK(ke)) {
942 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
943 rn = rn_delete(&ke->pfrke_sa, &mask, head);
945 rn = rn_delete(&ke->pfrke_sa, NULL, head);
949 printf("pfr_unroute_kentry: delete failed.\n");
/*
 * pfr_copyout_addr: convert a kernel entry back into the user-visible
 * pfr_addr representation.
 */
956 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
958 bzero(ad, sizeof(*ad));
961 ad->pfra_af = ke->pfrke_af;
962 ad->pfra_net = ke->pfrke_net;
963 ad->pfra_not = ke->pfrke_not;
964 if (ad->pfra_af == AF_INET)
965 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
967 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
/*
 * Radix-tree walker callback dispatching on w->pfrw_op:
 *   MARK/SWEEP/ENQUEUE -- (cases partly elided) collect entries,
 *   GET_ADDRS/GET_ASTATS -- copy entries/stats out while pfrw_free lasts,
 *   POOL_GET -- pick the pfrw_cnt'th non-negated entry,
 *   DYNADDR_UPDATE -- publish first IPv4/IPv6 entry into a pfi_dynaddr.
 * Returning non-zero stops the walk.
 * NOTE(review): several case labels and error returns are elided here.
 */
971 pfr_walktree(struct radix_node *rn, void *arg)
973 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
974 struct pfr_walktree *w = arg;
975 int s, flags = w->pfrw_flags;
977 switch (w->pfrw_op) {
986 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
990 if (w->pfrw_free-- > 0) {
993 pfr_copyout_addr(&ad, ke);
994 if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
999 case PFRW_GET_ASTATS:
1000 if (w->pfrw_free-- > 0) {
1001 struct pfr_astats as;
1003 pfr_copyout_addr(&as.pfras_a, ke);
1006 bcopy(ke->pfrke_packets, as.pfras_packets,
1007 sizeof(as.pfras_packets));
1008 bcopy(ke->pfrke_bytes, as.pfras_bytes,
1009 sizeof(as.pfras_bytes));
1011 as.pfras_tzero = ke->pfrke_tzero;
1013 if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1020 break; /* negative entries are ignored */
1021 if (!w->pfrw_cnt--) {
1022 w->pfrw_kentry = ke;
1023 return (1); /* finish search */
1026 case PFRW_DYNADDR_UPDATE:
/* Only the first entry of each family populates the dynaddr. */
1027 if (ke->pfrke_af == AF_INET) {
1028 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1030 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1031 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1032 &ke->pfrke_sa, AF_INET);
1033 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1034 &pfr_mask, AF_INET);
1036 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1038 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1039 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1040 &ke->pfrke_sa, AF_INET6);
1041 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1042 &pfr_mask, AF_INET6);
/*
 * Deactivate every active table matching 'filter' (except those in the
 * reserved anchor) by clearing PFR_TFLAG_ACTIVE via the nflags/setflags
 * mechanism. *ndel receives the count.
 * NOTE(review): returns and the xdel increment are on elided lines.
 */
1050 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1052 struct pfr_ktableworkq workq;
1053 struct pfr_ktable *p;
1054 int s = 0, xdel = 0;
1056 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
1057 if (pfr_table_count(filter, flags) < 0)
1061 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1062 if (pfr_skip_table(filter, p, flags))
1064 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1066 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1068 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1069 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1072 if (!(flags & PFR_FLAG_DUMMY)) {
1073 if (flags & PFR_FLAG_ATOMIC)
1075 pfr_setflags_ktables(&workq);
1076 if (flags & PFR_FLAG_ATOMIC)
/*
 * Create the 'size' tables described in 'tbl'. New tables go onto addq
 * (including an implicitly created root table for anchored tables);
 * existing-but-inactive tables get their flags updated via changeq.
 * *nadd receives the number of tables actually created.
 * NOTE(review): several branches, senderr() calls and returns are elided.
 */
1085 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1087 struct pfr_ktableworkq addq, changeq;
1088 struct pfr_ktable *p, *q, *r, key;
1089 int i, rv, s = 0, xadd = 0;
1090 long tzero = time_second;
1092 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1094 SLIST_INIT(&changeq);
1095 for (i = 0; i < size; i++) {
1096 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1098 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1099 flags & PFR_FLAG_USERIOCTL))
1101 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1102 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1104 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
/* Skip duplicates already queued within this same request. */
1107 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1108 if (!pfr_ktable_compare(p, q))
1111 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1113 if (!key.pfrkt_anchor[0])
1116 /* find or create root table */
1117 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1118 bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
1119 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1124 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1125 if (!pfr_ktable_compare(&key, q)) {
1130 key.pfrkt_flags = 0;
1131 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1134 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
/* Table exists but is inactive: schedule a flag change instead. */
1136 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1137 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1138 if (!pfr_ktable_compare(&key, q))
1140 p->pfrkt_nflags = (p->pfrkt_flags &
1141 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1142 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1148 if (!(flags & PFR_FLAG_DUMMY)) {
1149 if (flags & PFR_FLAG_ATOMIC)
1151 pfr_insert_ktables(&addq);
1152 pfr_setflags_ktables(&changeq);
1153 if (flags & PFR_FLAG_ATOMIC)
/* Dry run: throw the prepared tables away. */
1156 pfr_destroy_ktables(&addq, 0);
/* Error path: release anything queued so far. */
1161 pfr_destroy_ktables(&addq, 0);
/*
 * Deactivate the listed tables: each existing active table has its
 * ACTIVE flag cleared via the nflags/setflags mechanism. *ndel receives
 * the count.
 * NOTE(review): returns and the xdel increment are on elided lines.
 */
1166 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1168 struct pfr_ktableworkq workq;
1169 struct pfr_ktable *p, *q, key;
1170 int i, s = 0, xdel = 0;
1172 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1174 for (i = 0; i < size; i++) {
1175 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1177 if (pfr_validate_table(&key.pfrkt_t, 0,
1178 flags & PFR_FLAG_USERIOCTL))
1180 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1181 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
/* Skip duplicates already queued within this request. */
1182 SLIST_FOREACH(q, &workq, pfrkt_workq)
1183 if (!pfr_ktable_compare(p, q))
1185 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1186 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1193 if (!(flags & PFR_FLAG_DUMMY)) {
1194 if (flags & PFR_FLAG_ATOMIC)
1196 pfr_setflags_ktables(&workq);
1197 if (flags & PFR_FLAG_ATOMIC)
/*
 * pfr_get_tables: copy the pfr_table headers of all tables matching
 * 'filter' out to userland, using the usual size-negotiation protocol.
 * NOTE(review): the size check and returns are on elided lines.
 */
1206 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1209 struct pfr_ktable *p;
1212 ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
1213 n = nn = pfr_table_count(filter, flags);
1220 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1221 if (pfr_skip_table(filter, p, flags))
1225 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
1229 printf("pfr_get_tables: corruption detected (%d).\n", n);
/*
 * pfr_get_tstats: copy per-table statistics out, optionally clearing
 * them afterwards (PFR_FLAG_CLSTATS currently disabled -- see XXX).
 * NOTE(review): splsoftnet()/splx() calls and returns are elided here.
 */
1237 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1240 struct pfr_ktable *p;
1241 struct pfr_ktableworkq workq;
1243 long tzero = time_second;
1245 ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
1246 /* XXX PFR_FLAG_CLSTATS disabled */
1247 n = nn = pfr_table_count(filter, flags);
1255 if (flags & PFR_FLAG_ATOMIC)
1257 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1258 if (pfr_skip_table(filter, p, flags))
1262 if (!(flags & PFR_FLAG_ATOMIC))
1264 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
1268 if (!(flags & PFR_FLAG_ATOMIC))
1270 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1272 if (flags & PFR_FLAG_CLSTATS)
1273 pfr_clstats_ktables(&workq, tzero,
1274 flags & PFR_FLAG_ADDRSTOO);
1275 if (flags & PFR_FLAG_ATOMIC)
1278 printf("pfr_get_tstats: corruption detected (%d).\n", n);
/*
 * pfr_clr_tstats: zero the statistics of the listed tables; *nzero
 * receives the count. PFR_FLAG_ADDRSTOO also clears per-address stats.
 * NOTE(review): returns and the xzero increment are on elided lines.
 */
1286 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1288 struct pfr_ktableworkq workq;
1289 struct pfr_ktable *p, key;
1290 int i, s = 0, xzero = 0;
1291 long tzero = time_second;
1293 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
1295 for (i = 0; i < size; i++) {
1296 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1298 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1300 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1302 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1306 if (!(flags & PFR_FLAG_DUMMY)) {
1307 if (flags & PFR_FLAG_ATOMIC)
1309 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1310 if (flags & PFR_FLAG_ATOMIC)
/*
 * Set/clear user-settable flags on the listed tables. Rejects flags
 * outside PFR_TFLAG_USRMASK and overlapping set/clear masks. Clearing
 * PERSIST on an unreferenced table counts as a deletion (*ndel);
 * other effective changes count in *nchange.
 * NOTE(review): returns and part of the nflags expression are elided.
 */
1319 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1320 int *nchange, int *ndel, int flags)
1322 struct pfr_ktableworkq workq;
1323 struct pfr_ktable *p, *q, key;
1324 int i, s = 0, xchange = 0, xdel = 0;
1326 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1327 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1328 (clrflag & ~PFR_TFLAG_USRMASK) ||
1329 (setflag & clrflag))
1332 for (i = 0; i < size; i++) {
1333 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1335 if (pfr_validate_table(&key.pfrkt_t, 0,
1336 flags & PFR_FLAG_USERIOCTL))
1338 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1339 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1340 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
/* No-op flag change: nothing to queue. */
1342 if (p->pfrkt_nflags == p->pfrkt_flags)
1344 SLIST_FOREACH(q, &workq, pfrkt_workq)
1345 if (!pfr_ktable_compare(p, q))
1347 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq)
1348 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1349 (clrflag & PFR_TFLAG_PERSIST) &&
1350 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1358 if (!(flags & PFR_FLAG_DUMMY)) {
1359 if (flags & PFR_FLAG_ATOMIC)
1361 pfr_setflags_ktables(&workq);
1362 if (flags & PFR_FLAG_ATOMIC)
1365 if (nchange != NULL)
/*
 * Begin an inactive-table transaction for the given anchor/ruleset:
 * drop any stale INACTIVE tables from a previous open transaction and
 * hand a fresh ticket back to the caller. *ndel counts dropped tables.
 * NOTE(review): returns and the rs->topen assignment are on elided lines.
 */
1373 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1375 struct pfr_ktableworkq workq;
1376 struct pfr_ktable *p;
1377 struct pf_ruleset *rs;
1380 ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1381 rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
1385 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1386 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1387 pfr_skip_table(trs, p, 0))
1389 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1390 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1393 if (!(flags & PFR_FLAG_DUMMY)) {
1394 pfr_setflags_ktables(&workq);
1396 *ticket = ++rs->tticket;
/* Avoid leaking an empty ruleset created only for this lookup. */
1399 pf_remove_if_empty_ruleset(rs);
/*
 * Define a table (and optionally its addresses) inside an open
 * transaction identified by 'ticket'. Builds a shadow table holding the
 * new address set; the shadow replaces the live contents at commit time
 * (pfr_ina_commit/pfr_commit_ktable). A root table is created for
 * anchored tables when missing. *nadd/*naddr report tables/addresses.
 * NOTE(review): numerous senderr()/return lines are elided here.
 */
1406 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1407 int *nadd, int *naddr, u_int32_t ticket, int flags)
1409 struct pfr_ktableworkq tableq;
1410 struct pfr_kentryworkq addrq;
1411 struct pfr_ktable *kt, *rt, *shadow, key;
1412 struct pfr_kentry *p;
1414 struct pf_ruleset *rs;
1415 int i, rv, xadd = 0, xaddr = 0;
1417 ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
1418 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1420 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1421 flags & PFR_FLAG_USERIOCTL))
/* Ticket must match the transaction opened by pfr_ina_begin(). */
1423 rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
1424 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1426 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1427 SLIST_INIT(&tableq);
1428 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1430 kt = pfr_create_ktable(tbl, 0, 1);
1433 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1435 if (!tbl->pfrt_anchor[0])
1438 /* find or create root table */
1439 bzero(&key, sizeof(key));
1440 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1441 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1443 kt->pfrkt_root = rt;
1446 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1448 pfr_destroy_ktables(&tableq, 0);
1451 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1452 kt->pfrkt_root = rt;
/* A table may be defined only once per transaction. */
1453 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1456 shadow = pfr_create_ktable(tbl, 0, 0);
1457 if (shadow == NULL) {
1458 pfr_destroy_ktables(&tableq, 0);
1462 for (i = 0; i < size; i++) {
1463 if (COPYIN(addr+i, &ad, sizeof(ad)))
1465 if (pfr_validate_addr(&ad))
1467 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1469 p = pfr_create_kentry(&ad)
1472 if (pfr_route_kentry(shadow, p)) {
1473 pfr_destroy_kentry(p);
1476 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1479 if (!(flags & PFR_FLAG_DUMMY)) {
/* Replace any shadow left over from an earlier define in this txn. */
1480 if (kt->pfrkt_shadow != NULL)
1481 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1482 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1483 pfr_insert_ktables(&tableq);
1484 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1485 xaddr : NO_ADDRESSES;
1486 kt->pfrkt_shadow = shadow;
/* Dry run: tear everything down again. */
1488 pfr_clean_node_mask(shadow, &addrq);
1489 pfr_destroy_ktable(shadow, 0);
1490 pfr_destroy_ktables(&tableq, 0);
1491 pfr_destroy_kentries(&addrq);
/* Error path. */
1499 pfr_destroy_ktable(shadow, 0);
1500 pfr_destroy_ktables(&tableq, 0);
1501 pfr_destroy_kentries(&addrq);
/*
 * Abort an open table transaction for the given anchor/ruleset.
 * Every table flagged INACTIVE that belongs to this transaction has the
 * flag cleared, which (via pfr_setflags_ktable) discards its shadow copy.
 * NOTE(review): this chunk is elided; the error returns, braces and the
 * *ndel accounting are not visible here.
 */
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
struct pfr_ktableworkq workq;
struct pfr_ktable *p;
struct pf_ruleset *rs;
/* only the DUMMY (preview, no side effects) flag is accepted */
ACCEPT_FLAGS(PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
/* the ticket must match the one issued when the transaction was opened */
if (rs == NULL || !rs->topen || ticket != rs->tticket)
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
/* collect only tables that are part of this transaction */
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
/* queue the table with INACTIVE cleared as its new flag set */
p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
if (!(flags & PFR_FLAG_DUMMY)) {
/* apply the new flags; shadows are destroyed in pfr_setflags_ktable */
pfr_setflags_ktables(&workq);
pf_remove_if_empty_ruleset(rs);
/*
 * Commit an open table transaction: every table flagged INACTIVE for this
 * anchor/ruleset has its shadow contents promoted to the live table via
 * pfr_commit_ktable().  Honors PFR_FLAG_DUMMY (count only, no changes) and
 * PFR_FLAG_ATOMIC (wrap the commit loop in a splsoftnet-style section —
 * the actual spl calls are elided from this chunk).
 * NOTE(review): chunk is elided; error returns, xadd/xchange accounting
 * and the *nadd/*nchange stores are only partially visible.
 */
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
int *nchange, int flags)
struct pfr_ktable *p;
struct pfr_ktableworkq workq;
struct pf_ruleset *rs;
int s = 0, xadd = 0, xchange = 0;
/* timestamp used to (re)initialize statistics on committed entries */
long tzero = time_second;
ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
/* the ticket must match the one issued when the transaction was opened */
if (rs == NULL || !rs->topen || ticket != rs->tticket)
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
/* collect only tables that are part of this transaction */
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
/* an already-ACTIVE table counts as a change, otherwise as an add */
if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
if (!(flags & PFR_FLAG_DUMMY)) {
if (flags & PFR_FLAG_ATOMIC)
SLIST_FOREACH(p, &workq, pfrkt_workq)
pfr_commit_ktable(p, tzero);
if (flags & PFR_FLAG_ATOMIC)
pf_remove_if_empty_ruleset(rs);
if (nchange != NULL)
/*
 * Promote kt's shadow table to be its live contents.
 * Three cases, chosen by shadow state and kt's ACTIVE flag:
 *   1. shadow holds NO_ADDRESSES: only table flags change; stats are
 *      cleared if the table was not already active.
 *   2. kt is ACTIVE: merge entry-by-entry, computing add/change/delete
 *      queues so per-entry statistics of unchanged addresses survive.
 *   3. kt is inactive/empty: simply swap the radix heads wholesale.
 * Finally the shadow is destroyed and kt's flags recomputed.
 * NOTE(review): chunk is elided; the declarations of ad/nflags and some
 * braces/else lines are not visible here.
 */
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
struct pfr_ktable *shadow = kt->pfrkt_shadow;
if (shadow->pfrkt_cnt == NO_ADDRESSES) {
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
pfr_clstats_ktable(kt, tzero, 1);
} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
/* kt might contain addresses */
struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
struct pfr_kentry *p, *q, *next;
/* pull every entry out of the shadow for the merge pass */
pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
SLIST_INIT(&changeq);
SLIST_INIT(&garbageq);
pfr_clean_node_mask(shadow, &addrq);
for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
next = SLIST_NEXT(p, pfrke_workq); /* XXX */
pfr_copyout_addr(&ad, p);
/* does the live table already hold this address? */
q = pfr_lookup_addr(kt, &ad, 1);
/* same address but negation flipped -> record as a change */
if (q->pfrke_not != p->pfrke_not)
SLIST_INSERT_HEAD(&changeq, q,
/* duplicate of a live entry: shadow copy becomes garbage */
SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
/* genuinely new entry: stamp it and queue for insertion */
p->pfrke_tzero = tzero;
SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
/* live entries not marked during lookup are stale -> delete */
pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
pfr_insert_kentries(kt, &addq, tzero);
pfr_remove_kentries(kt, &delq);
/* changed entries: reset stats and invert the "not" flag */
pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
pfr_destroy_kentries(&garbageq);
/* kt cannot contain addresses */
SWAP(struct radix_node_head *, kt->pfrkt_ip4,
SWAP(struct radix_node_head *, kt->pfrkt_ip6,
SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
pfr_clstats_ktable(kt, tzero, 1);
/* new flags: shadow's user flags + kt's set flags, now ACTIVE */
nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
(kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
& ~PFR_TFLAG_INACTIVE;
pfr_destroy_ktable(shadow, 0);
kt->pfrkt_shadow = NULL;
pfr_setflags_ktable(kt, nflags);
/*
 * Sanity-check a user-supplied pfr_table: non-empty NUL-terminated name
 * with zeroed padding, no reserved anchor when no_reserved is set, and no
 * flags outside allowedflags.  Returns nonzero on failure (error returns
 * are elided from this chunk).
 */
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
if (!tbl->pfrt_name[0])
if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
/* name must be NUL-terminated within the fixed-size buffer */
if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
/* all bytes after the terminator must be zero (canonical form) */
for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
if (tbl->pfrt_name[i])
if (tbl->pfrt_flags & ~allowedflags)
/*
 * Return the number of tables visible through the given filter:
 * all rulesets, a specific ruleset, a specific anchor, or the main
 * ruleset, in that order of specificity.  Returns -1 when the named
 * ruleset/anchor does not exist.
 */
pfr_table_count(struct pfr_table *filter, int flags)
struct pf_ruleset *rs;
struct pf_anchor *ac;
if (flags & PFR_FLAG_ALLRSETS)
return (pfr_ktable_cnt);
if (filter->pfrt_ruleset[0]) {
rs = pf_find_ruleset(filter->pfrt_anchor,
filter->pfrt_ruleset);
return ((rs != NULL) ? rs->tables : -1);
if (filter->pfrt_anchor[0]) {
ac = pf_find_anchor(filter->pfrt_anchor);
return ((ac != NULL) ? ac->tables : -1);
return (pf_main_ruleset.tables);
/*
 * Decide whether table kt falls outside the scope of `filter`.
 * With PFR_FLAG_ALLRSETS nothing is skipped; otherwise the anchor must
 * match, and the ruleset too when the filter names one.
 * Nonzero return means "skip" (the return statements are elided here).
 */
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
if (flags & PFR_FLAG_ALLRSETS)
if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
PF_ANCHOR_NAME_SIZE))
/* empty ruleset in the filter matches any ruleset */
if (!filter->pfrt_ruleset[0])
if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
PF_RULESET_NAME_SIZE))
/* Insert every table on the work queue into the global tree. */
pfr_insert_ktables(struct pfr_ktableworkq *workq)
struct pfr_ktable *p;
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_insert_ktable(p);
/*
 * Add one table to the global red-black tree and, if it hangs under a
 * root table, take an anchor reference on that root — setting the root's
 * REFDANCHOR flag on the 0 -> 1 transition.
 */
pfr_insert_ktable(struct pfr_ktable *kt)
RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
if (kt->pfrkt_root != NULL)
if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
pfr_setflags_ktable(kt->pfrkt_root,
kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
/* Apply each queued table's pending pfrkt_nflags via pfr_setflags_ktable. */
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
struct pfr_ktable *p;
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_setflags_ktable(p, p->pfrkt_nflags);
/*
 * Transition table kt to the flag set `newf`, performing whatever
 * teardown that implies: an unreferenced non-persistent table loses
 * ACTIVE; a table with no SETMASK flags left is removed from the tree
 * and destroyed (dropping the anchor reference on its root); a table
 * leaving ACTIVE loses its addresses; a table leaving INACTIVE loses
 * its shadow.
 * NOTE(review): chunk is elided; an early return after the destroy path
 * is presumably among the missing lines — confirm against full source.
 */
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
struct pfr_kentryworkq addrq;
/* neither rule-referenced nor persistent -> cannot stay active */
if (!(newf & PFR_TFLAG_REFERENCED) &&
!(newf & PFR_TFLAG_PERSIST))
newf &= ~PFR_TFLAG_ACTIVE;
/* inactive tables carry no user-settable flags */
if (!(newf & PFR_TFLAG_ACTIVE))
newf &= ~PFR_TFLAG_USRMASK;
if (!(newf & PFR_TFLAG_SETMASK)) {
/* nothing keeps this table alive: unlink and destroy it */
RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
if (kt->pfrkt_root != NULL)
/* drop anchor ref; clear REFDANCHOR on last reference */
if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
pfr_setflags_ktable(kt->pfrkt_root,
kt->pfrkt_root->pfrkt_flags &
~PFR_TFLAG_REFDANCHOR);
pfr_destroy_ktable(kt, 1);
/* deactivating a non-empty table: flush its addresses */
if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
pfr_remove_kentries(kt, &addrq);
/* leaving a transaction: discard the shadow copy */
if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
pfr_destroy_ktable(kt->pfrkt_shadow, 1);
kt->pfrkt_shadow = NULL;
kt->pfrkt_flags = newf;
/* Clear statistics on every queued table, optionally recursing to entries. */
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
struct pfr_ktable *p;
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_clstats_ktable(p, tzero, recurse);
/*
 * Zero a table's packet/byte/match counters and reset its tzero stamp.
 * When `recurse` is set, per-entry statistics are cleared as well.
 * NOTE(review): the spl/critical-section lines around the clearing are
 * elided from this chunk.
 */
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
struct pfr_kentryworkq addrq;
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
pfr_clstats_kentries(&addrq, tzero, 0);
bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
kt->pfrkt_match = kt->pfrkt_nomatch = 0;
kt->pfrkt_tzero = tzero;
/*
 * Allocate and initialize a kernel table from the user-visible template
 * `tbl`.  Optionally attaches it to (or creates) its ruleset, bumping the
 * anchor's table count, then builds the IPv4 and IPv6 radix heads sized
 * to the respective address widths.  Returns NULL on allocation or
 * radix-init failure (the NULL returns are elided from this chunk).
 */
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
struct pfr_ktable *kt;
struct pf_ruleset *rs;
/* PR_NOWAIT: may fail; caller must handle NULL */
kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
bzero(kt, sizeof(*kt));
if (attachruleset) {
rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
/* ruleset lookup/creation failed: undo the allocation */
pfr_destroy_ktable(kt, 0);
if (rs->anchor != NULL)
rs->anchor->tables++;
/* key length = address width in bits for each address family */
if (!rn_inithead((void **)&kt->pfrkt_ip4,
offsetof(struct sockaddr_in, sin_addr) * 8) ||
!rn_inithead((void **)&kt->pfrkt_ip6,
offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
pfr_destroy_ktable(kt, 0);
kt->pfrkt_tzero = tzero;
/*
 * Destroy every table on the work queue.  The next pointer is saved
 * before each destroy because pfr_destroy_ktable frees the element.
 */
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
struct pfr_ktable *p, *q;
for (p = SLIST_FIRST(workq); p; p = q) {
q = SLIST_NEXT(p, pfrkt_workq);
pfr_destroy_ktable(p, flushaddr);
/*
 * Free a table and everything it owns: optionally its address entries
 * (flushaddr), its radix heads, its shadow table, and its ruleset
 * reference (removing the ruleset if that leaves it empty).  Finally the
 * pfr_ktable itself goes back to the pool.
 */
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
struct pfr_kentryworkq addrq;
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
pfr_clean_node_mask(kt, &addrq);
pfr_destroy_kentries(&addrq);
if (kt->pfrkt_ip4 != NULL)
free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
if (kt->pfrkt_ip6 != NULL)
free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
/* a pending transaction's shadow dies with its table */
if (kt->pfrkt_shadow != NULL)
pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
if (kt->pfrkt_rs != NULL) {
kt->pfrkt_rs->tables--;
if (kt->pfrkt_rs->anchor != NULL)
kt->pfrkt_rs->anchor->tables--;
pf_remove_if_empty_ruleset(kt->pfrkt_rs);
pool_put(&pfr_ktable_pl, kt);
/*
 * Ordering function for the pfr_ktables red-black tree: compare by table
 * name, then anchor, then ruleset.  Returns <0, 0 or >0 like strncmp.
 */
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
PF_ANCHOR_NAME_SIZE)))
return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
PF_RULESET_NAME_SIZE));
/*
 * Find a kernel table by its user-visible descriptor.  The cast is safe
 * because struct pfr_ktable begins with the same fields as pfr_table,
 * which is all pfr_ktable_compare examines.
 */
pfr_lookup_table(struct pfr_table *tbl)
/* struct pfr_ktable start like a struct pfr_table */
return (RB_FIND(pfr_ktablehead, &pfr_ktables,
(struct pfr_ktable *)tbl));
/*
 * Test whether address `a` (family `af`) matches table kt.  An inactive
 * table defers to its root table; a still-inactive table cannot match.
 * The lookup goes through the per-family radix tree using the static
 * pfr_sin/pfr_sin6 scratch sockaddrs; radix-internal ROOT nodes are not
 * real entries and are discarded.  A hit on a negated ("not") entry
 * counts as a non-match.  Also bumps match/nomatch statistics.
 * NOTE(review): the switch(af) framing and return lines are elided here.
 */
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
struct pfr_kentry *ke = NULL;
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
pfr_sin.sin_addr.s_addr = a->addr32[0];
ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
if (ke && KENTRY_RNF_ROOT(ke))
bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
if (ke && KENTRY_RNF_ROOT(ke))
/* negated entries invert the sense of a radix hit */
match = (ke && !ke->pfrke_not);
kt->pfrkt_nomatch++;
/*
 * Account a packet of `len` bytes against table kt for address `a`:
 * looks the address up like pfr_match_addr, then increments the table's
 * (and, when a concrete entry matched, the entry's) packet/byte counters
 * indexed by direction and pass/block operation.  A mismatch between the
 * lookup result and `notrule` is recorded under PFR_OP_XPASS and is only
 * expected for PASS rules — hence the printed assertion.
 * NOTE(review): the switch(af) framing and early returns are elided here.
 */
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
u_int64_t len, int dir_out, int op_pass, int notrule)
struct pfr_kentry *ke = NULL;
/* inactive table defers to its root, as in pfr_match_addr */
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
pfr_sin.sin_addr.s_addr = a->addr32[0];
ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
if (ke && KENTRY_RNF_ROOT(ke))
bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
if (ke && KENTRY_RNF_ROOT(ke))
/* lookup disagrees with the rule's expectation: book under XPASS */
if ((ke == NULL || ke->pfrke_not) != notrule) {
if (op_pass != PFR_OP_PASS)
printf("pfr_update_stats: assertion failed.\n");
op_pass = PFR_OP_XPASS;
kt->pfrkt_packets[dir_out][op_pass]++;
kt->pfrkt_bytes[dir_out][op_pass] += len;
/* XPASS traffic has no meaningful entry to charge */
if (ke != NULL && op_pass != PFR_OP_XPASS) {
ke->pfrke_packets[dir_out][op_pass]++;
ke->pfrke_bytes[dir_out][op_pass] += len;
/*
 * Attach a rule reference to the named table in ruleset rs, creating the
 * table (and, inside an anchor, its root table in the main ruleset) when
 * it does not exist yet.  The first rule reference sets the table's
 * REFERENCED flag.  Returns the table (error paths are elided here).
 */
pfr_attach_table(struct pf_ruleset *rs, char *name)
struct pfr_ktable *kt, *rt;
struct pfr_table tbl;
struct pf_anchor *ac = rs->anchor;
bzero(&tbl, sizeof(tbl));
strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
kt = pfr_lookup_table(&tbl);
kt = pfr_create_ktable(&tbl, time_second, 1);
/* root table lives in the main ruleset: strip anchor/ruleset */
bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
rt = pfr_lookup_table(&tbl);
rt = pfr_create_ktable(&tbl, 0, 1);
/* root creation failed: roll back the new table */
pfr_destroy_ktable(kt, 0);
pfr_insert_ktable(rt);
kt->pfrkt_root = rt;
pfr_insert_ktable(kt);
/* first rule reference turns on the REFERENCED flag */
if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
/*
 * Drop one rule reference from kt.  A non-positive refcount indicates a
 * bookkeeping bug and is only logged; when the last reference goes away
 * the REFERENCED flag is cleared, which may tear the table down inside
 * pfr_setflags_ktable.
 */
pfr_detach_table(struct pfr_ktable *kt)
if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
printf("pfr_detach_table: refcount = %d.\n",
kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
/*
 * Round-robin address selection from a table for address pools.
 * *pidx tracks the current entry index; `counter` remembers the last
 * address handed out within that entry's block so successive calls walk
 * through it.  Nested (more-specific) blocks inside the chosen block are
 * skipped by advancing the counter past them.  Returns raddr/rmask
 * describing the chosen block (exact return codes are elided here).
 * NOTE(review): chunk is elided; the _next_block/_next_entry loop labels
 * and several branches are among the missing lines.
 */
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
struct pfr_kentry *ke, *ke2;
struct pf_addr *addr;
union sockaddr_union mask;
int idx = -1, use_counter = 0;
/* scratch sockaddr for radix lookups, chosen by family */
addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
(struct pf_addr *)&pfr_sin6.sin6_addr;
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
/* a saved counter means: resume inside the current entry */
if (counter != NULL && idx >= 0)
ke = pfr_kentry_byidx(kt, idx, af);
pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
*raddr = SUNION2PF(&ke->pfrke_sa, af);
*rmask = SUNION2PF(&pfr_mask, af);
/* is supplied address within block? */
if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
/* no, go to next block in table */
PF_ACPY(addr, counter, af);
/* use first address of block */
PF_ACPY(addr, *raddr, af);
if (!KENTRY_NETWORK(ke)) {
/* this is a single IP address - no possible nested block */
PF_ACPY(counter, addr, af);
/* we don't want to use a nested block */
ke2 = (struct pfr_kentry *)(af == AF_INET ?
rn_match(&pfr_sin, kt->pfrkt_ip4) :
rn_match(&pfr_sin6, kt->pfrkt_ip6));
/* no need to check KENTRY_RNF_ROOT() here */
/* lookup return the same block - perfect */
PF_ACPY(counter, addr, af);
/* we need to increase the counter past the nested block */
pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
/* ok, we reached the end of our main block */
/* go to next block in table */
/*
 * Return the idx'th entry of the table's radix tree for family `af`,
 * found by walking the tree with a PFRW_POOL_GET walker (pfr_walktree
 * counts entries down to the requested index).  The switch(af) framing
 * and the walker's index initialization are elided from this chunk.
 */
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
struct pfr_walktree w;
bzero(&w, sizeof(w));
w.pfrw_op = PFRW_POOL_GET;
kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
return (w.pfrw_kentry);
kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
return (w.pfrw_kentry);
2098 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2100 struct pfr_walktree w;
2103 bzero(&w, sizeof(w));
2104 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2108 dyn->pfid_acnt4 = 0;
2109 dyn->pfid_acnt6 = 0;
2110 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2111 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2112 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2113 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);