1 /* $FreeBSD: src/sys/contrib/pf/net/pf_table.c,v 1.5 2004/07/28 06:14:44 kan Exp $ */
2 /* $OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $ */
3 /* $DragonFly: src/sys/net/pf/pf_table.c,v 1.5 2006/12/22 23:44:57 swildner Exp $ */
4 /* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */
7 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
9 * Copyright (c) 2002 Cedric Berger
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials provided
21 * with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
39 #include "opt_inet6.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/socket.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/thread2.h>
48 #include <vm/vm_zone.h>
51 #include <net/route.h>
52 #include <netinet/in.h>
53 #include <net/pf/pfvar.h>
/*
 * NOTE(review): this file is a partial extraction; each line carries its
 * original source line number and the gaps in that numbering show dropped
 * lines.  ACCEPT_FLAGS, FILLIN_SIN/FILLIN_SIN6 and SWAP below are visibly
 * truncated (missing do{}while(0) wrappers / bodies) — recover the full
 * upstream pf_table.c before editing.
 */
/* ACCEPT_FLAGS: reject unknown ioctl flags at entry; body truncated here. */
55 #define ACCEPT_FLAGS(oklist) \
57 if ((flags & ~(oklist)) & \
/* COPYIN/COPYOUT: copyin(9)/copyout(9) for user ioctls, bcopy in-kernel. */
62 #define COPYIN(from, to, size) \
63 ((flags & PFR_FLAG_USERIOCTL) ? \
64 copyin((from), (to), (size)) : \
65 (bcopy((from), (to), (size)), 0))
67 #define COPYOUT(from, to, size) \
68 ((flags & PFR_FLAG_USERIOCTL) ? \
69 copyout((from), (to), (size)) : \
70 (bcopy((from), (to), (size)), 0))
/* FILLIN_SIN/FILLIN_SIN6: populate sockaddr_in{,6} len/family/address. */
72 #define FILLIN_SIN(sin, addr) \
74 (sin).sin_len = sizeof(sin); \
75 (sin).sin_family = AF_INET; \
76 (sin).sin_addr = (addr); \
79 #define FILLIN_SIN6(sin6, addr) \
81 (sin6).sin6_len = sizeof(sin6); \
82 (sin6).sin6_family = AF_INET6; \
83 (sin6).sin6_addr = (addr); \
86 #define SWAP(type, a1, a2) \
93 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
94 (struct pf_addr *)&(su)->sin.sin_addr : \
95 (struct pf_addr *)&(su)->sin6.sin6_addr)
/* Address-family helpers: prefix width and "is a network, not a host". */
97 #define AF_BITS(af) (((af)==AF_INET)?32:128)
98 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
99 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True for the radix tree's internal root nodes, which must be ignored. */
100 #define KENTRY_RNF_ROOT(ke) \
101 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
103 #define NO_ADDRESSES (-1)
104 #define ENQUEUE_UNMARKED_ONLY (1)
105 #define INVERT_NEG_FLAG (1)
/*
 * pfr_walktree: per-walk state handed to pfr_walktree() via the radix
 * rnh_walktree callbacks.  NOTE(review): extraction gaps — the pfrw_op
 * enum (orig. lines 108-117) and trailing fields such as pfrw_free/
 * pfrw_flags (orig. lines 123-126) are missing from this copy.
 */
107 struct pfr_walktree {
118 struct pfr_addr *pfrw1_addr;
119 struct pfr_astats *pfrw1_astats;
120 struct pfr_kentryworkq *pfrw1_workq;
121 struct pfr_kentry *pfrw1_kentry;
122 struct pfi_dynaddr *pfrw1_dyn;
/* Accessors for the union members above (union pfrw_1 per upstream). */
127 #define pfrw_addr pfrw_1.pfrw1_addr
128 #define pfrw_astats pfrw_1.pfrw1_astats
129 #define pfrw_workq pfrw_1.pfrw1_workq
130 #define pfrw_kentry pfrw_1.pfrw1_kentry
131 #define pfrw_dyn pfrw_1.pfrw1_dyn
132 #define pfrw_cnt pfrw_free
/* senderr: set return code and jump to the _bad cleanup label. */
134 #define senderr(e) do { rv = (e); goto _bad; } while (0)
/* Zones and preformatted sockaddrs shared by all pfr_* routines. */
136 vm_zone_t pfr_ktable_pl;
137 vm_zone_t pfr_kentry_pl;
138 vm_zone_t pfr_kentry_pl2;
139 struct sockaddr_in pfr_sin;
140 struct sockaddr_in6 pfr_sin6;
141 union sockaddr_union pfr_mask;
142 struct pf_addr pfr_ffaddr;
/* Forward declarations for the file-local helpers below. */
144 void pfr_copyout_addr(struct pfr_addr *,
145 struct pfr_kentry *ke);
146 int pfr_validate_addr(struct pfr_addr *);
147 void pfr_enqueue_addrs(struct pfr_ktable *,
148 struct pfr_kentryworkq *, int *, int);
149 void pfr_mark_addrs(struct pfr_ktable *);
150 struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
151 struct pfr_addr *, int);
152 struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
153 void pfr_destroy_kentries(struct pfr_kentryworkq *);
154 void pfr_destroy_kentry(struct pfr_kentry *);
155 void pfr_insert_kentries(struct pfr_ktable *,
156 struct pfr_kentryworkq *, long);
157 void pfr_remove_kentries(struct pfr_ktable *,
158 struct pfr_kentryworkq *);
159 void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
161 void pfr_reset_feedback(struct pfr_addr *, int, int);
162 void pfr_prepare_network(union sockaddr_union *, int, int);
163 int pfr_route_kentry(struct pfr_ktable *,
164 struct pfr_kentry *);
165 int pfr_unroute_kentry(struct pfr_ktable *,
166 struct pfr_kentry *);
167 int pfr_walktree(struct radix_node *, void *);
168 int pfr_validate_table(struct pfr_table *, int, int);
169 int pfr_fix_anchor(char *);
170 void pfr_commit_ktable(struct pfr_ktable *, long);
171 void pfr_insert_ktables(struct pfr_ktableworkq *);
172 void pfr_insert_ktable(struct pfr_ktable *);
173 void pfr_setflags_ktables(struct pfr_ktableworkq *);
174 void pfr_setflags_ktable(struct pfr_ktable *, int);
175 void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
177 void pfr_clstats_ktable(struct pfr_ktable *, long, int);
178 struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int);
179 void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
180 void pfr_destroy_ktable(struct pfr_ktable *, int);
181 int pfr_ktable_compare(struct pfr_ktable *,
182 struct pfr_ktable *);
183 struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
184 void pfr_clean_node_mask(struct pfr_ktable *,
185 struct pfr_kentryworkq *);
186 int pfr_table_count(struct pfr_table *, int);
187 int pfr_skip_table(struct pfr_table *,
188 struct pfr_ktable *, int);
189 struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
/* Red-black tree of all ktables, keyed by pfr_ktable_compare(). */
191 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
192 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
194 struct pfr_ktablehead pfr_ktables;
195 struct pfr_table pfr_nulltable;
/*
 * Body fragment of pfr_initialize() — presumably; the function signature
 * (orig. lines ~197-200) is missing from this extraction.  Pre-fills the
 * shared sockaddr templates and the all-ones address used as a mask.
 */
201 pfr_sin.sin_len = sizeof(pfr_sin);
202 pfr_sin.sin_family = AF_INET;
203 pfr_sin6.sin6_len = sizeof(pfr_sin6);
204 pfr_sin6.sin6_family = AF_INET6;
206 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
/*
 * pfr_clr_addrs: remove every address from table `tbl`; *ndel gets the
 * count via pfr_enqueue_addrs().  Rejects const tables; honours the
 * DUMMY (no-op) and ATOMIC flags.  NOTE(review): extraction gaps — the
 * return type, braces, error returns and crit sections are missing here.
 */
210 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
212 struct pfr_ktable *kt;
213 struct pfr_kentryworkq workq;
215 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
216 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
218 kt = pfr_lookup_table(tbl);
219 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
221 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
223 pfr_enqueue_addrs(kt, &workq, ndel, 0);
225 if (!(flags & PFR_FLAG_DUMMY)) {
226 if (flags & PFR_FLAG_ATOMIC)
228 pfr_remove_kentries(kt, &workq);
229 if (flags & PFR_FLAG_ATOMIC)
232 kprintf("pfr_clr_addrs: corruption detected (%d).\n",
/*
 * pfr_add_addrs: add `size` addresses from user/kernel buffer `addr` to
 * table `tbl`.  A scratch table (tmpkt) detects duplicates within the
 * request itself; FEEDBACK writes a per-address result code back to the
 * caller.  NOTE(review): extraction gaps — braces, error returns, the
 * xadd counter updates and crit sections are missing from this copy.
 */
241 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
242 int *nadd, int flags)
244 struct pfr_ktable *kt, *tmpkt;
245 struct pfr_kentryworkq workq;
246 struct pfr_kentry *p, *q;
249 long tzero = time_second;
251 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
252 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
254 kt = pfr_lookup_table(tbl);
255 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
257 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* scratch table used only to detect duplicates inside this request */
259 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
263 for (i = 0; i < size; i++) {
264 if (COPYIN(addr+i, &ad, sizeof(ad)))
266 if (pfr_validate_addr(&ad))
268 p = pfr_lookup_addr(kt, &ad, 1);
269 q = pfr_lookup_addr(tmpkt, &ad, 1);
270 if (flags & PFR_FLAG_FEEDBACK) {
272 ad.pfra_fback = PFR_FB_DUPLICATE;
274 ad.pfra_fback = PFR_FB_ADDED;
275 else if (p->pfrke_not != ad.pfra_not)
276 ad.pfra_fback = PFR_FB_CONFLICT;
278 ad.pfra_fback = PFR_FB_NONE;
280 if (p == NULL && q == NULL) {
281 p = pfr_create_kentry(&ad, 0);
284 if (pfr_route_kentry(tmpkt, p)) {
285 pfr_destroy_kentry(p);
286 ad.pfra_fback = PFR_FB_NONE;
288 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
292 if (flags & PFR_FLAG_FEEDBACK)
293 if (COPYOUT(&ad, addr+i, sizeof(ad)))
296 pfr_clean_node_mask(tmpkt, &workq);
297 if (!(flags & PFR_FLAG_DUMMY)) {
298 if (flags & PFR_FLAG_ATOMIC)
300 pfr_insert_kentries(kt, &workq, tzero);
301 if (flags & PFR_FLAG_ATOMIC)
/* DUMMY run: discard the prepared entries instead of inserting */
304 pfr_destroy_kentries(&workq);
307 pfr_destroy_ktable(tmpkt, 0);
/* error path (_bad label upstream): undo scratch state and feedback */
310 pfr_clean_node_mask(tmpkt, &workq);
311 pfr_destroy_kentries(&workq);
312 if (flags & PFR_FLAG_FEEDBACK)
313 pfr_reset_feedback(addr, size, flags);
314 pfr_destroy_ktable(tmpkt, 0);
/*
 * pfr_del_addrs: delete the given addresses from `tbl`.  Chooses between
 * a full-table mark-and-sweep (better for large requests) and per-address
 * lookups (better for small ones) based on size vs. cnt/log2(cnt).
 * NOTE(review): extraction gaps — braces, returns, counter updates and
 * crit sections are missing from this copy.
 */
319 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
320 int *ndel, int flags)
322 struct pfr_ktable *kt;
323 struct pfr_kentryworkq workq;
324 struct pfr_kentry *p;
326 int i, rv, xdel = 0, log = 1;
328 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
329 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
331 kt = pfr_lookup_table(tbl);
332 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
334 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
337 * there are two algorithms to choose from here.
339 * n: number of addresses to delete
340 * N: number of addresses in the table
342 * one is O(N) and is better for large 'n'
343 * one is O(n*LOG(N)) and is better for small 'n'
345 * following code try to decide which one is best.
/* log becomes floor(log2(cnt))+1 — cheap logarithm for the heuristic */
347 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
349 if (size > kt->pfrkt_cnt/log) {
350 /* full table scan */
353 /* iterate over addresses to delete */
354 for (i = 0; i < size; i++) {
355 if (COPYIN(addr+i, &ad, sizeof(ad)))
357 if (pfr_validate_addr(&ad))
/* mark pass: flag each requested entry via exact lookup */
359 p = pfr_lookup_addr(kt, &ad, 1);
365 for (i = 0; i < size; i++) {
366 if (COPYIN(addr+i, &ad, sizeof(ad)))
368 if (pfr_validate_addr(&ad))
370 p = pfr_lookup_addr(kt, &ad, 1);
371 if (flags & PFR_FLAG_FEEDBACK) {
373 ad.pfra_fback = PFR_FB_NONE;
374 else if (p->pfrke_not != ad.pfra_not)
375 ad.pfra_fback = PFR_FB_CONFLICT;
376 else if (p->pfrke_mark)
377 ad.pfra_fback = PFR_FB_DUPLICATE;
379 ad.pfra_fback = PFR_FB_DELETED;
381 if (p != NULL && p->pfrke_not == ad.pfra_not &&
384 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
387 if (flags & PFR_FLAG_FEEDBACK)
388 if (COPYOUT(&ad, addr+i, sizeof(ad)))
391 if (!(flags & PFR_FLAG_DUMMY)) {
392 if (flags & PFR_FLAG_ATOMIC)
394 pfr_remove_kentries(kt, &workq);
395 if (flags & PFR_FLAG_ATOMIC)
/* error path: reset per-address feedback codes on failure */
402 if (flags & PFR_FLAG_FEEDBACK)
403 pfr_reset_feedback(addr, size, flags);
/*
 * pfr_set_addrs: replace the table contents with exactly the given list —
 * computes add/delete/change sets against the current table (mark/sweep),
 * optionally reporting deleted entries back after the input array when
 * *size2 provides room.  NOTE(review): extraction gaps — braces, returns,
 * counter updates, crit sections and the mark step are missing here.
 */
408 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
409 int *size2, int *nadd, int *ndel, int *nchange, int flags,
410 u_int32_t ignore_pfrt_flags)
412 struct pfr_ktable *kt, *tmpkt;
413 struct pfr_kentryworkq addq, delq, changeq;
414 struct pfr_kentry *p, *q;
416 int i, rv, xadd = 0, xdel = 0, xchange = 0;
417 long tzero = time_second;
419 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
420 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
423 kt = pfr_lookup_table(tbl);
424 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
426 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
/* scratch table to catch duplicates within the incoming list */
428 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
434 SLIST_INIT(&changeq);
435 for (i = 0; i < size; i++) {
436 if (COPYIN(addr+i, &ad, sizeof(ad)))
438 if (pfr_validate_addr(&ad))
440 ad.pfra_fback = PFR_FB_NONE;
441 p = pfr_lookup_addr(kt, &ad, 1);
444 ad.pfra_fback = PFR_FB_DUPLICATE;
/* existing entry: flip negation flag if it differs */
448 if (p->pfrke_not != ad.pfra_not) {
449 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
450 ad.pfra_fback = PFR_FB_CHANGED;
454 q = pfr_lookup_addr(tmpkt, &ad, 1);
456 ad.pfra_fback = PFR_FB_DUPLICATE;
459 p = pfr_create_kentry(&ad, 0);
462 if (pfr_route_kentry(tmpkt, p)) {
463 pfr_destroy_kentry(p);
464 ad.pfra_fback = PFR_FB_NONE;
466 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
467 ad.pfra_fback = PFR_FB_ADDED;
472 if (flags & PFR_FLAG_FEEDBACK)
473 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/* everything still unmarked was not in the new list -> delete set */
476 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
477 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
478 if (*size2 < size+xdel) {
483 SLIST_FOREACH(p, &delq, pfrke_workq) {
484 pfr_copyout_addr(&ad, p);
485 ad.pfra_fback = PFR_FB_DELETED;
486 if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
491 pfr_clean_node_mask(tmpkt, &addq);
492 if (!(flags & PFR_FLAG_DUMMY)) {
493 if (flags & PFR_FLAG_ATOMIC)
495 pfr_insert_kentries(kt, &addq, tzero);
496 pfr_remove_kentries(kt, &delq);
497 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
498 if (flags & PFR_FLAG_ATOMIC)
501 pfr_destroy_kentries(&addq);
508 if ((flags & PFR_FLAG_FEEDBACK) && size2)
510 pfr_destroy_ktable(tmpkt, 0);
/* error path: discard prepared additions and reset feedback */
513 pfr_clean_node_mask(tmpkt, &addq);
514 pfr_destroy_kentries(&addq);
515 if (flags & PFR_FLAG_FEEDBACK)
516 pfr_reset_feedback(addr, size, flags);
517 pfr_destroy_ktable(tmpkt, 0);
/*
 * pfr_tst_addrs: test which of the given host addresses match `tbl`
 * (longest-prefix match); *nmatch counts positive matches.  REPLACE
 * rewrites each input with the matching table entry.  NOTE(review):
 * extraction gaps — braces, returns and the match counter are missing.
 */
522 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
523 int *nmatch, int flags)
525 struct pfr_ktable *kt;
526 struct pfr_kentry *p;
530 ACCEPT_FLAGS(PFR_FLAG_REPLACE);
531 if (pfr_validate_table(tbl, 0, 0))
533 kt = pfr_lookup_table(tbl);
534 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
537 for (i = 0; i < size; i++) {
538 if (COPYIN(addr+i, &ad, sizeof(ad)))
540 if (pfr_validate_addr(&ad))
/* only host addresses may be tested, not networks */
542 if (ADDR_NETWORK(&ad))
544 p = pfr_lookup_addr(kt, &ad, 0);
545 if (flags & PFR_FLAG_REPLACE)
546 pfr_copyout_addr(&ad, p);
547 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
548 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
549 if (p != NULL && !p->pfrke_not)
551 if (COPYOUT(&ad, addr+i, sizeof(ad)))
/*
 * pfr_get_addrs: copy all addresses of `tbl` out to the caller.  If the
 * caller's buffer (*size) is too small, only the required size is
 * returned.  NOTE(review): extraction gaps — braces, returns and the
 * ip4/ip6 walk sequencing lines are missing from this copy.
 */
560 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
563 struct pfr_ktable *kt;
564 struct pfr_walktree w;
568 if (pfr_validate_table(tbl, 0, 0))
570 kt = pfr_lookup_table(tbl);
571 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
573 if (kt->pfrkt_cnt > *size) {
574 *size = kt->pfrkt_cnt;
578 bzero(&w, sizeof(w));
579 w.pfrw_op = PFRW_GET_ADDRS;
581 w.pfrw_free = kt->pfrkt_cnt;
582 w.pfrw_flags = flags;
583 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
585 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
/* pfrw_free must land exactly at 0, else cnt was inconsistent */
590 kprintf("pfr_get_addrs: corruption detected (%d).\n",
594 *size = kt->pfrkt_cnt;
/*
 * pfr_get_astats: like pfr_get_addrs but copies out pfr_astats (address
 * plus packet/byte counters); PFR_FLAG_CLSTATS would also clear the
 * per-entry stats, but upstream marks it disabled here.
 */
599 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
602 struct pfr_ktable *kt;
603 struct pfr_walktree w;
604 struct pfr_kentryworkq workq;
606 long tzero = time_second;
608 ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
609 if (pfr_validate_table(tbl, 0, 0))
611 kt = pfr_lookup_table(tbl);
612 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
614 if (kt->pfrkt_cnt > *size) {
615 *size = kt->pfrkt_cnt;
619 bzero(&w, sizeof(w));
620 w.pfrw_op = PFRW_GET_ASTATS;
621 w.pfrw_astats = addr;
622 w.pfrw_free = kt->pfrkt_cnt;
623 w.pfrw_flags = flags;
624 if (flags & PFR_FLAG_ATOMIC)
626 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
628 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
629 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
630 pfr_enqueue_addrs(kt, &workq, NULL, 0);
631 pfr_clstats_kentries(&workq, tzero, 0);
633 if (flags & PFR_FLAG_ATOMIC)
639 kprintf("pfr_get_astats: corruption detected (%d).\n",
643 *size = kt->pfrkt_cnt;
/*
 * pfr_clr_astats: zero the packet/byte counters of the listed addresses;
 * *nzero counts cleared entries.  NOTE(review): extraction gaps — braces,
 * returns, counter updates and crit sections are missing from this copy.
 */
648 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
649 int *nzero, int flags)
651 struct pfr_ktable *kt;
652 struct pfr_kentryworkq workq;
653 struct pfr_kentry *p;
655 int i, rv, xzero = 0;
657 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
658 if (pfr_validate_table(tbl, 0, 0))
660 kt = pfr_lookup_table(tbl);
661 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
664 for (i = 0; i < size; i++) {
665 if (COPYIN(addr+i, &ad, sizeof(ad)))
667 if (pfr_validate_addr(&ad))
669 p = pfr_lookup_addr(kt, &ad, 1);
670 if (flags & PFR_FLAG_FEEDBACK) {
671 ad.pfra_fback = (p != NULL) ?
672 PFR_FB_CLEARED : PFR_FB_NONE;
673 if (COPYOUT(&ad, addr+i, sizeof(ad)))
677 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
682 if (!(flags & PFR_FLAG_DUMMY)) {
683 if (flags & PFR_FLAG_ATOMIC)
685 pfr_clstats_kentries(&workq, 0, 0);
686 if (flags & PFR_FLAG_ATOMIC)
693 if (flags & PFR_FLAG_FEEDBACK)
694 pfr_reset_feedback(addr, size, flags);
/*
 * pfr_validate_addr: sanity-check a pfr_addr from the caller — prefix
 * length within the AF's bounds, no stray bits below the prefix or in
 * unused union bytes, and a 0/1 negation flag.  NOTE(review): the return
 * statements and switch braces are missing from this extraction.
 */
699 pfr_validate_addr(struct pfr_addr *ad)
703 switch (ad->pfra_af) {
706 if (ad->pfra_net > 32)
712 if (ad->pfra_net > 128)
719 if (ad->pfra_net < 128 &&
720 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
722 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
723 if (((caddr_t)ad)[i])
725 if (ad->pfra_not && ad->pfra_not != 1)
/*
 * pfr_enqueue_addrs: walk both radix trees of `kt` collecting entries
 * onto `workq` (PFRW_SWEEP when sweeping marked entries, else
 * PFRW_ENQUEUE); *naddr, when set upstream, receives the count.
 * NOTE(review): extraction gaps — braces and the naddr assignment lines
 * are missing from this copy.
 */
733 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
734 int *naddr, int sweep)
736 struct pfr_walktree w;
739 bzero(&w, sizeof(w));
740 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
741 w.pfrw_workq = workq;
742 if (kt->pfrkt_ip4 != NULL)
743 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
744 kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
745 if (kt->pfrkt_ip6 != NULL)
746 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
747 kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
/* pfr_mark_addrs: clear the per-entry mark on every address in kt. */
753 pfr_mark_addrs(struct pfr_ktable *kt)
755 struct pfr_walktree w;
757 bzero(&w, sizeof(w));
758 w.pfrw_op = PFRW_MARK;
759 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
760 kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
761 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
762 kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
/*
 * pfr_lookup_addr: find the kentry for `ad` in `kt` — rn_lookup (exact,
 * with mask) for networks, rn_match (longest-prefix) for hosts.  When
 * `exact` is set a network match for a host query is rejected upstream.
 * NOTE(review): braces, crit_exit and return statements are missing here.
 */
767 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
769 union sockaddr_union sa, mask;
770 struct radix_node_head *head = NULL;
771 struct pfr_kentry *ke;
773 bzero(&sa, sizeof(sa));
774 if (ad->pfra_af == AF_INET) {
775 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
776 head = kt->pfrkt_ip4;
777 } else if ( ad->pfra_af == AF_INET6 ) {
778 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
779 head = kt->pfrkt_ip6;
781 if (ADDR_NETWORK(ad)) {
782 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
783 crit_enter(); /* rn_lookup makes use of globals */
784 ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
787 if (ke && KENTRY_RNF_ROOT(ke))
790 ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
791 if (ke && KENTRY_RNF_ROOT(ke))
793 if (exact && ke && KENTRY_NETWORK(ke))
/*
 * pfr_create_kentry: allocate and fill a kentry from a pfr_addr; `intr`
 * selects the interrupt-safe pool (pl2) and is recorded so the matching
 * pool is used on free.  NOTE(review): extraction gaps throughout this
 * group — braces, NULL checks and return statements are missing.
 */
800 pfr_create_kentry(struct pfr_addr *ad, int intr)
802 struct pfr_kentry *ke;
805 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
807 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
810 bzero(ke, sizeof(*ke));
812 if (ad->pfra_af == AF_INET)
813 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
814 else if (ad->pfra_af == AF_INET6)
815 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
816 ke->pfrke_af = ad->pfra_af;
817 ke->pfrke_net = ad->pfra_net;
818 ke->pfrke_not = ad->pfra_not;
819 ke->pfrke_intrpool = intr;
/* pfr_destroy_kentries: free every entry on the work queue. */
824 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
826 struct pfr_kentry *p, *q;
828 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
829 q = SLIST_NEXT(p, pfrke_workq);
830 pfr_destroy_kentry(p);
/* pfr_destroy_kentry: return the entry to the pool it came from. */
835 pfr_destroy_kentry(struct pfr_kentry *ke)
837 if (ke->pfrke_intrpool)
838 pool_put(&pfr_kentry_pl2, ke);
840 pool_put(&pfr_kentry_pl, ke);
/*
 * pfr_insert_kentries: route each queued entry into the table's radix
 * trees, stamping its tzero; routing failures are logged via kprintf.
 */
844 pfr_insert_kentries(struct pfr_ktable *kt,
845 struct pfr_kentryworkq *workq, long tzero)
847 struct pfr_kentry *p;
850 SLIST_FOREACH(p, workq, pfrke_workq) {
851 rv = pfr_route_kentry(kt, p);
853 kprintf("pfr_insert_kentries: cannot route entry "
857 p->pfrke_tzero = tzero;
/*
 * pfr_insert_kentry: single-address insert used from interrupt context
 * (pfr_create_kentry is called with intr=1).
 */
864 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
866 struct pfr_kentry *p;
869 p = pfr_lookup_addr(kt, ad, 1);
872 p = pfr_create_kentry(ad, 1);
876 rv = pfr_route_kentry(kt, p);
880 p->pfrke_tzero = tzero;
/* pfr_remove_kentries: unroute queued entries, then free them all. */
887 pfr_remove_kentries(struct pfr_ktable *kt,
888 struct pfr_kentryworkq *workq)
890 struct pfr_kentry *p;
893 SLIST_FOREACH(p, workq, pfrke_workq) {
894 pfr_unroute_kentry(kt, p);
898 pfr_destroy_kentries(workq);
/*
 * pfr_clean_node_mask: unroute queued entries from the (scratch) table
 * without freeing them — used before handing the queue to the real table.
 */
902 pfr_clean_node_mask(struct pfr_ktable *kt,
903 struct pfr_kentryworkq *workq)
905 struct pfr_kentry *p;
907 SLIST_FOREACH(p, workq, pfrke_workq)
908 pfr_unroute_kentry(kt, p);
/*
 * pfr_prepare_network: build a netmask sockaddr for a `net`-bit prefix in
 * the given AF (per-word htonl(-1 << (32-net)) construction for v6).
 * NOTE(review): extraction gaps — the AF_INET branch header and some v6
 * loop lines are missing from this copy.
 */
943 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
947 bzero(sa, sizeof(*sa));
949 sa->sin.sin_len = sizeof(sa->sin);
950 sa->sin.sin_family = AF_INET;
951 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
952 } else if (af == AF_INET6) {
953 sa->sin6.sin6_len = sizeof(sa->sin6);
954 sa->sin6.sin6_family = AF_INET6;
955 for (i = 0; i < 4; i++) {
957 sa->sin6.sin6_addr.s6_addr32[i] =
958 net ? htonl(-1 << (32-net)) : 0;
961 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
/*
 * pfr_route_kentry: insert `ke` into the table's v4/v6 radix tree, with
 * a prepared mask for network entries; returns -1 when rn_addroute
 * yields NULL (duplicate or failure).  Missing lines here include braces
 * and the crit section around rn_addroute.
 */
968 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
970 union sockaddr_union mask;
971 struct radix_node *rn;
972 struct radix_node_head *head = NULL;
974 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
975 if (ke->pfrke_af == AF_INET)
976 head = kt->pfrkt_ip4;
977 else if (ke->pfrke_af == AF_INET6)
978 head = kt->pfrkt_ip6;
981 if (KENTRY_NETWORK(ke)) {
982 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
983 rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
986 rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
990 return (rn == NULL ? -1 : 0);
/*
 * pfr_unroute_kentry: remove `ke` from the radix tree; logs on failure
 * (rn_delete returning NULL).
 */
994 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
996 union sockaddr_union mask;
997 struct radix_node *rn;
998 struct radix_node_head *head = NULL;
1000 if (ke->pfrke_af == AF_INET)
1001 head = kt->pfrkt_ip4;
1002 else if (ke->pfrke_af == AF_INET6)
1003 head = kt->pfrkt_ip6;
1006 if (KENTRY_NETWORK(ke)) {
1007 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1008 rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
1010 rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
1014 kprintf("pfr_unroute_kentry: delete failed.\n");
/* pfr_copyout_addr: convert an in-kernel kentry back to a pfr_addr. */
1021 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1023 bzero(ad, sizeof(*ad));
1026 ad->pfra_af = ke->pfrke_af;
1027 ad->pfra_net = ke->pfrke_net;
1028 ad->pfra_not = ke->pfrke_not;
1029 if (ad->pfra_af == AF_INET)
1030 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1031 else if (ad->pfra_af == AF_INET6)
1032 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
/*
 * pfr_walktree: radix-tree visitor dispatching on w->pfrw_op — mark,
 * sweep/enqueue, copy-out of addresses or stats, nth-entry lookup, and
 * dynaddr update.  Returning 1 stops the walk.  NOTE(review): extraction
 * gaps — several case labels, braces and break statements are missing,
 * so the visible case bodies do not all line up with their labels.
 */
1036 pfr_walktree(struct radix_node *rn, void *arg)
1038 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1039 struct pfr_walktree *w = arg;
1040 int flags = w->pfrw_flags;
1042 switch (w->pfrw_op) {
1051 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1054 case PFRW_GET_ADDRS:
1055 if (w->pfrw_free-- > 0) {
1058 pfr_copyout_addr(&ad, ke);
1059 if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1064 case PFRW_GET_ASTATS:
1065 if (w->pfrw_free-- > 0) {
1066 struct pfr_astats as;
1068 pfr_copyout_addr(&as.pfras_a, ke);
1071 bcopy(ke->pfrke_packets, as.pfras_packets,
1072 sizeof(as.pfras_packets));
1073 bcopy(ke->pfrke_bytes, as.pfras_bytes,
1074 sizeof(as.pfras_bytes));
1076 as.pfras_tzero = ke->pfrke_tzero;
1078 if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1085 break; /* negative entries are ignored */
1086 if (!w->pfrw_cnt--) {
1087 w->pfrw_kentry = ke;
1088 return (1); /* finish search */
1091 case PFRW_DYNADDR_UPDATE:
1092 if (ke->pfrke_af == AF_INET) {
1093 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1095 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1096 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1097 &ke->pfrke_sa, AF_INET);
1098 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1099 &pfr_mask, AF_INET);
1100 } else if (ke->pfrke_af == AF_INET6){
1101 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1103 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1104 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1105 &ke->pfrke_sa, AF_INET6);
1106 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1107 &pfr_mask, AF_INET6);
/*
 * pfr_clr_tables: deactivate (clear the ACTIVE flag on) every table that
 * matches `filter`, skipping the reserved anchor; *ndel upstream receives
 * the count.  NOTE(review): extraction gaps — braces, returns, counter
 * updates and crit sections are missing from this copy.
 */
1115 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1117 struct pfr_ktableworkq workq;
1118 struct pfr_ktable *p;
1121 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
1122 if (pfr_fix_anchor(filter->pfrt_anchor))
1124 if (pfr_table_count(filter, flags) < 0)
1128 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1129 if (pfr_skip_table(filter, p, flags))
1131 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1133 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1135 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1136 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1139 if (!(flags & PFR_FLAG_DUMMY)) {
1140 if (flags & PFR_FLAG_ATOMIC)
1142 pfr_setflags_ktables(&workq);
1143 if (flags & PFR_FLAG_ATOMIC)
/*
 * pfr_add_tables: create the listed tables (and, for anchored tables,
 * their root table) or re-activate existing inactive ones; *nadd
 * upstream receives the count.  NOTE(review): extraction gaps — braces,
 * duplicate-skip gotos, returns and crit sections are missing here.
 */
1152 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1154 struct pfr_ktableworkq addq, changeq;
1155 struct pfr_ktable *p, *q, *r, key;
1156 int i, rv, xadd = 0;
1157 long tzero = time_second;
1159 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1161 SLIST_INIT(&changeq);
1162 for (i = 0; i < size; i++) {
1163 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1165 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1166 flags & PFR_FLAG_USERIOCTL))
1168 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1169 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1171 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
/* skip if an identical table is already queued for addition */
1174 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1175 if (!pfr_ktable_compare(p, q))
1178 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1180 if (!key.pfrkt_anchor[0])
1183 /* find or create root table */
1184 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1185 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1190 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1191 if (!pfr_ktable_compare(&key, q)) {
1196 key.pfrkt_flags = 0;
1197 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1200 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
/* table exists but is inactive: queue a flag change instead */
1202 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1203 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1204 if (!pfr_ktable_compare(&key, q))
1206 p->pfrkt_nflags = (p->pfrkt_flags &
1207 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1208 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1214 if (!(flags & PFR_FLAG_DUMMY)) {
1215 if (flags & PFR_FLAG_ATOMIC)
1217 pfr_insert_ktables(&addq);
1218 pfr_setflags_ktables(&changeq);
1219 if (flags & PFR_FLAG_ATOMIC)
1222 pfr_destroy_ktables(&addq, 0);
/* error path: free any tables created before the failure */
1227 pfr_destroy_ktables(&addq, 0);
/*
 * pfr_del_tables: deactivate the listed tables (clear ACTIVE via
 * pfr_setflags_ktables); *ndel upstream receives the count.
 * NOTE(review): extraction gaps — braces, returns, counters and crit
 * sections are missing throughout this group of functions.
 */
1232 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1234 struct pfr_ktableworkq workq;
1235 struct pfr_ktable *p, *q, key;
1238 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1240 for (i = 0; i < size; i++) {
1241 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1243 if (pfr_validate_table(&key.pfrkt_t, 0,
1244 flags & PFR_FLAG_USERIOCTL))
1246 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1247 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1248 SLIST_FOREACH(q, &workq, pfrkt_workq)
1249 if (!pfr_ktable_compare(p, q))
1251 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1252 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1259 if (!(flags & PFR_FLAG_DUMMY)) {
1260 if (flags & PFR_FLAG_ATOMIC)
1262 pfr_setflags_ktables(&workq);
1263 if (flags & PFR_FLAG_ATOMIC)
/*
 * pfr_get_tables: copy out the pfr_table records of all tables matching
 * `filter`; if the buffer is too small only the required size is
 * returned in *size.
 */
1272 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1275 struct pfr_ktable *p;
1278 ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
1279 if (pfr_fix_anchor(filter->pfrt_anchor))
1281 n = nn = pfr_table_count(filter, flags);
1288 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1289 if (pfr_skip_table(filter, p, flags))
1293 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
1297 kprintf("pfr_get_tables: corruption detected (%d).\n", n);
/*
 * pfr_get_tstats: like pfr_get_tables but copies out pfr_tstats
 * (table stats); PFR_FLAG_CLSTATS would also clear them afterwards
 * (upstream notes it disabled).
 */
1305 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1308 struct pfr_ktable *p;
1309 struct pfr_ktableworkq workq;
1311 long tzero = time_second;
1313 ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
1314 /* XXX PFR_FLAG_CLSTATS disabled */
1315 if (pfr_fix_anchor(filter->pfrt_anchor))
1317 n = nn = pfr_table_count(filter, flags);
1325 if (flags & PFR_FLAG_ATOMIC)
1327 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1328 if (pfr_skip_table(filter, p, flags))
1332 if (!(flags & PFR_FLAG_ATOMIC))
1334 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
1338 if (!(flags & PFR_FLAG_ATOMIC))
1340 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1342 if (flags & PFR_FLAG_CLSTATS)
1343 pfr_clstats_ktables(&workq, tzero,
1344 flags & PFR_FLAG_ADDRSTOO);
1345 if (flags & PFR_FLAG_ATOMIC)
1348 kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
/*
 * pfr_clr_tstats: zero the statistics of the listed tables; *nzero
 * upstream receives the count.  ADDRSTOO also clears per-address stats.
 */
1356 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1358 struct pfr_ktableworkq workq;
1359 struct pfr_ktable *p, key;
1361 long tzero = time_second;
1363 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
1365 for (i = 0; i < size; i++) {
1366 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1368 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1370 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1372 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1376 if (!(flags & PFR_FLAG_DUMMY)) {
1377 if (flags & PFR_FLAG_ATOMIC)
1379 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1380 if (flags & PFR_FLAG_ATOMIC)
/*
 * pfr_set_tflags: set/clear user flags on the listed tables.  Rejects
 * non-user flags and overlapping set/clear masks; clearing PERSIST on an
 * unreferenced table deactivates it (counted in xdel).  NOTE(review):
 * extraction gaps — braces, returns and some flag arithmetic lines are
 * missing from this copy.
 */
1389 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1390 int *nchange, int *ndel, int flags)
1392 struct pfr_ktableworkq workq;
1393 struct pfr_ktable *p, *q, key;
1394 int i, xchange = 0, xdel = 0;
1396 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1397 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1398 (clrflag & ~PFR_TFLAG_USRMASK) ||
1399 (setflag & clrflag))
1402 for (i = 0; i < size; i++) {
1403 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1405 if (pfr_validate_table(&key.pfrkt_t, 0,
1406 flags & PFR_FLAG_USERIOCTL))
1408 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1409 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1410 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1412 if (p->pfrkt_nflags == p->pfrkt_flags)
1414 SLIST_FOREACH(q, &workq, pfrkt_workq)
1415 if (!pfr_ktable_compare(p, q))
1417 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1418 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1419 (clrflag & PFR_TFLAG_PERSIST) &&
1420 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1428 if (!(flags & PFR_FLAG_DUMMY)) {
1429 if (flags & PFR_FLAG_ATOMIC)
1431 pfr_setflags_ktables(&workq);
1432 if (flags & PFR_FLAG_ATOMIC)
1435 if (nchange != NULL)
/*
 * pfr_ina_begin: start a table transaction for the given ruleset anchor —
 * clears any leftover INACTIVE tables from a previous attempt and issues
 * a fresh ticket (*ticket = ++rs->tticket).  NOTE(review): extraction
 * gaps — braces, returns and the ndel accounting lines are missing here.
 */
1443 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1445 struct pfr_ktableworkq workq;
1446 struct pfr_ktable *p;
1447 struct pf_ruleset *rs;
1450 ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1451 rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1455 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1456 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1457 pfr_skip_table(trs, p, 0))
1459 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1460 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1463 if (!(flags & PFR_FLAG_DUMMY)) {
1464 pfr_setflags_ktables(&workq);
1466 *ticket = ++rs->tticket;
1469 pf_remove_if_empty_ruleset(rs);
/*
 * pfr_ina_define: stage a table definition inside an open transaction
 * (ticket must match rs->tticket).  Builds the ktable chain (including a
 * root table for anchored tables) plus a shadow table holding the staged
 * addresses; the shadow replaces any previous one and is attached via
 * kt->pfrkt_shadow for later commit.  NOTE(review): extraction gaps —
 * braces, returns, counters and some error-path lines are missing here.
 */
1476 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1477 int *nadd, int *naddr, u_int32_t ticket, int flags)
1479 struct pfr_ktableworkq tableq;
1480 struct pfr_kentryworkq addrq;
1481 struct pfr_ktable *kt, *rt, *shadow, key;
1482 struct pfr_kentry *p;
1484 struct pf_ruleset *rs;
1485 int i, rv, xadd = 0, xaddr = 0;
1487 ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
1488 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1490 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1491 flags & PFR_FLAG_USERIOCTL))
1493 rs = pf_find_ruleset(tbl->pfrt_anchor);
1494 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1496 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1497 SLIST_INIT(&tableq);
1498 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1500 kt = pfr_create_ktable(tbl, 0, 1);
1503 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1505 if (!tbl->pfrt_anchor[0])
1508 /* find or create root table */
1509 bzero(&key, sizeof(key));
1510 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1511 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1513 kt->pfrkt_root = rt;
1516 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1518 pfr_destroy_ktables(&tableq, 0);
1521 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1522 kt->pfrkt_root = rt;
1523 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
/* shadow table accumulates the staged addresses for this define */
1526 shadow = pfr_create_ktable(tbl, 0, 0);
1527 if (shadow == NULL) {
1528 pfr_destroy_ktables(&tableq, 0);
1532 for (i = 0; i < size; i++) {
1533 if (COPYIN(addr+i, &ad, sizeof(ad)))
1535 if (pfr_validate_addr(&ad))
1537 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1539 p = pfr_create_kentry(&ad, 0);
1542 if (pfr_route_kentry(shadow, p)) {
1543 pfr_destroy_kentry(p);
1546 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1549 if (!(flags & PFR_FLAG_DUMMY)) {
1550 if (kt->pfrkt_shadow != NULL)
1551 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1552 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1553 pfr_insert_ktables(&tableq);
1554 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1555 xaddr : NO_ADDRESSES;
1556 kt->pfrkt_shadow = shadow;
/* DUMMY run: tear the staged state back down */
1558 pfr_clean_node_mask(shadow, &addrq);
1559 pfr_destroy_ktable(shadow, 0);
1560 pfr_destroy_ktables(&tableq, 0);
1561 pfr_destroy_kentries(&addrq);
/* error path (_bad label upstream) */
1569 pfr_destroy_ktable(shadow, 0);
1570 pfr_destroy_ktables(&tableq, 0);
1571 pfr_destroy_kentries(&addrq);
/*
 * Abort an open table transaction: for every table in anchor *trs that
 * is marked PFR_TFLAG_INACTIVE, clear that flag (which discards the
 * staged shadow via pfr_setflags_ktable).  The ticket must match the
 * open transaction.  *ndel receives the number of rolled-back tables
 * (assignment line elided here).
 */
1576 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1578 struct pfr_ktableworkq workq;
1579 struct pfr_ktable *p;
1580 struct pf_ruleset *rs;
1583 ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1584 rs = pf_find_ruleset(trs->pfrt_anchor);
1585 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1588 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1589 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1590 pfr_skip_table(trs, p, 0))
1592 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1593 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1596 if (!(flags & PFR_FLAG_DUMMY)) {
1597 pfr_setflags_ktables(&workq);
1599 pf_remove_if_empty_ruleset(rs);
/*
 * Commit an open table transaction: every table in anchor *trs marked
 * PFR_TFLAG_INACTIVE has its staged shadow merged into the live table
 * via pfr_commit_ktable().  *nadd counts newly created tables, *nchange
 * counts tables that were already active and got their contents replaced.
 */
1607 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1608 int *nchange, int flags)
1610 struct pfr_ktable *p, *q;
1611 struct pfr_ktableworkq workq;
1612 struct pf_ruleset *rs;
1613 int xadd = 0, xchange = 0;
1614 long tzero = time_second;
1616 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1617 rs = pf_find_ruleset(trs->pfrt_anchor);
1618 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1622 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1623 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1624 pfr_skip_table(trs, p, 0))
1626 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq)
1627 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1633 if (!(flags & PFR_FLAG_DUMMY)) {
1634 if (flags & PFR_FLAG_ATOMIC)
/*
 * pfr_commit_ktable() may free p, so the next pointer is
 * fetched before committing each table.
 */
1636 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1637 q = SLIST_NEXT(p, pfrkt_workq);
1638 pfr_commit_ktable(p, tzero);
1640 if (flags & PFR_FLAG_ATOMIC)
1643 pf_remove_if_empty_ruleset(rs);
1647 if (nchange != NULL)
/*
 * Merge a table's staged shadow into the live table.  Three cases:
 *   1) shadow has NO_ADDRESSES: the table was (re)defined without an
 *      address list; only (re)activate it and reset stats.
 *   2) kt is already ACTIVE: compute the delta between shadow and kt,
 *      inserting new entries, removing vanished ones, and flipping the
 *      "not" flag (with stats reset) on entries whose negation changed.
 *   3) kt is empty/inactive: simply swap the radix heads wholesale.
 * Finally the shadow is destroyed and kt's flags recomputed so that
 * PFR_TFLAG_INACTIVE is cleared and PFR_TFLAG_ACTIVE set.
 */
1654 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1656 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1659 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1660 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1661 pfr_clstats_ktable(kt, tzero, 1);
1662 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1663 /* kt might contain addresses */
1664 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1665 struct pfr_kentry *p, *q, *next;
1668 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1671 SLIST_INIT(&changeq);
1673 SLIST_INIT(&garbageq);
1674 pfr_clean_node_mask(shadow, &addrq);
/* classify each shadow entry: changed, duplicate (garbage), or new */
1675 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1676 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1677 pfr_copyout_addr(&ad, p);
1678 q = pfr_lookup_addr(kt, &ad, 1);
1680 if (q->pfrke_not != p->pfrke_not)
1681 SLIST_INSERT_HEAD(&changeq, q,
1684 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1686 p->pfrke_tzero = tzero;
1687 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
/* entries of kt left unmarked by the lookup pass are to be deleted */
1690 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1691 pfr_insert_kentries(kt, &addq, tzero);
1692 pfr_remove_kentries(kt, &delq);
1693 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1694 pfr_destroy_kentries(&garbageq);
1696 /* kt cannot contain addresses */
1697 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1699 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1701 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1702 pfr_clstats_ktable(kt, tzero, 1);
/* user flags come from the shadow, set flags are kept from kt */
1704 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1705 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1706 & ~PFR_TFLAG_INACTIVE;
1707 pfr_destroy_ktable(shadow, 0);
1708 kt->pfrkt_shadow = NULL;
1709 pfr_setflags_ktable(kt, nflags);
/*
 * Sanity-check a user-supplied pfr_table: non-empty NUL-terminated name
 * with zeroed padding, optionally reject the reserved anchor, normalize
 * the anchor path, and allow only flags in allowedflags.  Non-zero
 * return indicates an invalid table specification.
 */
1713 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1717 if (!tbl->pfrt_name[0])
1719 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1721 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
/* everything after the terminating NUL must be zero-padded */
1723 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1724 if (tbl->pfrt_name[i])
1726 if (pfr_fix_anchor(tbl->pfrt_anchor))
1728 if (tbl->pfrt_flags & ~allowedflags)
1734 * Rewrite anchors referenced by tables to remove slashes
1735 * and check for validity.
1738 pfr_fix_anchor(char *anchor)
1740 size_t siz = MAXPATHLEN;
/* strip leading '/' characters by shifting the path left in place */
1743 if (anchor[0] == '/') {
1749 while (*++path == '/')
1751 bcopy(path, anchor, siz - off);
1752 memset(anchor + siz - off, 0, off);
/* must be NUL-terminated within the buffer, with zeroed tail padding */
1754 if (anchor[siz - 1])
1756 for (i = strlen(anchor); i < siz; i++)
/*
 * Return the number of tables visible through *filter: all tables when
 * PFR_FLAG_ALLRSETS is set, the anchor's table count when an anchor is
 * given (-1 if the anchor does not exist), else the main ruleset's.
 */
1763 pfr_table_count(struct pfr_table *filter, int flags)
1765 struct pf_ruleset *rs;
1767 if (flags & PFR_FLAG_ALLRSETS)
1768 return (pfr_ktable_cnt);
1769 if (filter->pfrt_anchor[0]) {
1770 rs = pf_find_ruleset(filter->pfrt_anchor);
1771 return ((rs != NULL) ? rs->tables : -1);
1773 return (pf_main_ruleset.tables);
/*
 * Decide whether iteration should skip table kt: never skip when
 * PFR_FLAG_ALLRSETS is set, otherwise skip tables whose anchor differs
 * from the filter's.
 */
1777 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1779 if (flags & PFR_FLAG_ALLRSETS)
1781 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
/* Insert every table on the work queue via pfr_insert_ktable(). */
1787 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1789 struct pfr_ktable *p;
1791 SLIST_FOREACH(p, workq, pfrkt_workq)
1792 pfr_insert_ktable(p);
/*
 * Link kt into the global red-black tree of tables.  If it has a root
 * table, take an anchor reference on the root; the first such reference
 * marks the root as PFR_TFLAG_REFDANCHOR.
 */
1796 pfr_insert_ktable(struct pfr_ktable *kt)
1798 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1800 if (kt->pfrkt_root != NULL)
1801 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1802 pfr_setflags_ktable(kt->pfrkt_root,
1803 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
/*
 * Apply each table's staged pfrkt_nflags via pfr_setflags_ktable().
 * The next pointer is saved first because pfr_setflags_ktable() may
 * destroy the current table.
 */
1807 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1809 struct pfr_ktable *p, *q;
1811 for (p = SLIST_FIRST(workq); p; p = q) {
1812 q = SLIST_NEXT(p, pfrkt_workq);
1813 pfr_setflags_ktable(p, p->pfrkt_nflags);
/*
 * Install a new flag word on kt, performing the side effects the flag
 * transitions imply: a table that is neither referenced nor persistent
 * loses ACTIVE (and with it all user flags); a table with no set flags
 * left is unlinked from the tree and destroyed; deactivation flushes
 * its addresses; leaving INACTIVE discards any staged shadow.
 */
1818 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1820 struct pfr_kentryworkq addrq;
1822 if (!(newf & PFR_TFLAG_REFERENCED) &&
1823 !(newf & PFR_TFLAG_PERSIST))
1824 newf &= ~PFR_TFLAG_ACTIVE;
1825 if (!(newf & PFR_TFLAG_ACTIVE))
1826 newf &= ~PFR_TFLAG_USRMASK;
1827 if (!(newf & PFR_TFLAG_SETMASK)) {
/* table is completely dead: unlink, drop anchor ref on root, free */
1828 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1829 if (kt->pfrkt_root != NULL)
1830 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1831 pfr_setflags_ktable(kt->pfrkt_root,
1832 kt->pfrkt_root->pfrkt_flags &
1833 ~PFR_TFLAG_REFDANCHOR)
1834 pfr_destroy_ktable(kt, 1);
/* deactivated but still existing: flush its address entries */
1838 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1839 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1840 pfr_remove_kentries(kt, &addrq);
1842 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1843 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1844 kt->pfrkt_shadow = NULL;
1846 kt->pfrkt_flags = newf;
/* Clear statistics for every table on the work queue. */
1850 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1852 struct pfr_ktable *p;
1854 SLIST_FOREACH(p, workq, pfrkt_workq)
1855 pfr_clstats_ktable(p, tzero, recurse);
/*
 * Zero kt's packet/byte/match counters and reset its tzero timestamp;
 * when recurse is set, also clear the statistics of every address
 * entry in the table.
 */
1859 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1861 struct pfr_kentryworkq addrq;
1864 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1865 pfr_clstats_kentries(&addrq, tzero, 0);
1868 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1869 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1870 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1872 kt->pfrkt_tzero = tzero;
/*
 * Allocate and initialize a kernel table from the user template *tbl.
 * Optionally attach it to (and reference) its ruleset, and set up the
 * IPv4 and IPv6 radix heads keyed on sin_addr/sin6_addr.  Returns NULL
 * on allocation or radix-head failure (error returns elided here).
 */
1876 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1878 struct pfr_ktable *kt;
1879 struct pf_ruleset *rs;
1881 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
1884 bzero(kt, sizeof(*kt));
1887 if (attachruleset) {
1888 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1890 pfr_destroy_ktable(kt, 0);
/* radix key length is the address width in bits, offset past the head */
1897 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1898 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1899 !rn_inithead((void **)&kt->pfrkt_ip6,
1900 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1901 pfr_destroy_ktable(kt, 0);
1904 kt->pfrkt_tzero = tzero;
/*
 * Destroy every table on the work queue; next pointer is saved first
 * because pfr_destroy_ktable() frees the current element.
 */
1910 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1912 struct pfr_ktable *p, *q;
1914 for (p = SLIST_FIRST(workq); p; p = q) {
1915 q = SLIST_NEXT(p, pfrkt_workq);
1916 pfr_destroy_ktable(p, flushaddr);
/*
 * Free a kernel table: optionally flush its address entries, free both
 * radix heads, recursively destroy any attached shadow, drop the
 * ruleset's table count, and return kt to the pool.
 */
1921 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1923 struct pfr_kentryworkq addrq;
1926 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1927 pfr_clean_node_mask(kt, &addrq);
1928 pfr_destroy_kentries(&addrq);
1930 if (kt->pfrkt_ip4 != NULL)
1931 kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1932 if (kt->pfrkt_ip6 != NULL)
1933 kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1934 if (kt->pfrkt_shadow != NULL)
1935 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1936 if (kt->pfrkt_rs != NULL) {
1937 kt->pfrkt_rs->tables--;
1938 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1940 pool_put(&pfr_ktable_pl, kt);
/*
 * RB-tree ordering for tables: by table name first, then by anchor
 * path as tie-breaker.
 */
1944 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1948 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1950 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
/*
 * Look up a kernel table by user template.  The cast is safe because
 * struct pfr_ktable begins with the fields of struct pfr_table that
 * the comparator reads.
 */
1954 pfr_lookup_table(struct pfr_table *tbl)
1956 /* struct pfr_ktable start like a struct pfr_table */
1957 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1958 (struct pfr_ktable *)tbl));
/*
 * Packet-path lookup: does address a (family af) match table kt?
 * Inactive anchored tables fall back to their root table.  A radix hit
 * counts as a match only if the entry is not negated (pfrke_not);
 * match/nomatch statistics are updated accordingly.
 */
1962 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1964 struct pfr_kentry *ke = NULL;
1967 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1968 kt = kt->pfrkt_root;
1969 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
/* IPv4: reuse the static pfr_sin scratch sockaddr for the radix key */
1975 pfr_sin.sin_addr.s_addr = a->addr32[0];
1976 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
/* ignore the radix tree's internal root nodes */
1978 if (ke && KENTRY_RNF_ROOT(ke))
/* IPv6 path, analogous, via the static pfr_sin6 scratch sockaddr */
1984 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
1985 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
1987 if (ke && KENTRY_RNF_ROOT(ke))
1992 match = (ke && !ke->pfrke_not);
1996 kt->pfrkt_nomatch++;
/*
 * Account len bytes / one packet against table kt (and the matching
 * entry) for address a, direction dir_out and verdict op_pass.  If the
 * lookup outcome disagrees with the rule's recorded match (notrule),
 * the packet is booked under PFR_OP_XPASS instead — only expected for
 * PFR_OP_PASS, hence the diagnostic.
 */
2001 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2002 u_int64_t len, int dir_out, int op_pass, int notrule)
2004 struct pfr_kentry *ke = NULL;
/* inactive anchored tables defer to their root, as in pfr_match_addr */
2006 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2007 kt = kt->pfrkt_root;
2008 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2014 pfr_sin.sin_addr.s_addr = a->addr32[0];
2015 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2017 if (ke && KENTRY_RNF_ROOT(ke))
2023 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2024 ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2026 if (ke && KENTRY_RNF_ROOT(ke))
2033 if ((ke == NULL || ke->pfrke_not) != notrule) {
2034 if (op_pass != PFR_OP_PASS)
2035 kprintf("pfr_update_stats: assertion failed.\n");
2036 op_pass = PFR_OP_XPASS;
2038 kt->pfrkt_packets[dir_out][op_pass]++;
2039 kt->pfrkt_bytes[dir_out][op_pass] += len;
/* XPASS is table-level only; no entry matched the expected way */
2040 if (ke != NULL && op_pass != PFR_OP_XPASS) {
2041 ke->pfrke_packets[dir_out][op_pass]++;
2042 ke->pfrke_bytes[dir_out][op_pass] += len;
/*
 * Resolve (or create) the table `name` for a rule in ruleset rs and
 * take a rule reference on it; the first rule reference marks the table
 * PFR_TFLAG_REFERENCED.  Tables inside an anchor also get a root table
 * in the main ruleset (empty anchor).  Returns the table (return lines
 * elided here).
 */
2047 pfr_attach_table(struct pf_ruleset *rs, char *name)
2049 struct pfr_ktable *kt, *rt;
2050 struct pfr_table tbl;
2051 struct pf_anchor *ac = rs->anchor;
2053 bzero(&tbl, sizeof(tbl));
2054 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2056 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2057 kt = pfr_lookup_table(&tbl);
2059 kt = pfr_create_ktable(&tbl, time_second, 1);
/* anchored table: also find/create its root table in the main ruleset */
2063 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2064 rt = pfr_lookup_table(&tbl);
2066 rt = pfr_create_ktable(&tbl, 0, 1);
2068 pfr_destroy_ktable(kt, 0);
2071 pfr_insert_ktable(rt);
2073 kt->pfrkt_root = rt;
2075 pfr_insert_ktable(kt);
2077 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2078 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
/*
 * Drop a rule reference on kt; the last reference clears
 * PFR_TFLAG_REFERENCED (which may deactivate or destroy the table in
 * pfr_setflags_ktable).  A non-positive refcount indicates a bug and
 * is only logged.
 */
2083 pfr_detach_table(struct pfr_ktable *kt)
2085 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2086 kprintf("pfr_detach_table: refcount = %d.\n",
2087 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2088 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2089 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
/*
 * Round-robin address pool iterator over table kt: starting from index
 * *pidx and the per-state counter, return in *raddr/*rmask the current
 * block and advance the counter to the next usable address, skipping
 * nested (more-specific) blocks.  Uses the static pfr_sin/pfr_sin6
 * scratch sockaddrs as radix keys.
 */
2093 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2094 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2096 struct pfr_kentry *ke, *ke2 = NULL;
2097 struct pf_addr *addr = NULL;
2098 union sockaddr_union mask;
2099 int idx = -1, use_counter = 0;
2102 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2103 else if (af == AF_INET6)
2104 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2105 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2106 kt = kt->pfrkt_root;
2107 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
/* resume from the caller's counter only when an index is already set */
2112 if (counter != NULL && idx >= 0)
2118 ke = pfr_kentry_byidx(kt, idx, af);
2121 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2122 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2123 *rmask = SUNION2PF(&pfr_mask, af);
2126 /* is supplied address within block? */
2127 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2128 /* no, go to next block in table */
2133 PF_ACPY(addr, counter, af);
2135 /* use first address of block */
2136 PF_ACPY(addr, *raddr, af);
2139 if (!KENTRY_NETWORK(ke)) {
2140 /* this is a single IP address - no possible nested block */
2141 PF_ACPY(counter, addr, af);
2146 /* we don't want to use a nested block */
2148 ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2150 else if (af == AF_INET6)
2151 ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2153 /* no need to check KENTRY_RNF_ROOT() here */
2155 /* lookup return the same block - perfect */
2156 PF_ACPY(counter, addr, af);
2161 /* we need to increase the counter past the nested block */
/*
 * NOTE(review): AF_INET is passed here even on the af == AF_INET6
 * path (the PF_POOLMASK call below uses af); later OpenBSD revisions
 * pass af here instead — confirm before relying on IPv6 pools.
 */
2162 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2163 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2165 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2166 /* ok, we reached the end of our main block */
2167 /* go to next block in table */
/*
 * Return the idx-th entry of kt for address family af by walking the
 * corresponding radix tree with a PFRW_POOL_GET walker; the result is
 * delivered through w.pfrw_kentry.
 */
2176 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2178 struct pfr_walktree w;
2180 bzero(&w, sizeof(w));
2181 w.pfrw_op = PFRW_POOL_GET;
2187 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2188 return (w.pfrw_kentry);
2192 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2193 return (w.pfrw_kentry);
2201 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2203 struct pfr_walktree w;
2205 bzero(&w, sizeof(w));
2206 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2210 dyn->pfid_acnt4 = 0;
2211 dyn->pfid_acnt6 = 0;
2212 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2213 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2214 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2215 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);