kernel/pf: Remove an unused variable.
[dragonfly.git] / sys / net / pf / pf_table.c
1 /*      $OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $       */
2
3 /*
4  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
5  *
6  * Copyright (c) 2002 Cedric Berger
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
45
46 #include <net/if.h>
47 #include <net/route.h>
48 #include <netinet/in.h>
49 #include <net/pf/pfvar.h>
50
51 #define ACCEPT_FLAGS(flags, oklist)             \
52         do {                                    \
53                 if ((flags & ~(oklist)) &       \
54                     PFR_FLAG_ALLMASK)           \
55                         return (EINVAL);        \
56         } while (0)
57
58 #define COPYIN(from, to, size, flags)           \
59         ((flags & PFR_FLAG_USERIOCTL) ?         \
60         copyin((from), (to), (size)) :          \
61         (bcopy((from), (to), (size)), 0))
62
63 #define COPYOUT(from, to, size, flags)          \
64         ((flags & PFR_FLAG_USERIOCTL) ?         \
65         copyout((from), (to), (size)) :         \
66         (bcopy((from), (to), (size)), 0))
67
68 #define FILLIN_SIN(sin, addr)                   \
69         do {                                    \
70                 (sin).sin_len = sizeof(sin);    \
71                 (sin).sin_family = AF_INET;     \
72                 (sin).sin_addr = (addr);        \
73         } while (0)
74
75 #define FILLIN_SIN6(sin6, addr)                 \
76         do {                                    \
77                 (sin6).sin6_len = sizeof(sin6); \
78                 (sin6).sin6_family = AF_INET6;  \
79                 (sin6).sin6_addr = (addr);      \
80         } while (0)
81
82 #define SWAP(type, a1, a2)                      \
83         do {                                    \
84                 type tmp = a1;                  \
85                 a1 = a2;                        \
86                 a2 = tmp;                       \
87         } while (0)
88
89 #define SUNION2PF(su, af) (((af)==AF_INET) ?    \
90     (struct pf_addr *)&(su)->sin.sin_addr :     \
91     (struct pf_addr *)&(su)->sin6.sin6_addr)
92
93 #define AF_BITS(af)             (((af)==AF_INET)?32:128)
94 #define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
95 #define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
96 #define KENTRY_RNF_ROOT(ke) \
97                 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
98
99 #define NO_ADDRESSES            (-1)
100 #define ENQUEUE_UNMARKED_ONLY   (1)
101 #define INVERT_NEG_FLAG         (1)
102
103 static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list");
104 static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list");
105 static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list");
106 static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters");
107
/*
 * Argument bundle threaded through the radix-tree walker (pfr_walktree);
 * pfrw_op selects which action the walker performs on each node.
 * NOTE(review): the walker callback itself is not visible in this chunk,
 * so per-op semantics are documented where they are used, not here.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		/* Exactly one member is valid, depending on pfrw_op. */
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;	/* free output slots left; doubles as a
				 * counter via the pfrw_cnt alias below */
	int	 pfrw_flags;
};
128 #define pfrw_addr       pfrw_1.pfrw1_addr
129 #define pfrw_astats     pfrw_1.pfrw1_astats
130 #define pfrw_workq      pfrw_1.pfrw1_workq
131 #define pfrw_kentry     pfrw_1.pfrw1_kentry
132 #define pfrw_dyn        pfrw_1.pfrw1_dyn
133 #define pfrw_cnt        pfrw_free
134
135 #define senderr(e)      do { rv = (e); goto _bad; } while (0)
136 struct malloc_type      *pfr_ktable_pl;
137 struct malloc_type      *pfr_kentry_pl;
138 struct malloc_type      *pfr_kentry_pl2;
139 static struct pf_addr    pfr_ffaddr;            /* constant after setup */
140
141 void                     pfr_copyout_addr(struct pfr_addr *,
142                             struct pfr_kentry *ke);
143 int                      pfr_validate_addr(struct pfr_addr *);
144 void                     pfr_enqueue_addrs(struct pfr_ktable *,
145                             struct pfr_kentryworkq *, int *, int);
146 void                     pfr_mark_addrs(struct pfr_ktable *);
147 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
148                             struct pfr_addr *, int);
149 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *, int);
150 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
151 void                     pfr_destroy_kentry(struct pfr_kentry *);
152 void                     pfr_insert_kentries(struct pfr_ktable *,
153                             struct pfr_kentryworkq *, long);
154 void                     pfr_remove_kentries(struct pfr_ktable *,
155                             struct pfr_kentryworkq *);
156 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
157                             int);
158 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
159 void                     pfr_prepare_network(union sockaddr_union *, int, int);
160 int                      pfr_route_kentry(struct pfr_ktable *,
161                             struct pfr_kentry *);
162 int                      pfr_unroute_kentry(struct pfr_ktable *,
163                             struct pfr_kentry *);
164 int                      pfr_walktree(struct radix_node *, void *);
165 int                      pfr_validate_table(struct pfr_table *, int, int);
166 int                      pfr_fix_anchor(char *);
167 void                     pfr_commit_ktable(struct pfr_ktable *, long);
168 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
169 void                     pfr_insert_ktable(struct pfr_ktable *);
170 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
171 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
172 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173                             int);
174 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
175 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
176 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
177 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
178 int                      pfr_ktable_compare(struct pfr_ktable *,
179                             struct pfr_ktable *);
180 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
181 void                     pfr_clean_node_mask(struct pfr_ktable *,
182                             struct pfr_kentryworkq *);
183 int                      pfr_table_count(struct pfr_table *, int);
184 int                      pfr_skip_table(struct pfr_table *,
185                             struct pfr_ktable *, int);
186 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
187
188 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
189 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190
191 struct pfr_ktablehead    pfr_ktables;
192 struct pfr_table         pfr_nulltable;
193 int                      pfr_ktable_cnt;
194
/*
 * Module initialization: fill pfr_ffaddr with all-one bits.  Per its
 * declaration it stays constant after this point.
 */
void
pfr_initialize(void)
{
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
200
201 int
202 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
203 {
204         struct pfr_ktable       *kt;
205         struct pfr_kentryworkq   workq;
206
207         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
208         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
209                 return (EINVAL);
210         kt = pfr_lookup_table(tbl);
211         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
212                 return (ESRCH);
213         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
214                 return (EPERM);
215         pfr_enqueue_addrs(kt, &workq, ndel, 0);
216
217         if (!(flags & PFR_FLAG_DUMMY)) {
218                 if (flags & PFR_FLAG_ATOMIC)
219                         crit_enter();
220                 pfr_remove_kentries(kt, &workq);
221                 if (flags & PFR_FLAG_ATOMIC)
222                         crit_exit();
223                 if (kt->pfrkt_cnt) {
224                         kprintf("pfr_clr_addrs: corruption detected (%d).\n",
225                             kt->pfrkt_cnt);
226                         kt->pfrkt_cnt = 0;
227                 }
228         }
229         return (0);
230 }
231
/*
 * Add the 'size' addresses in 'addr' to table 'tbl'.
 *
 * *nadd (if non-NULL) receives the number of entries that were (or,
 * with PFR_FLAG_DUMMY, would have been) added.  With PFR_FLAG_FEEDBACK
 * each pfr_addr is copied back to the caller with pfra_fback set to a
 * per-address result code.  Returns 0 or an errno value.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * Scratch table used to detect duplicates inside the request
	 * itself and to stage new entries before the final insert.
	 */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);	/* already in table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* dup in this request? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				/* could not stage it; report no change */
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* Detach staged entries from the scratch table before commit. */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	/* Error path: tear down everything staged so far. */
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
311
/*
 * Delete the 'size' addresses in 'addr' from table 'tbl'.
 *
 * *ndel (if non-NULL) receives the number of entries that were (or,
 * with PFR_FLAG_DUMMY, would have been) deleted.  PFR_FLAG_FEEDBACK
 * copies a per-address result code back to the caller.  Uses the
 * per-entry mark to decide which entries to collect for removal.
 * Returns 0 or an errno value.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	/* 'log' ends up as roughly log2(N); used as the break-even point. */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			/* mark prevents queueing a repeated address twice */
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
401
/*
 * Replace the contents of table 'tbl' with the 'size' addresses in
 * 'addr': addresses not present are added, present-but-unlisted
 * addresses are deleted, and entries whose negation flag differs are
 * changed in place.
 *
 * *nadd/*ndel/*nchange (each optional) receive the respective counts.
 * With PFR_FLAG_FEEDBACK and a non-zero *size2, deleted addresses are
 * appended to the caller's buffer after the first 'size' slots, and
 * *size2 is updated to size+xdel (or to the required size, returning
 * early, if the buffer is too small).  Returns 0 or an errno value.
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* Scratch table: stages additions and catches in-request dups. */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	/* Mark everything; entries still marked at the end get deleted. */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				/* same address listed twice in the request */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;	/* keep this entry */
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				/* could not stage it; report no change */
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* Entries still marked were not in the request: delete them. */
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* buffer too small: report needed size, no error */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	/* Error path: discard staged additions, reset caller feedback. */
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
517
518 int
519 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
520         int *nmatch, int flags)
521 {
522         struct pfr_ktable       *kt;
523         struct pfr_kentry       *p;
524         struct pfr_addr          ad;
525         int                      i, xmatch = 0;
526
527         ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
528         if (pfr_validate_table(tbl, 0, 0))
529                 return (EINVAL);
530         kt = pfr_lookup_table(tbl);
531         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
532                 return (ESRCH);
533
534         for (i = 0; i < size; i++) {
535                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
536                         return (EFAULT);
537                 if (pfr_validate_addr(&ad))
538                         return (EINVAL);
539                 if (ADDR_NETWORK(&ad))
540                         return (EINVAL);
541                 p = pfr_lookup_addr(kt, &ad, 0);
542                 if (flags & PFR_FLAG_REPLACE)
543                         pfr_copyout_addr(&ad, p);
544                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
545                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
546                 if (p != NULL && !p->pfrke_not)
547                         xmatch++;
548                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
549                         return (EFAULT);
550         }
551         if (nmatch != NULL)
552                 *nmatch = xmatch;
553         return (0);
554 }
555
556 int
557 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
558         int flags)
559 {
560         struct pfr_ktable       *kt;
561         struct pfr_walktree      w;
562         int                      rv;
563
564         ACCEPT_FLAGS(flags, 0);
565         if (pfr_validate_table(tbl, 0, 0))
566                 return (EINVAL);
567         kt = pfr_lookup_table(tbl);
568         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
569                 return (ESRCH);
570         if (kt->pfrkt_cnt > *size) {
571                 *size = kt->pfrkt_cnt;
572                 return (0);
573         }
574
575         bzero(&w, sizeof(w));
576         w.pfrw_op = PFRW_GET_ADDRS;
577         w.pfrw_addr = addr;
578         w.pfrw_free = kt->pfrkt_cnt;
579         w.pfrw_flags = flags;
580         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
581         if (!rv)
582                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
583         if (rv)
584                 return (rv);
585
586         if (w.pfrw_free) {
587                 kprintf("pfr_get_addrs: corruption detected (%d).\n",
588                     w.pfrw_free);
589                 return (ENOTTY);
590         }
591         *size = kt->pfrkt_cnt;
592         return (0);
593 }
594
595 int
596 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
597         int flags)
598 {
599         struct pfr_ktable       *kt;
600         struct pfr_walktree      w;
601         struct pfr_kentryworkq   workq;
602         int                      rv;
603         long                     tzero = time_second;
604
605         /* XXX PFR_FLAG_CLSTATS disabled */
606         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
607         if (pfr_validate_table(tbl, 0, 0))
608                 return (EINVAL);
609         kt = pfr_lookup_table(tbl);
610         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
611                 return (ESRCH);
612         if (kt->pfrkt_cnt > *size) {
613                 *size = kt->pfrkt_cnt;
614                 return (0);
615         }
616
617         bzero(&w, sizeof(w));
618         w.pfrw_op = PFRW_GET_ASTATS;
619         w.pfrw_astats = addr;
620         w.pfrw_free = kt->pfrkt_cnt;
621         w.pfrw_flags = flags;
622         if (flags & PFR_FLAG_ATOMIC)
623                 crit_enter();
624         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
625         if (!rv)
626                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
627         if (!rv && (flags & PFR_FLAG_CLSTATS)) {
628                 pfr_enqueue_addrs(kt, &workq, NULL, 0);
629                 pfr_clstats_kentries(&workq, tzero, 0);
630         }
631         if (flags & PFR_FLAG_ATOMIC)
632                 crit_exit();
633         if (rv)
634                 return (rv);
635
636         if (w.pfrw_free) {
637                 kprintf("pfr_get_astats: corruption detected (%d).\n",
638                     w.pfrw_free);
639                 return (ENOTTY);
640         }
641         *size = kt->pfrkt_cnt;
642         return (0);
643 }
644
/*
 * Clear the statistics of the 'size' addresses in 'addr' within table
 * 'tbl'.  *nzero (if non-NULL) receives the number of entries whose
 * statistics were (or, with PFR_FLAG_DUMMY, would have been) cleared.
 * PFR_FLAG_FEEDBACK copies a per-address result code back to the
 * caller.  Returns 0 or an errno value.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		/* tzero of 0: clear counters without a new creation time */
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
696
/*
 * Sanity-check a pfr_addr supplied by the caller.
 * Returns 0 if the address is well formed, -1 otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	/* Address family must be supported and the prefix in range. */
	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * The byte accesses below index from the start of the structure;
	 * NOTE(review): this assumes the address union (pfra_u) is the
	 * first member of struct pfr_addr — verify against pfvar.h.
	 *
	 * Reject addresses with host bits set beyond the prefix: first
	 * the partial byte at the prefix boundary ...
	 */
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	/* ... then every full byte up to the end of the address union. */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	/* Negation flag is strictly boolean. */
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	/* Feedback field must arrive cleared. */
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
730
731 void
732 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
733         int *naddr, int sweep)
734 {
735         struct pfr_walktree     w;
736
737         SLIST_INIT(workq);
738         bzero(&w, sizeof(w));
739         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
740         w.pfrw_workq = workq;
741         if (kt->pfrkt_ip4 != NULL)
742                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
743                         kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
744         if (kt->pfrkt_ip6 != NULL)
745                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
746                         kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
747         if (naddr != NULL)
748                 *naddr = w.pfrw_cnt;
749 }
750
751 void
752 pfr_mark_addrs(struct pfr_ktable *kt)
753 {
754         struct pfr_walktree     w;
755
756         bzero(&w, sizeof(w));
757         w.pfrw_op = PFRW_MARK;
758         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
759                 kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
760         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
761                 kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
762 }
763
764
765 struct pfr_kentry *
766 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
767 {
768         union sockaddr_union     sa, mask;
769         struct radix_node_head  *head = NULL;
770         struct pfr_kentry       *ke;
771
772         bzero(&sa, sizeof(sa));
773         if (ad->pfra_af == AF_INET) {
774                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
775                 head = kt->pfrkt_ip4;
776         } else if ( ad->pfra_af == AF_INET6 ) {
777                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
778                 head = kt->pfrkt_ip6;
779         }
780         if (ADDR_NETWORK(ad)) {
781                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
782                 crit_enter(); /* rn_lookup makes use of globals */
783                 ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
784                     head);
785                 crit_exit();
786                 if (ke && KENTRY_RNF_ROOT(ke))
787                         ke = NULL;
788         } else {
789                 ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
790                 if (ke && KENTRY_RNF_ROOT(ke))
791                         ke = NULL;
792                 if (exact && ke && KENTRY_NETWORK(ke))
793                         ke = NULL;
794         }
795         return (ke);
796 }
797
798 struct pfr_kentry *
799 pfr_create_kentry(struct pfr_addr *ad, int intr)
800 {
801         struct pfr_kentry       *ke;
802
803         if (intr)
804                 ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2, M_NOWAIT|M_ZERO);
805         else
806                 ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL, M_NOWAIT|M_ZERO|M_NULLOK);
807         if (ke == NULL)
808                 return (NULL);
809
810         if (ad->pfra_af == AF_INET)
811                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
812         else if (ad->pfra_af == AF_INET6)
813                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
814         ke->pfrke_af = ad->pfra_af;
815         ke->pfrke_net = ad->pfra_net;
816         ke->pfrke_not = ad->pfra_not;
817         ke->pfrke_intrpool = intr;
818         return (ke);
819 }
820
821 void
822 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
823 {
824         struct pfr_kentry       *p, *q;
825
826         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
827                 q = SLIST_NEXT(p, pfrke_workq);
828                 pfr_destroy_kentry(p);
829         }
830 }
831
832 void
833 pfr_destroy_kentry(struct pfr_kentry *ke)
834 {
835         if (ke->pfrke_counters)
836                 kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL);
837         if (ke->pfrke_intrpool)
838                 kfree(ke, M_PFRKENTRYPL2);
839         else
840                 kfree(ke, M_PFRKENTRYPL);
841 }
842
843 void
844 pfr_insert_kentries(struct pfr_ktable *kt,
845     struct pfr_kentryworkq *workq, long tzero)
846 {
847         struct pfr_kentry       *p;
848         int                      rv, n = 0;
849
850         SLIST_FOREACH(p, workq, pfrke_workq) {
851                 rv = pfr_route_kentry(kt, p);
852                 if (rv) {
853                         kprintf("pfr_insert_kentries: cannot route entry "
854                             "(code=%d).\n", rv);
855                         break;
856                 }
857                 p->pfrke_tzero = tzero;
858                 n++;
859         }
860         kt->pfrkt_cnt += n;
861 }
862
863 int
864 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
865 {
866         struct pfr_kentry       *p;
867         int                      rv;
868
869         p = pfr_lookup_addr(kt, ad, 1);
870         if (p != NULL)
871                 return (0);
872         p = pfr_create_kentry(ad, 1);
873         if (p == NULL)
874                 return (EINVAL);
875
876         rv = pfr_route_kentry(kt, p);
877         if (rv)
878                 return (rv);
879
880         p->pfrke_tzero = tzero;
881         kt->pfrkt_cnt++;
882
883         return (0);
884 }
885
886 void
887 pfr_remove_kentries(struct pfr_ktable *kt,
888     struct pfr_kentryworkq *workq)
889 {
890         struct pfr_kentry       *p;
891         int                      n = 0;
892
893         SLIST_FOREACH(p, workq, pfrke_workq) {
894                 pfr_unroute_kentry(kt, p);
895                 n++;
896         }
897         kt->pfrkt_cnt -= n;
898         pfr_destroy_kentries(workq);
899 }
900
901 void
902 pfr_clean_node_mask(struct pfr_ktable *kt,
903     struct pfr_kentryworkq *workq)
904 {
905         struct pfr_kentry       *p;
906
907         SLIST_FOREACH(p, workq, pfrke_workq)
908                 pfr_unroute_kentry(kt, p);
909 }
910
911 void
912 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
913 {
914         struct pfr_kentry       *p;
915
916         SLIST_FOREACH(p, workq, pfrke_workq) {
917                 crit_enter();
918                 if (negchange)
919                         p->pfrke_not = !p->pfrke_not;
920                 if (p->pfrke_counters) {
921                         kfree(p->pfrke_counters, M_PFRKCOUNTERSPL);
922                         p->pfrke_counters = NULL;
923                 }
924                 crit_exit();
925                 p->pfrke_tzero = tzero;
926         }
927 }
928
929 void
930 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
931 {
932         struct pfr_addr ad;
933         int             i;
934
935         for (i = 0; i < size; i++) {
936                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
937                         break;
938                 ad.pfra_fback = PFR_FB_NONE;
939                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
940                         break;
941         }
942 }
943
944 void
945 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
946 {
947         int     i;
948
949         bzero(sa, sizeof(*sa));
950         if (af == AF_INET) {
951                 sa->sin.sin_len = sizeof(sa->sin);
952                 sa->sin.sin_family = AF_INET;
953                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
954         } else if (af == AF_INET6) {
955                 sa->sin6.sin6_len = sizeof(sa->sin6);
956                 sa->sin6.sin6_family = AF_INET6;
957                 for (i = 0; i < 4; i++) {
958                         if (net <= 32) {
959                                 sa->sin6.sin6_addr.s6_addr32[i] =
960                                     net ? htonl(-1 << (32-net)) : 0;
961                                 break;
962                         }
963                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
964                         net -= 32;
965                 }
966         }
967 }
968
969 int
970 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
971 {
972         union sockaddr_union     mask;
973         struct radix_node       *rn;
974         struct radix_node_head  *head = NULL;
975
976         bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
977         if (ke->pfrke_af == AF_INET)
978                 head = kt->pfrkt_ip4;
979         else if (ke->pfrke_af == AF_INET6)
980                 head = kt->pfrkt_ip6;
981
982         crit_enter();
983         if (KENTRY_NETWORK(ke)) {
984                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
985                 rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
986                     ke->pfrke_node);
987         } else
988                 rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
989                     ke->pfrke_node);
990         crit_exit();
991
992         return (rn == NULL ? -1 : 0);
993 }
994
995 int
996 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
997 {
998         union sockaddr_union     mask;
999         struct radix_node       *rn;
1000         struct radix_node_head  *head = NULL;
1001
1002         if (ke->pfrke_af == AF_INET)
1003                 head = kt->pfrkt_ip4;
1004         else if (ke->pfrke_af == AF_INET6)
1005                 head = kt->pfrkt_ip6;
1006
1007         crit_enter();
1008         if (KENTRY_NETWORK(ke)) {
1009                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1010                 rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
1011         } else
1012                 rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
1013         crit_exit();
1014
1015         if (rn == NULL) {
1016                 kprintf("pfr_unroute_kentry: delete failed.\n");
1017                 return (-1);
1018         }
1019         return (0);
1020 }
1021
1022 void
1023 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1024 {
1025         bzero(ad, sizeof(*ad));
1026         if (ke == NULL)
1027                 return;
1028         ad->pfra_af = ke->pfrke_af;
1029         ad->pfra_net = ke->pfrke_net;
1030         ad->pfra_not = ke->pfrke_not;
1031         if (ad->pfra_af == AF_INET)
1032                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1033         else if (ad->pfra_af == AF_INET6)
1034                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1035 }
1036
/*
 * Radix-tree walker callback.  Dispatches on the operation requested
 * in w->pfrw_op; a non-zero return aborts the walk.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	union sockaddr_union	pfr_mask;
	int			flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* clear the mark; a later PFRW_SWEEP collects unmarked entries */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		/* collect the entry on the caller-supplied work queue */
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy one pfr_addr out to the user buffer while space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		/* copy one pfr_astats record (address + counters) out */
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* counters are snapshotted inside a critical section */
			crit_enter();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				/* no counter record: report zeroes and flag it */
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		/* count down to the requested entry, then stop the walk */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		/* record the first address/mask seen per address family */
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
1122
1123 int
1124 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1125 {
1126         struct pfr_ktableworkq   workq;
1127         struct pfr_ktable       *p;
1128         int                      xdel = 0;
1129
1130         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1131             PFR_FLAG_ALLRSETS);
1132         if (pfr_fix_anchor(filter->pfrt_anchor))
1133                 return (EINVAL);
1134         if (pfr_table_count(filter, flags) < 0)
1135                 return (ENOENT);
1136
1137         SLIST_INIT(&workq);
1138         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1139                 if (pfr_skip_table(filter, p, flags))
1140                         continue;
1141                 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1142                         continue;
1143                 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1144                         continue;
1145                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1146                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1147                 xdel++;
1148         }
1149         if (!(flags & PFR_FLAG_DUMMY)) {
1150                 if (flags & PFR_FLAG_ATOMIC)
1151                         crit_enter();
1152                 pfr_setflags_ktables(&workq);
1153                 if (flags & PFR_FLAG_ATOMIC)
1154                         crit_exit();
1155         }
1156         if (ndel != NULL)
1157                 *ndel = xdel;
1158         return (0);
1159 }
1160
/*
 * Create or (re)activate the 'size' tables described by the userland
 * array 'tbl'.  New anchored tables get a root table created or
 * linked as needed.  '*nadd' reports how many tables were added or
 * reactivated.  PFR_FLAG_DUMMY performs a dry run; PFR_FLAG_ATOMIC
 * wraps the commit in a critical section.  On a validation/copy error
 * all tables created so far are destroyed (senderr jumps to _bad).
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet: create it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already queued for addition */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* maybe the root is queued on addq already */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive table: schedule flag change */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1240
/*
 * Deactivate the 'size' tables named in the userland array 'tbl'.
 * Requests for unknown or already-inactive tables, and duplicates
 * within the request, are silently skipped.  '*ndel' reports how many
 * tables were scheduled for deactivation.  PFR_FLAG_DUMMY performs a
 * dry run; PFR_FLAG_ATOMIC wraps the flag update in a critical
 * section.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip tables already queued on this workq */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1280
1281 int
1282 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1283         int flags)
1284 {
1285         struct pfr_ktable       *p;
1286         int                      n, nn;
1287
1288         ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1289         if (pfr_fix_anchor(filter->pfrt_anchor))
1290                 return (EINVAL);
1291         n = nn = pfr_table_count(filter, flags);
1292         if (n < 0)
1293                 return (ENOENT);
1294         if (n > *size) {
1295                 *size = n;
1296                 return (0);
1297         }
1298         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1299                 if (pfr_skip_table(filter, p, flags))
1300                         continue;
1301                 if (n-- <= 0)
1302                         continue;
1303                 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
1304                         return (EFAULT);
1305         }
1306         if (n) {
1307                 kprintf("pfr_get_tables: corruption detected (%d).\n", n);
1308                 return (ENOTTY);
1309         }
1310         *size = nn;
1311         return (0);
1312 }
1313
/*
 * Copy the statistics records (pfr_tstats) of all tables matched by
 * 'filter' out to 'tbl'.  If '*size' is too small, only the required
 * count is reported.  With PFR_FLAG_ATOMIC the whole walk runs inside
 * one critical section; otherwise each COPYOUT is bracketed by its
 * own.  With PFR_FLAG_CLSTATS the copied tables would be cleared
 * afterwards, but that flag is currently rejected by ACCEPT_FLAGS
 * (see the XXX note).
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		/*
		 * On failure the unconditional crit_exit() balances
		 * whichever crit_enter() is active (outer for ATOMIC,
		 * inner otherwise).
		 */
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		/* walked count disagrees with pfr_table_count() */
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1364
1365 int
1366 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1367 {
1368         struct pfr_ktableworkq   workq;
1369         struct pfr_ktable       *p, key;
1370         int                      i, xzero = 0;
1371         long                     tzero = time_second;
1372
1373         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1374             PFR_FLAG_ADDRSTOO);
1375         SLIST_INIT(&workq);
1376         for (i = 0; i < size; i++) {
1377                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1378                         return (EFAULT);
1379                 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1380                         return (EINVAL);
1381                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1382                 if (p != NULL) {
1383                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1384                         xzero++;
1385                 }
1386         }
1387         if (!(flags & PFR_FLAG_DUMMY)) {
1388                 if (flags & PFR_FLAG_ATOMIC)
1389                         crit_enter();
1390                 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1391                 if (flags & PFR_FLAG_ATOMIC)
1392                         crit_exit();
1393         }
1394         if (nzero != NULL)
1395                 *nzero = xzero;
1396         return (0);
1397 }
1398
/*
 * Set and/or clear user-settable flags (PFR_TFLAG_USRMASK subset) on
 * the 'size' tables named in 'tbl'.  'setflag' and 'clrflag' must be
 * within the user mask and must not overlap.  '*nchange' counts
 * tables whose flags change; '*ndel' counts tables that lose
 * PFR_TFLAG_PERSIST while unreferenced and will therefore go away.
 * PFR_FLAG_DUMMY performs a dry run; PFR_FLAG_ATOMIC wraps the commit
 * in a critical section.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no effective change: nothing to commit */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			/* skip tables already queued on this workq */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/*
			 * Dropping PERSIST from an unreferenced table
			 * deletes it; otherwise it is only a change.
			 */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1452
1453 int
1454 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1455 {
1456         struct pfr_ktableworkq   workq;
1457         struct pfr_ktable       *p;
1458         struct pf_ruleset       *rs;
1459         int                      xdel = 0;
1460
1461         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1462         rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1463         if (rs == NULL)
1464                 return (ENOMEM);
1465         SLIST_INIT(&workq);
1466         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1467                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1468                     pfr_skip_table(trs, p, 0))
1469                         continue;
1470                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1471                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1472                 xdel++;
1473         }
1474         if (!(flags & PFR_FLAG_DUMMY)) {
1475                 pfr_setflags_ktables(&workq);
1476                 if (ticket != NULL)
1477                         *ticket = ++rs->tticket;
1478                 rs->topen = 1;
1479         } else
1480                 pf_remove_if_empty_ruleset(rs);
1481         if (ndel != NULL)
1482                 *ndel = xdel;
1483         return (0);
1484 }
1485
/*
 * Load 'size' addresses into a shadow (inactive) copy of table 'tbl'
 * as part of the transaction identified by 'ticket'.  Missing tables
 * — and, for anchored tables, their root table — are created on the
 * fly.  '*nadd' reports new/reactivated tables, '*naddr' the number
 * of addresses loaded into the shadow.  With PFR_FLAG_DUMMY all
 * structures built here are torn down again before returning.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	/* the transaction must be open and the ticket must match */
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the shadow table holding the new address set */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* duplicates within the request are silently dropped */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any previous shadow of this table */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		/* dry run: undo everything built above */
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1585
/*
 * Roll back an in-progress table transaction on anchor trs->pfrt_anchor:
 * strip PFR_TFLAG_INACTIVE (and with it any pending shadow state) from
 * every matching table.  The number of rolled-back tables is returned in
 * *ndel.  An unknown anchor or stale ticket is not an error: nothing to
 * roll back, so we return 0 having done nothing.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		/* only tables with pending (inactive) state in this anchor */
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;		/* transaction is closed */
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1616
/*
 * Commit a table transaction on anchor trs->pfrt_anchor: every table
 * carrying PFR_TFLAG_INACTIVE in that anchor has its shadow contents
 * made live via pfr_commit_ktable().  *nadd receives the number of
 * newly activated tables, *nchange the number of already-active tables
 * that were updated.  Returns EBUSY when the anchor/ticket does not
 * name an open transaction.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;	/* replacing an active table */
		else
			xadd++;		/* activating a brand new table */
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		/* safe-next walk: pfr_commit_ktable() reuses pfrkt_workq */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;		/* transaction is closed */
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1663
/*
 * Fold the shadow (transaction) copy of kt into the live table and
 * destroy the shadow.  Three cases:
 *  - shadow carries no addresses (NO_ADDRESSES): only flags change;
 *  - kt is active: merge address sets, keeping unchanged entries (and
 *    their statistics) in place;
 *  - kt is inactive: it holds no addresses, so simply swap the radix
 *    heads and counters with the shadow.
 * Finally recompute the flag word and clear PFR_TFLAG_INACTIVE.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);	/* clear marks; survivors re-marked */
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* address already present in kt */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* new address: insert shadow's entry */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* anything in kt still unmarked was not in the shadow */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* user flags come from the shadow, setflags bits from kt */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1722
1723 int
1724 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1725 {
1726         int i;
1727
1728         if (!tbl->pfrt_name[0])
1729                 return (-1);
1730         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1731                  return (-1);
1732         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1733                 return (-1);
1734         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1735                 if (tbl->pfrt_name[i])
1736                         return (-1);
1737         if (pfr_fix_anchor(tbl->pfrt_anchor))
1738                 return (-1);
1739         if (tbl->pfrt_flags & ~allowedflags)
1740                 return (-1);
1741         return (0);
1742 }
1743
1744 /*
1745  * Rewrite anchors referenced by tables to remove slashes
1746  * and check for validity.
1747  */
/*
 * Normalize a table anchor path in place and validate it.  Leading
 * slashes are stripped (the buffer is shifted down and the freed tail
 * zeroed).  The buffer is MAXPATHLEN bytes; it must be NUL-terminated
 * and zero-padded after the string, otherwise -1 is returned.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	size_t nslash;
	size_t i;

	if (anchor[0] == '/') {
		/* count the run of leading slashes, then shift it away */
		nslash = 1;
		while (anchor[nslash] == '/')
			nslash++;
		memmove(anchor, anchor + nslash, siz - nslash);
		memset(anchor + siz - nslash, 0, nslash);
	}
	if (anchor[siz - 1] != '\0')
		return (-1);
	/* reject garbage hiding after the terminator */
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i] != '\0')
			return (-1);
	return (0);
}
1772
1773 int
1774 pfr_table_count(struct pfr_table *filter, int flags)
1775 {
1776         struct pf_ruleset *rs;
1777
1778         if (flags & PFR_FLAG_ALLRSETS)
1779                 return (pfr_ktable_cnt);
1780         if (filter->pfrt_anchor[0]) {
1781                 rs = pf_find_ruleset(filter->pfrt_anchor);
1782                 return ((rs != NULL) ? rs->tables : -1);
1783         }
1784         return (pf_main_ruleset.tables);
1785 }
1786
1787 int
1788 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1789 {
1790         if (flags & PFR_FLAG_ALLRSETS)
1791                 return (0);
1792         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1793                 return (1);
1794         return (0);
1795 }
1796
1797 void
1798 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1799 {
1800         struct pfr_ktable       *p;
1801
1802         SLIST_FOREACH(p, workq, pfrkt_workq)
1803                 pfr_insert_ktable(p);
1804 }
1805
/*
 * Insert kt into the global table tree and bump the global count.  If
 * kt hangs off a root table, take an anchor reference on the root; the
 * first such reference marks the root PFR_TFLAG_REFDANCHOR.
 */
void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
1816
1817 void
1818 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1819 {
1820         struct pfr_ktable       *p, *q;
1821
1822         for (p = SLIST_FIRST(workq); p; p = q) {
1823                 q = SLIST_NEXT(p, pfrkt_workq);
1824                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1825         }
1826 }
1827
/*
 * Apply a new flag word to kt, enforcing the flag invariants:
 *  - a table that is neither referenced nor persistent cannot stay
 *    active;
 *  - an inactive table keeps no user flags;
 *  - a table with no "set" flags left is removed from the tree and
 *    destroyed outright (dropping its anchor reference on the root);
 *  - deactivation flushes the table's addresses;
 *  - leaving the INACTIVE state discards any pending shadow.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* nothing keeps this table alive: tear it down */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		/* going inactive: flush all addresses */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		/* transaction state no longer wanted */
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1859
1860 void
1861 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1862 {
1863         struct pfr_ktable       *p;
1864
1865         SLIST_FOREACH(p, workq, pfrkt_workq)
1866                 pfr_clstats_ktable(p, tzero, recurse);
1867 }
1868
/*
 * Zero kt's packet/byte/match counters and reset its zero-timestamp to
 * tzero.  With recurse set, the per-address counters are cleared first.
 * The counter wipe runs inside a critical section so readers never see
 * a half-cleared set.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
1885
/*
 * Allocate and initialize a kernel table from the user-visible spec
 * tbl.  With attachruleset set, the table is bound to (and counted in)
 * the anchor's ruleset, creating the ruleset if needed.  Both radix
 * heads (IPv4 and IPv6) are initialized.  Returns NULL on allocation
 * failure; partial construction is unwound via pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	/* M_NULLOK: we handle allocation failure ourselves */
	kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL, M_NOWAIT|M_ZERO|M_NULLOK);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* radix trees keyed on the in{,6} address inside a sockaddr */
	KKASSERT(pf_maskhead != NULL);
	if (!rn_inithead((void **)&kt->pfrkt_ip4, pf_maskhead,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6, pf_maskhead,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1919
1920 void
1921 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1922 {
1923         struct pfr_ktable       *p, *q;
1924
1925         for (p = SLIST_FIRST(workq); p; p = q) {
1926                 q = SLIST_NEXT(p, pfrkt_workq);
1927                 pfr_destroy_ktable(p, flushaddr);
1928         }
1929 }
1930
/*
 * Free a kernel table: optionally flush its addresses, release both
 * radix heads, recursively destroy any pending shadow, drop the
 * ruleset's table count (removing the ruleset if now empty), and free
 * the table itself.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);

	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	kfree(kt, M_PFRKTABLEPL);
}
1954
1955 int
1956 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1957 {
1958         int d;
1959
1960         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1961                 return (d);
1962         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1963 }
1964
1965 struct pfr_ktable *
1966 pfr_lookup_table(struct pfr_table *tbl)
1967 {
1968         /* struct pfr_ktable start like a struct pfr_table */
1969         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1970             (struct pfr_ktable *)tbl));
1971 }
1972
/*
 * Test whether address a (family af) matches table kt, updating the
 * table's match/nomatch counters.  An inactive table defers to its
 * root table when one exists; if no active table is found the address
 * does not match.  A radix hit on a negated entry (pfrke_not) counts
 * as a non-match.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* ignore the radix tree's internal root nodes */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2021
/*
 * Account a packet of 'len' bytes against table kt: bump the table's
 * packet/byte counters for (dir_out, op_pass), and when the table has
 * PFR_TFLAG_COUNTERS, the matched entry's counters as well (allocated
 * lazily, best effort).  'notrule' is the rule's expected negation; a
 * mismatch with the lookup result is flagged and booked as XPASS.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	/* inactive tables defer to their root table, if any */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* ignore the radix tree's internal root nodes */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		/* lookup disagrees with the rule's negation state */
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		/* lazy counter allocation; silently skipped on OOM */
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = kmalloc(sizeof(struct pfr_kcounters),
			    M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
2083
/*
 * Attach a rule in ruleset rs to the table named 'name', creating the
 * table (and, inside an anchor, its root table in the main ruleset) if
 * it does not exist yet.  The first rule reference marks the table
 * PFR_TFLAG_REFERENCED.  Returns the table, or NULL on allocation
 * failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* anchored table: find/create its root table */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2119
2120 void
2121 pfr_detach_table(struct pfr_ktable *kt)
2122 {
2123         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2124                 kprintf("pfr_detach_table: refcount = %d.\n",
2125                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2126         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2127                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2128 }
2129
2130 int
2131 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2132     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2133 {
2134         struct pfr_kentry       *ke, *ke2 = NULL;
2135         struct pf_addr          *addr = NULL;
2136         union sockaddr_union     mask;
2137         int                      idx = -1, use_counter = 0;
2138         struct sockaddr_in       pfr_sin;
2139         struct sockaddr_in6      pfr_sin6;
2140         union sockaddr_union     pfr_mask;
2141
2142         if (af == AF_INET)
2143                 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2144         else if (af == AF_INET6)
2145                 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2146         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2147                 kt = kt->pfrkt_root;
2148         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2149                 return (-1);
2150
2151         if (pidx != NULL)
2152                 idx = *pidx;
2153         if (counter != NULL && idx >= 0)
2154                 use_counter = 1;
2155         if (idx < 0)
2156                 idx = 0;
2157
2158 _next_block:
2159         ke = pfr_kentry_byidx(kt, idx, af);
2160         if (ke == NULL) {
2161                 kt->pfrkt_nomatch++;
2162                 return (1);
2163         }
2164         pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2165         *raddr = SUNION2PF(&ke->pfrke_sa, af);
2166         *rmask = SUNION2PF(&pfr_mask, af);
2167
2168         if (use_counter) {
2169                 /* is supplied address within block? */
2170                 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2171                         /* no, go to next block in table */
2172                         idx++;
2173                         use_counter = 0;
2174                         goto _next_block;
2175                 }
2176                 PF_ACPY(addr, counter, af);
2177         } else {
2178                 /* use first address of block */
2179                 PF_ACPY(addr, *raddr, af);
2180         }
2181
2182         if (!KENTRY_NETWORK(ke)) {
2183                 /* this is a single IP address - no possible nested block */
2184                 PF_ACPY(counter, addr, af);
2185                 *pidx = idx;
2186                 kt->pfrkt_match++;
2187                 return (0);
2188         }
2189         for (;;) {
2190                 /* we don't want to use a nested block */
2191                 if (af == AF_INET)
2192                         ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2193                             kt->pfrkt_ip4);
2194                 else if (af == AF_INET6)
2195                         ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2196                             kt->pfrkt_ip6);
2197                 /* no need to check KENTRY_RNF_ROOT() here */
2198                 if (ke2 == ke) {
2199                         /* lookup return the same block - perfect */
2200                         PF_ACPY(counter, addr, af);
2201                         *pidx = idx;
2202                         kt->pfrkt_match++;
2203                         return (0);
2204                 }
2205
2206                 /* we need to increase the counter past the nested block */
2207                 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2208                 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2209                 PF_AINC(addr, af);
2210                 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2211                         /* ok, we reached the end of our main block */
2212                         /* go to next block in table */
2213                         idx++;
2214                         use_counter = 0;
2215                         goto _next_block;
2216                 }
2217         }
2218 }
2219
2220 struct pfr_kentry *
2221 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2222 {
2223         struct pfr_walktree     w;
2224
2225         bzero(&w, sizeof(w));
2226         w.pfrw_op = PFRW_POOL_GET;
2227         w.pfrw_cnt = idx;
2228
2229         switch (af) {
2230 #ifdef INET
2231         case AF_INET:
2232                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2233                 return (w.pfrw_kentry);
2234 #endif /* INET */
2235 #ifdef INET6
2236         case AF_INET6:
2237                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2238                 return (w.pfrw_kentry);
2239 #endif /* INET6 */
2240         default:
2241                 return (NULL);
2242         }
2243 }
2244
/*
 * Refresh a dynamic-address descriptor from table kt: reset its v4/v6
 * address counts and walk the matching radix tree(s) with a
 * PFRW_DYNADDR_UPDATE operation.  An unset pfid_af updates both
 * families.  Runs in a critical section so the counts and walk results
 * stay consistent.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}