82eb6791be8ac60ffe2ac553900574d0baa7e5ba
[dragonfly.git] / sys / net / pf / pf_table.c
1 /*      $OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $       */
2
3 /*
4  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
5  *
6  * Copyright (c) 2002 Cedric Berger
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
45 #include <vm/vm_zone.h>
46
47 #include <net/if.h>
48 #include <net/route.h>
49 #include <netinet/in.h>
50 #include <net/pf/pfvar.h>
51
/* Reject requests carrying flag bits outside 'oklist' (only bits in
 * PFR_FLAG_ALLMASK are considered). */
#define ACCEPT_FLAGS(flags, oklist)             \
        do {                                    \
                if ((flags & ~(oklist)) &       \
                    PFR_FLAG_ALLMASK)           \
                        return (EINVAL);        \
        } while (0)

/*
 * Copy between the request buffer and kernel memory.  When the request
 * came through the ioctl path (PFR_FLAG_USERIOCTL) the buffer lives in
 * user space and copyin/copyout must be used; otherwise both sides are
 * kernel memory and a plain bcopy (which cannot fail) suffices.
 */
#define COPYIN(from, to, size, flags)           \
        ((flags & PFR_FLAG_USERIOCTL) ?         \
        copyin((from), (to), (size)) :          \
        (bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)          \
        ((flags & PFR_FLAG_USERIOCTL) ?         \
        copyout((from), (to), (size)) :         \
        (bcopy((from), (to), (size)), 0))

/* Fill in a sockaddr_in / sockaddr_in6 for use as a radix-tree key. */
#define FILLIN_SIN(sin, addr)                   \
        do {                                    \
                (sin).sin_len = sizeof(sin);    \
                (sin).sin_family = AF_INET;     \
                (sin).sin_addr = (addr);        \
        } while (0)

#define FILLIN_SIN6(sin6, addr)                 \
        do {                                    \
                (sin6).sin6_len = sizeof(sin6); \
                (sin6).sin6_family = AF_INET6;  \
                (sin6).sin6_addr = (addr);      \
        } while (0)

/* Exchange two values of the given type. */
#define SWAP(type, a1, a2)                      \
        do {                                    \
                type tmp = a1;                  \
                a1 = a2;                        \
                a2 = tmp;                       \
        } while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union, by family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?    \
    (struct pf_addr *)&(su)->sin.sin_addr :     \
    (struct pf_addr *)&(su)->sin6.sin6_addr)

/* Prefix width of a family, and "is this a network (not a host)?" tests
 * for a pfr_addr and a pfr_kentry respectively. */
#define AF_BITS(af)             (((af)==AF_INET)?32:128)
#define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True for the radix tree's internal root nodes, which are not real
 * table entries and must be ignored by lookups and walks. */
#define KENTRY_RNF_ROOT(ke) \
                ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES            (-1)
#define ENQUEUE_UNMARKED_ONLY   (1)
#define INVERT_NEG_FLAG         (1)
103
/*
 * Context passed to pfr_walktree() while walking a table's radix trees;
 * pfrw_op selects the operation applied to each node and pfrw_1 carries
 * the operand that operation needs.
 */
struct pfr_walktree {
        enum pfrw_op {
                PFRW_MARK,              /* used by pfr_mark_addrs() */
                PFRW_SWEEP,             /* pfr_enqueue_addrs(), sweep mode */
                PFRW_ENQUEUE,           /* pfr_enqueue_addrs() */
                PFRW_GET_ADDRS,         /* pfr_get_addrs() */
                PFRW_GET_ASTATS,        /* pfr_get_astats() */
                PFRW_POOL_GET,          /* see pfr_kentry_byidx() */
                PFRW_DYNADDR_UPDATE     /* dynaddr refresh (walker not in this chunk) */
        }        pfrw_op;
        union {                         /* operand; member depends on pfrw_op */
                struct pfr_addr         *pfrw1_addr;
                struct pfr_astats       *pfrw1_astats;
                struct pfr_kentryworkq  *pfrw1_workq;
                struct pfr_kentry       *pfrw1_kentry;
                struct pfi_dynaddr      *pfrw1_dyn;
        }        pfrw_1;
        int      pfrw_free;     /* remaining slots in the output buffer */
        int      pfrw_flags;
};
#define pfrw_addr       pfrw_1.pfrw1_addr
#define pfrw_astats     pfrw_1.pfrw1_astats
#define pfrw_workq      pfrw_1.pfrw1_workq
#define pfrw_kentry     pfrw_1.pfrw1_kentry
#define pfrw_dyn        pfrw_1.pfrw1_dyn
/* pfrw_free doubles as a result counter for the enqueue/sweep ops. */
#define pfrw_cnt        pfrw_free
130
/* Record the error code in 'rv' and jump to the function's cleanup label. */
#define senderr(e)      do { rv = (e); goto _bad; } while (0)
132
133 vm_zone_t                pfr_ktable_pl;
134 vm_zone_t                pfr_kentry_pl;
135 vm_zone_t                pfr_kentry_pl2;
136 vm_zone_t               pfr_kcounters_pl;
137 struct sockaddr_in       pfr_sin;
138 struct sockaddr_in6      pfr_sin6;
139 union sockaddr_union     pfr_mask;
140 struct pf_addr           pfr_ffaddr;
141
142 void                     pfr_copyout_addr(struct pfr_addr *,
143                             struct pfr_kentry *ke);
144 int                      pfr_validate_addr(struct pfr_addr *);
145 void                     pfr_enqueue_addrs(struct pfr_ktable *,
146                             struct pfr_kentryworkq *, int *, int);
147 void                     pfr_mark_addrs(struct pfr_ktable *);
148 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
149                             struct pfr_addr *, int);
150 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *, int);
151 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
152 void                     pfr_destroy_kentry(struct pfr_kentry *);
153 void                     pfr_insert_kentries(struct pfr_ktable *,
154                             struct pfr_kentryworkq *, long);
155 void                     pfr_remove_kentries(struct pfr_ktable *,
156                             struct pfr_kentryworkq *);
157 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
158                             int);
159 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
160 void                     pfr_prepare_network(union sockaddr_union *, int, int);
161 int                      pfr_route_kentry(struct pfr_ktable *,
162                             struct pfr_kentry *);
163 int                      pfr_unroute_kentry(struct pfr_ktable *,
164                             struct pfr_kentry *);
165 int                      pfr_walktree(struct radix_node *, void *);
166 int                      pfr_validate_table(struct pfr_table *, int, int);
167 int                      pfr_fix_anchor(char *);
168 void                     pfr_commit_ktable(struct pfr_ktable *, long);
169 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
170 void                     pfr_insert_ktable(struct pfr_ktable *);
171 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
172 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
173 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
174                             int);
175 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
176 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
177 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
178 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
179 int                      pfr_ktable_compare(struct pfr_ktable *,
180                             struct pfr_ktable *);
181 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
182 void                     pfr_clean_node_mask(struct pfr_ktable *,
183                             struct pfr_kentryworkq *);
184 int                      pfr_table_count(struct pfr_table *, int);
185 int                      pfr_skip_table(struct pfr_table *,
186                             struct pfr_ktable *, int);
187 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
188
189 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
191
192 struct pfr_ktablehead    pfr_ktables;
193 struct pfr_table         pfr_nulltable;
194 int                      pfr_ktable_cnt;
195
196 void
197 pfr_initialize(void)
198 {
199         pfr_sin.sin_len = sizeof(pfr_sin);
200         pfr_sin.sin_family = AF_INET;
201         pfr_sin6.sin6_len = sizeof(pfr_sin6);
202         pfr_sin6.sin6_family = AF_INET6;
203
204         memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
205 }
206
207 int
208 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
209 {
210         struct pfr_ktable       *kt;
211         struct pfr_kentryworkq   workq;
212
213         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
214         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
215                 return (EINVAL);
216         kt = pfr_lookup_table(tbl);
217         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
218                 return (ESRCH);
219         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
220                 return (EPERM);
221         pfr_enqueue_addrs(kt, &workq, ndel, 0);
222
223         if (!(flags & PFR_FLAG_DUMMY)) {
224                 if (flags & PFR_FLAG_ATOMIC)
225                         crit_enter();
226                 pfr_remove_kentries(kt, &workq);
227                 if (flags & PFR_FLAG_ATOMIC)
228                         crit_exit();
229                 if (kt->pfrkt_cnt) {
230                         kprintf("pfr_clr_addrs: corruption detected (%d).\n",
231                             kt->pfrkt_cnt);
232                         kt->pfrkt_cnt = 0;
233                 }
234         }
235         return (0);
236 }
237
/*
 * Add up to 'size' addresses from 'addr' to table 'tbl'.  On success
 * *nadd receives the number of entries actually added.  A scratch
 * table is used to detect duplicates within the request itself before
 * anything is committed to the real table.  With PFR_FLAG_FEEDBACK a
 * per-address result code is copied back into 'addr'.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
        struct pfr_ktable       *kt, *tmpkt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p, *q;
        struct pfr_addr          ad;
        int                      i, rv, xadd = 0;
        long                     tzero = time_second;   /* timestamp for new entries */

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /* Scratch table for duplicate detection within this request. */
        tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
        if (tmpkt == NULL)
                return (ENOMEM);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                /* p: existing entry in the real table; q: duplicate
                 * already seen earlier in this same request. */
                p = pfr_lookup_addr(kt, &ad, 1);
                q = pfr_lookup_addr(tmpkt, &ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (q != NULL)
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                        else if (p == NULL)
                                ad.pfra_fback = PFR_FB_ADDED;
                        else if (p->pfrke_not != ad.pfra_not)
                                ad.pfra_fback = PFR_FB_CONFLICT;
                        else
                                ad.pfra_fback = PFR_FB_NONE;
                }
                if (p == NULL && q == NULL) {
                        p = pfr_create_kentry(&ad,
                            !(flags & PFR_FLAG_USERIOCTL));
                        if (p == NULL)
                                senderr(ENOMEM);
                        if (pfr_route_kentry(tmpkt, p)) {
                                pfr_destroy_kentry(p);
                                ad.pfra_fback = PFR_FB_NONE;
                        } else {
                                SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                                xadd++;
                        }
                }
                if (flags & PFR_FLAG_FEEDBACK)
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
        }
        /* Detach the collected entries from the scratch table's trees. */
        pfr_clean_node_mask(tmpkt, &workq);
        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_insert_kentries(kt, &workq, tzero);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        } else
                pfr_destroy_kentries(&workq);
        if (nadd != NULL)
                *nadd = xadd;
        pfr_destroy_ktable(tmpkt, 0);
        return (0);
_bad:
        pfr_clean_node_mask(tmpkt, &workq);
        pfr_destroy_kentries(&workq);
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        pfr_destroy_ktable(tmpkt, 0);
        return (rv);
}
317
/*
 * Delete the 'size' addresses in 'addr' from table 'tbl'.  On success
 * *ndel receives the number of entries actually removed.  With
 * PFR_FLAG_FEEDBACK a per-address result code is copied back.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p;
        struct pfr_addr          ad;
        int                      i, rv, xdel = 0, log = 1;

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /*
         * there are two algorithms to choose from here.
         * with:
         *   n: number of addresses to delete
         *   N: number of addresses in the table
         *
         * one is O(N) and is better for large 'n'
         * one is O(n*LOG(N)) and is better for small 'n'
         *
         * following code try to decide which one is best.
         */
        /* 'log' ends up roughly log2(N); compare n against N/log2(N). */
        for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
                log++;
        if (size > kt->pfrkt_cnt/log) {
                /* full table scan */
                pfr_mark_addrs(kt);
        } else {
                /* iterate over addresses to delete */
                for (i = 0; i < size; i++) {
                        if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                                return (EFAULT);
                        if (pfr_validate_addr(&ad))
                                return (EINVAL);
                        p = pfr_lookup_addr(kt, &ad, 1);
                        if (p != NULL)
                                p->pfrke_mark = 0;
                }
        }
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, &ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (p == NULL)
                                ad.pfra_fback = PFR_FB_NONE;
                        else if (p->pfrke_not != ad.pfra_not)
                                ad.pfra_fback = PFR_FB_CONFLICT;
                        else if (p->pfrke_mark)
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                        else
                                ad.pfra_fback = PFR_FB_DELETED;
                }
                /* Queue each matching entry once; the mark guards
                 * against duplicates within the request. */
                if (p != NULL && p->pfrke_not == ad.pfra_not &&
                    !p->pfrke_mark) {
                        p->pfrke_mark = 1;
                        SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                        xdel++;
                }
                if (flags & PFR_FLAG_FEEDBACK)
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
        }
        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_remove_kentries(kt, &workq);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        }
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
_bad:
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        return (rv);
}
407
408 int
409 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
410     int *size2, int *nadd, int *ndel, int *nchange, int flags,
411     u_int32_t ignore_pfrt_flags)
412 {
413         struct pfr_ktable       *kt, *tmpkt;
414         struct pfr_kentryworkq   addq, delq, changeq;
415         struct pfr_kentry       *p, *q;
416         struct pfr_addr          ad;
417         int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
418         long                     tzero = time_second;
419
420         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
421             PFR_FLAG_FEEDBACK);
422         if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
423             PFR_FLAG_USERIOCTL))
424                 return (EINVAL);
425         kt = pfr_lookup_table(tbl);
426         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
427                 return (ESRCH);
428         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
429                 return (EPERM);
430         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
431         if (tmpkt == NULL)
432                 return (ENOMEM);
433         pfr_mark_addrs(kt);
434         SLIST_INIT(&addq);
435         SLIST_INIT(&delq);
436         SLIST_INIT(&changeq);
437         for (i = 0; i < size; i++) {
438                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
439                         senderr(EFAULT);
440                 if (pfr_validate_addr(&ad))
441                         senderr(EINVAL);
442                 ad.pfra_fback = PFR_FB_NONE;
443                 p = pfr_lookup_addr(kt, &ad, 1);
444                 if (p != NULL) {
445                         if (p->pfrke_mark) {
446                                 ad.pfra_fback = PFR_FB_DUPLICATE;
447                                 goto _skip;
448                         }
449                         p->pfrke_mark = 1;
450                         if (p->pfrke_not != ad.pfra_not) {
451                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
452                                 ad.pfra_fback = PFR_FB_CHANGED;
453                                 xchange++;
454                         }
455                 } else {
456                         q = pfr_lookup_addr(tmpkt, &ad, 1);
457                         if (q != NULL) {
458                                 ad.pfra_fback = PFR_FB_DUPLICATE;
459                                 goto _skip;
460                         }
461                         p = pfr_create_kentry(&ad,
462                             !(flags & PFR_FLAG_USERIOCTL));
463                         if (p == NULL)
464                                 senderr(ENOMEM);
465                         if (pfr_route_kentry(tmpkt, p)) {
466                                 pfr_destroy_kentry(p);
467                                 ad.pfra_fback = PFR_FB_NONE;
468                         } else {
469                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
470                                 ad.pfra_fback = PFR_FB_ADDED;
471                                 xadd++;
472                         }
473                 }
474 _skip:
475                 if (flags & PFR_FLAG_FEEDBACK)
476                         if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
477                                 senderr(EFAULT);
478         }
479         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
480         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
481                 if (*size2 < size+xdel) {
482                         *size2 = size+xdel;
483                         senderr(0);
484                 }
485                 i = 0;
486                 SLIST_FOREACH(p, &delq, pfrke_workq) {
487                         pfr_copyout_addr(&ad, p);
488                         ad.pfra_fback = PFR_FB_DELETED;
489                         if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
490                                 senderr(EFAULT);
491                         i++;
492                 }
493         }
494         pfr_clean_node_mask(tmpkt, &addq);
495         if (!(flags & PFR_FLAG_DUMMY)) {
496                 if (flags & PFR_FLAG_ATOMIC)
497                         crit_enter();
498                 pfr_insert_kentries(kt, &addq, tzero);
499                 pfr_remove_kentries(kt, &delq);
500                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
501                 if (flags & PFR_FLAG_ATOMIC)
502                         crit_exit();
503         } else
504                 pfr_destroy_kentries(&addq);
505         if (nadd != NULL)
506                 *nadd = xadd;
507         if (ndel != NULL)
508                 *ndel = xdel;
509         if (nchange != NULL)
510                 *nchange = xchange;
511         if ((flags & PFR_FLAG_FEEDBACK) && size2)
512                 *size2 = size+xdel;
513         pfr_destroy_ktable(tmpkt, 0);
514         return (0);
515 _bad:
516         pfr_clean_node_mask(tmpkt, &addq);
517         pfr_destroy_kentries(&addq);
518         if (flags & PFR_FLAG_FEEDBACK)
519                 pfr_reset_feedback(addr, size, flags);
520         pfr_destroy_ktable(tmpkt, 0);
521         return (rv);
522 }
523
524 int
525 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
526         int *nmatch, int flags)
527 {
528         struct pfr_ktable       *kt;
529         struct pfr_kentry       *p;
530         struct pfr_addr          ad;
531         int                      i, xmatch = 0;
532
533         ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
534         if (pfr_validate_table(tbl, 0, 0))
535                 return (EINVAL);
536         kt = pfr_lookup_table(tbl);
537         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
538                 return (ESRCH);
539
540         for (i = 0; i < size; i++) {
541                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
542                         return (EFAULT);
543                 if (pfr_validate_addr(&ad))
544                         return (EINVAL);
545                 if (ADDR_NETWORK(&ad))
546                         return (EINVAL);
547                 p = pfr_lookup_addr(kt, &ad, 0);
548                 if (flags & PFR_FLAG_REPLACE)
549                         pfr_copyout_addr(&ad, p);
550                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
551                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
552                 if (p != NULL && !p->pfrke_not)
553                         xmatch++;
554                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
555                         return (EFAULT);
556         }
557         if (nmatch != NULL)
558                 *nmatch = xmatch;
559         return (0);
560 }
561
562 int
563 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
564         int flags)
565 {
566         struct pfr_ktable       *kt;
567         struct pfr_walktree      w;
568         int                      rv;
569
570         ACCEPT_FLAGS(flags, 0);
571         if (pfr_validate_table(tbl, 0, 0))
572                 return (EINVAL);
573         kt = pfr_lookup_table(tbl);
574         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
575                 return (ESRCH);
576         if (kt->pfrkt_cnt > *size) {
577                 *size = kt->pfrkt_cnt;
578                 return (0);
579         }
580
581         bzero(&w, sizeof(w));
582         w.pfrw_op = PFRW_GET_ADDRS;
583         w.pfrw_addr = addr;
584         w.pfrw_free = kt->pfrkt_cnt;
585         w.pfrw_flags = flags;
586         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
587         if (!rv)
588                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
589         if (rv)
590                 return (rv);
591
592         if (w.pfrw_free) {
593                 kprintf("pfr_get_addrs: corruption detected (%d).\n",
594                     w.pfrw_free);
595                 return (ENOTTY);
596         }
597         *size = kt->pfrkt_cnt;
598         return (0);
599 }
600
/*
 * Copy the per-address statistics of table 'tbl' out to 'addr'
 * (capacity *size).  If the buffer is too small, only the required
 * size is reported.  Note that PFR_FLAG_CLSTATS is not in the
 * accepted-flags list below, so the clear-statistics branch is
 * currently unreachable.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
        int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_walktree      w;
        struct pfr_kentryworkq   workq;
        int                      rv;
        long                     tzero = time_second;

        /* XXX PFR_FLAG_CLSTATS disabled */
        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        /* Buffer too small: report required size, copy nothing. */
        if (kt->pfrkt_cnt > *size) {
                *size = kt->pfrkt_cnt;
                return (0);
        }

        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_GET_ASTATS;
        w.pfrw_astats = addr;
        w.pfrw_free = kt->pfrkt_cnt;
        w.pfrw_flags = flags;
        if (flags & PFR_FLAG_ATOMIC)
                crit_enter();
        rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
        if (!rv)
                rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
        /* Dead branch while CLSTATS is rejected by ACCEPT_FLAGS above. */
        if (!rv && (flags & PFR_FLAG_CLSTATS)) {
                pfr_enqueue_addrs(kt, &workq, NULL, 0);
                pfr_clstats_kentries(&workq, tzero, 0);
        }
        if (flags & PFR_FLAG_ATOMIC)
                crit_exit();
        if (rv)
                return (rv);

        /* Every slot handed to the walk must have been consumed. */
        if (w.pfrw_free) {
                kprintf("pfr_get_astats: corruption detected (%d).\n",
                    w.pfrw_free);
                return (ENOTTY);
        }
        *size = kt->pfrkt_cnt;
        return (0);
}
650
/*
 * Clear the per-address statistics of the 'size' addresses in 'addr'
 * within table 'tbl'.  *nzero receives the number of entries cleared;
 * with PFR_FLAG_FEEDBACK a per-address result code is copied back.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p;
        struct pfr_addr          ad;
        int                      i, rv, xzero = 0;

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, &ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        ad.pfra_fback = (p != NULL) ?
                            PFR_FB_CLEARED : PFR_FB_NONE;
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
                }
                if (p != NULL) {
                        SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                        xzero++;
                }
        }

        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_clstats_kentries(&workq, 0, 0);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        }
        if (nzero != NULL)
                *nzero = xzero;
        return (0);
_bad:
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        return (rv);
}
702
/*
 * Sanity-check a pfr_addr coming from user land: known address family,
 * prefix length within range, no host bits set beyond the prefix, and
 * clean flag fields.  Returns 0 when valid, -1 otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
        int i;

        switch (ad->pfra_af) {
#ifdef INET
        case AF_INET:
                if (ad->pfra_net > 32)
                        return (-1);
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                if (ad->pfra_net > 128)
                        return (-1);
                break;
#endif /* INET6 */
        default:
                return (-1);
        }
        /*
         * All bits past the prefix must be zero.  The raw address bytes
         * are inspected through a caddr_t view of 'ad' itself — this
         * assumes pfra_u sits at offset 0 of struct pfr_addr (true for
         * the pfvar.h layout; confirm if the struct ever changes).
         */
        if (ad->pfra_net < 128 &&
                (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
                        return (-1);
        /* Remaining whole bytes after the prefix must also be zero. */
        for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
                if (((caddr_t)ad)[i])
                        return (-1);
        /* pfra_not is boolean; pfra_fback must arrive cleared. */
        if (ad->pfra_not && ad->pfra_not != 1)
                return (-1);
        if (ad->pfra_fback)
                return (-1);
        return (0);
}
736
737 void
738 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
739         int *naddr, int sweep)
740 {
741         struct pfr_walktree     w;
742
743         SLIST_INIT(workq);
744         bzero(&w, sizeof(w));
745         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
746         w.pfrw_workq = workq;
747         if (kt->pfrkt_ip4 != NULL)
748                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
749                         kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
750         if (kt->pfrkt_ip6 != NULL)
751                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
752                         kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
753         if (naddr != NULL)
754                 *naddr = w.pfrw_cnt;
755 }
756
757 void
758 pfr_mark_addrs(struct pfr_ktable *kt)
759 {
760         struct pfr_walktree     w;
761
762         bzero(&w, sizeof(w));
763         w.pfrw_op = PFRW_MARK;
764         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
765                 kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
766         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
767                 kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
768 }
769
770
/*
 * Look up a single address in table kt.  Network addresses
 * (ADDR_NETWORK) are searched with rn_lookup() using an explicit
 * netmask built by pfr_prepare_network(); host addresses use
 * best-match rn_match(), and with exact set a network entry matching
 * a host query is rejected.  Radix root nodes are filtered out.
 * Returns the matching kentry or NULL.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	/* NOTE(review): callers are expected to have validated pfra_af;
	 * head stays NULL for other families — confirm all call sites. */
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		crit_enter(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
		    head);
		crit_exit();
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		/* exact lookup must not return a covering network entry */
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
803
804 struct pfr_kentry *
805 pfr_create_kentry(struct pfr_addr *ad, int intr)
806 {
807         struct pfr_kentry       *ke;
808
809         if (intr)
810                 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT | PR_ZERO);
811         else
812                 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
813         if (ke == NULL)
814                 return (NULL);
815
816         if (ad->pfra_af == AF_INET)
817                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
818         else if (ad->pfra_af == AF_INET6)
819                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
820         ke->pfrke_af = ad->pfra_af;
821         ke->pfrke_net = ad->pfra_net;
822         ke->pfrke_not = ad->pfra_not;
823         ke->pfrke_intrpool = intr;
824         return (ke);
825 }
826
827 void
828 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
829 {
830         struct pfr_kentry       *p, *q;
831
832         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
833                 q = SLIST_NEXT(p, pfrke_workq);
834                 pfr_destroy_kentry(p);
835         }
836 }
837
838 void
839 pfr_destroy_kentry(struct pfr_kentry *ke)
840 {
841         if (ke->pfrke_counters)
842                 pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
843         if (ke->pfrke_intrpool)
844                 pool_put(&pfr_kentry_pl2, ke);
845         else
846                 pool_put(&pfr_kentry_pl, ke);
847 }
848
849 void
850 pfr_insert_kentries(struct pfr_ktable *kt,
851     struct pfr_kentryworkq *workq, long tzero)
852 {
853         struct pfr_kentry       *p;
854         int                      rv, n = 0;
855
856         SLIST_FOREACH(p, workq, pfrke_workq) {
857                 rv = pfr_route_kentry(kt, p);
858                 if (rv) {
859                         kprintf("pfr_insert_kentries: cannot route entry "
860                             "(code=%d).\n", rv);
861                         break;
862                 }
863                 p->pfrke_tzero = tzero;
864                 n++;
865         }
866         kt->pfrkt_cnt += n;
867 }
868
869 int
870 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
871 {
872         struct pfr_kentry       *p;
873         int                      rv;
874
875         p = pfr_lookup_addr(kt, ad, 1);
876         if (p != NULL)
877                 return (0);
878         p = pfr_create_kentry(ad, 1);
879         if (p == NULL)
880                 return (EINVAL);
881
882         rv = pfr_route_kentry(kt, p);
883         if (rv)
884                 return (rv);
885
886         p->pfrke_tzero = tzero;
887         kt->pfrkt_cnt++;
888
889         return (0);
890 }
891
892 void
893 pfr_remove_kentries(struct pfr_ktable *kt,
894     struct pfr_kentryworkq *workq)
895 {
896         struct pfr_kentry       *p;
897         int                      n = 0;
898
899         SLIST_FOREACH(p, workq, pfrke_workq) {
900                 pfr_unroute_kentry(kt, p);
901                 n++;
902         }
903         kt->pfrkt_cnt -= n;
904         pfr_destroy_kentries(workq);
905 }
906
907 void
908 pfr_clean_node_mask(struct pfr_ktable *kt,
909     struct pfr_kentryworkq *workq)
910 {
911         struct pfr_kentry       *p;
912
913         SLIST_FOREACH(p, workq, pfrke_workq)
914                 pfr_unroute_kentry(kt, p);
915 }
916
917 void
918 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
919 {
920         struct pfr_kentry       *p;
921
922         SLIST_FOREACH(p, workq, pfrke_workq) {
923                 crit_enter();
924                 if (negchange)
925                         p->pfrke_not = !p->pfrke_not;
926                 if (p->pfrke_counters) {
927                         pool_put(&pfr_kcounters_pl, p->pfrke_counters);
928                         p->pfrke_counters = NULL;
929                 }
930                 crit_exit();
931                 p->pfrke_tzero = tzero;
932         }
933 }
934
935 void
936 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
937 {
938         struct pfr_addr ad;
939         int             i;
940
941         for (i = 0; i < size; i++) {
942                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
943                         break;
944                 ad.pfra_fback = PFR_FB_NONE;
945                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
946                         break;
947         }
948 }
949
950 void
951 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
952 {
953         int     i;
954
955         bzero(sa, sizeof(*sa));
956         if (af == AF_INET) {
957                 sa->sin.sin_len = sizeof(sa->sin);
958                 sa->sin.sin_family = AF_INET;
959                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
960         } else if (af == AF_INET6) {
961                 sa->sin6.sin6_len = sizeof(sa->sin6);
962                 sa->sin6.sin6_family = AF_INET6;
963                 for (i = 0; i < 4; i++) {
964                         if (net <= 32) {
965                                 sa->sin6.sin6_addr.s6_addr32[i] =
966                                     net ? htonl(-1 << (32-net)) : 0;
967                                 break;
968                         }
969                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
970                         net -= 32;
971                 }
972         }
973 }
974
975 int
976 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
977 {
978         union sockaddr_union     mask;
979         struct radix_node       *rn;
980         struct radix_node_head  *head = NULL;
981
982         bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
983         if (ke->pfrke_af == AF_INET)
984                 head = kt->pfrkt_ip4;
985         else if (ke->pfrke_af == AF_INET6)
986                 head = kt->pfrkt_ip6;
987
988         crit_enter();
989         if (KENTRY_NETWORK(ke)) {
990                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
991                 rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
992                     ke->pfrke_node);
993         } else
994                 rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
995                     ke->pfrke_node);
996         crit_exit();
997
998         return (rn == NULL ? -1 : 0);
999 }
1000
1001 int
1002 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1003 {
1004         union sockaddr_union     mask;
1005         struct radix_node       *rn;
1006         struct radix_node_head  *head = NULL;
1007
1008         if (ke->pfrke_af == AF_INET)
1009                 head = kt->pfrkt_ip4;
1010         else if (ke->pfrke_af == AF_INET6)
1011                 head = kt->pfrkt_ip6;
1012
1013         crit_enter();
1014         if (KENTRY_NETWORK(ke)) {
1015                 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1016                 rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
1017         } else
1018                 rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
1019         crit_exit();
1020
1021         if (rn == NULL) {
1022                 kprintf("pfr_unroute_kentry: delete failed.\n");
1023                 return (-1);
1024         }
1025         return (0);
1026 }
1027
1028 void
1029 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1030 {
1031         bzero(ad, sizeof(*ad));
1032         if (ke == NULL)
1033                 return;
1034         ad->pfra_af = ke->pfrke_af;
1035         ad->pfra_net = ke->pfrke_net;
1036         ad->pfra_not = ke->pfrke_not;
1037         if (ad->pfra_af == AF_INET)
1038                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1039         else if (ad->pfra_af == AF_INET6)
1040                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1041 }
1042
/*
 * Radix-tree walker callback; dispatches on the operation stored in
 * the pfr_walktree argument.  Returns non-zero to abort the walk:
 * EFAULT on copyout failure, 1 when a PFRW_POOL_GET search found its
 * target entry.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* clear the mark, preparing for a later sweep */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		/* collect only entries not re-marked since PFRW_MARK */
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy out one address while user buffer space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		/* copy out the address plus its packet/byte counters */
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* snapshot counters atomically w.r.t. updates */
			crit_enter();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				/* no counters allocated yet; report zeroes */
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		/* pfrw_cnt counts down; the entry reaching zero is the one */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		/* record the first address/mask seen per family */
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
1127
/*
 * Deactivate all tables matching the given filter: each matching
 * active table loses PFR_TFLAG_ACTIVE via pfr_setflags_ktables().
 * Tables in the reserved anchor are never touched.  *ndel receives
 * the number of tables deactivated; with PFR_FLAG_DUMMY only the
 * count is computed, nothing is changed.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		/* never deactivate tables of the reserved anchor */
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1165
/*
 * Create or (re)activate the tables described by the user array
 * tbl[size].  New tables are built on addq; existing-but-inactive
 * tables get their user flags merged and are queued on changeq.
 * A table inside an anchor also gets a root table (same name, empty
 * anchor) found or created and linked through pfrkt_root.  On any
 * copyin/validation/allocation failure senderr() bails out to _bad,
 * which destroys everything queued on addq.  *nadd receives the
 * number of tables added or reactivated; PFR_FLAG_DUMMY builds and
 * then discards addq, changing nothing.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already queued for addition */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			/* tables outside any anchor need no root table */
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* a root may already be pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive: merge user flags */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1245
/*
 * Deactivate the tables named in the user array tbl[size].  Each
 * valid, currently active table is queued once (duplicates skipped)
 * and stripped of PFR_TFLAG_ACTIVE via pfr_setflags_ktables().
 * *ndel receives the count; PFR_FLAG_DUMMY computes the count only.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip tables already queued */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1285
/*
 * Copy out the pfr_table headers of all tables matching filter.
 * If the caller's buffer (*size entries) is too small, only the
 * required count is stored in *size and 0 returned.  n counts down
 * remaining slots during the walk; a non-zero leftover means the
 * table set changed between counting and walking (ENOTTY).
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1318
/*
 * Copy out pfr_tstats (table statistics) for all tables matching
 * filter, optionally clearing them afterwards (PFR_FLAG_CLSTATS is
 * not accepted at the flag check below — see existing XXX).  If the
 * caller's buffer is too small only the required count is returned
 * in *size.  A leftover n after the walk means the table set changed
 * underneath us (ENOTTY).
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	/* atomic mode: one critical section spans the whole walk */
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		/* non-atomic mode: critical section per copyout only */
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1369
1370 int
1371 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1372 {
1373         struct pfr_ktableworkq   workq;
1374         struct pfr_ktable       *p, key;
1375         int                      i, xzero = 0;
1376         long                     tzero = time_second;
1377
1378         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1379             PFR_FLAG_ADDRSTOO);
1380         SLIST_INIT(&workq);
1381         for (i = 0; i < size; i++) {
1382                 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1383                         return (EFAULT);
1384                 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1385                         return (EINVAL);
1386                 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1387                 if (p != NULL) {
1388                         SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1389                         xzero++;
1390                 }
1391         }
1392         if (!(flags & PFR_FLAG_DUMMY)) {
1393                 if (flags & PFR_FLAG_ATOMIC)
1394                         crit_enter();
1395                 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1396                 if (flags & PFR_FLAG_ATOMIC)
1397                         crit_exit();
1398         }
1399         if (nzero != NULL)
1400                 *nzero = xzero;
1401         return (0);
1402 }
1403
/*
 * Set and/or clear user-settable flags on the tables named in
 * tbl[size].  setflag and clrflag must lie within PFR_TFLAG_USRMASK
 * and be disjoint.  Only tables whose flags would actually change
 * are queued for pfr_setflags_ktables(); clearing PERSIST on an
 * unreferenced persistent table counts toward *ndel, every other
 * change toward *nchange.  PFR_FLAG_DUMMY computes the counts only.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no effective change: nothing to queue */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			/* skip tables already queued */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1457
1458 int
1459 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1460 {
1461         struct pfr_ktableworkq   workq;
1462         struct pfr_ktable       *p;
1463         struct pf_ruleset       *rs;
1464         int                      xdel = 0;
1465
1466         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1467         rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1468         if (rs == NULL)
1469                 return (ENOMEM);
1470         SLIST_INIT(&workq);
1471         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1472                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1473                     pfr_skip_table(trs, p, 0))
1474                         continue;
1475                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1476                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1477                 xdel++;
1478         }
1479         if (!(flags & PFR_FLAG_DUMMY)) {
1480                 pfr_setflags_ktables(&workq);
1481                 if (ticket != NULL)
1482                         *ticket = ++rs->tticket;
1483                 rs->topen = 1;
1484         } else
1485                 pf_remove_if_empty_ruleset(rs);
1486         if (ndel != NULL)
1487                 *ndel = xdel;
1488         return (0);
1489 }
1490
/*
 * Stage a table and (optionally) its address list inside an open
 * transaction.  Called with the ticket from pfr_ina_begin(); builds
 * a "shadow" ktable holding the new address set and stores it in
 * kt->pfrkt_shadow for a later commit step.  A table inside an
 * anchor also gets a root table found or created and linked via
 * pfrkt_root.  With PFR_FLAG_DUMMY everything built here is torn
 * down again.  *nadd receives the number of tables added, *naddr
 * the number of addresses staged.  senderr() bails out to _bad,
 * which destroys all partially built state.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	/* the ticket must match the open transaction on this anchor */
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		/* tables outside any anchor need no root table */
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the shadow table and route the staged addresses into it */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* duplicates within the staged set are silently dropped */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any previously staged shadow for this table */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1590
/*
 * Roll back an in-progress table transaction: drop the INACTIVE
 * (shadow) state of every table attached to the given anchor.
 * Returns 0; *ndel receives the number of tables rolled back.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	/* nothing to do unless a transaction with this ticket is open */
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		/* only tables of this anchor that carry transaction state */
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* applying the new flags tears down the shadow tables */
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1621
/*
 * Commit an open table transaction for the given anchor: every table
 * carrying INACTIVE (shadow) state is switched over to its shadow
 * contents.  Returns EBUSY if no transaction with this ticket is
 * open; *nadd/*nchange receive the number of newly created and of
 * replaced tables respectively.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		/* ACTIVE tables are being replaced, others newly added */
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		/* pfr_commit_ktable() may disturb the list; save next first */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1668
/*
 * Make a table's shadow (transaction) contents live, then destroy the
 * shadow.  Three cases:
 *  - the shadow holds no address list (NO_ADDRESSES): keep kt's
 *    addresses; just clear statistics if the table was not yet active;
 *  - kt is active: merge the shadow's addresses into kt, computing
 *    add/delete/change sets so per-entry statistics survive;
 *  - kt is inactive: simply swap the radix heads and entry counts.
 * Finally recompute the flag word and drop the INACTIVE bit.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/*
				 * Address already in kt: keep kt's entry,
				 * record a negation-polarity change if any,
				 * and discard the shadow's duplicate.
				 */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* entries left unmarked in kt do not exist in the shadow */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* user-settable flags come from the shadow, set flags from kt */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1727
1728 int
1729 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1730 {
1731         int i;
1732
1733         if (!tbl->pfrt_name[0])
1734                 return (-1);
1735         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1736                  return (-1);
1737         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1738                 return (-1);
1739         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1740                 if (tbl->pfrt_name[i])
1741                         return (-1);
1742         if (pfr_fix_anchor(tbl->pfrt_anchor))
1743                 return (-1);
1744         if (tbl->pfrt_flags & ~allowedflags)
1745                 return (-1);
1746         return (0);
1747 }
1748
1749 /*
1750  * Rewrite anchors referenced by tables to remove slashes
1751  * and check for validity.
1752  */
1753 int
1754 pfr_fix_anchor(char *anchor)
1755 {
1756         size_t siz = MAXPATHLEN;
1757         int i;
1758
1759         if (anchor[0] == '/') {
1760                 char *path;
1761                 int off;
1762
1763                 path = anchor;
1764                 off = 1;
1765                 while (*++path == '/')
1766                         off++;
1767                 bcopy(path, anchor, siz - off);
1768                 memset(anchor + siz - off, 0, off);
1769         }
1770         if (anchor[siz - 1])
1771                 return (-1);
1772         for (i = strlen(anchor); i < siz; i++)
1773                 if (anchor[i])
1774                         return (-1);
1775         return (0);
1776 }
1777
1778 int
1779 pfr_table_count(struct pfr_table *filter, int flags)
1780 {
1781         struct pf_ruleset *rs;
1782
1783         if (flags & PFR_FLAG_ALLRSETS)
1784                 return (pfr_ktable_cnt);
1785         if (filter->pfrt_anchor[0]) {
1786                 rs = pf_find_ruleset(filter->pfrt_anchor);
1787                 return ((rs != NULL) ? rs->tables : -1);
1788         }
1789         return (pf_main_ruleset.tables);
1790 }
1791
1792 int
1793 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1794 {
1795         if (flags & PFR_FLAG_ALLRSETS)
1796                 return (0);
1797         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1798                 return (1);
1799         return (0);
1800 }
1801
1802 void
1803 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1804 {
1805         struct pfr_ktable       *p;
1806
1807         SLIST_FOREACH(p, workq, pfrkt_workq)
1808                 pfr_insert_ktable(p);
1809 }
1810
1811 void
1812 pfr_insert_ktable(struct pfr_ktable *kt)
1813 {
1814         RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1815         pfr_ktable_cnt++;
1816         if (kt->pfrkt_root != NULL)
1817                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1818                         pfr_setflags_ktable(kt->pfrkt_root,
1819                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1820 }
1821
1822 void
1823 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1824 {
1825         struct pfr_ktable       *p, *q;
1826
1827         for (p = SLIST_FIRST(workq); p; p = q) {
1828                 q = SLIST_NEXT(p, pfrkt_workq);
1829                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1830         }
1831 }
1832
/*
 * Apply a new flag word to a table.  If no set flags remain, the
 * table is unlinked from the tree and destroyed (dropping the anchor
 * reference on its root table, which may recurse into this function).
 * Deactivation flushes the table's addresses; leaving transaction
 * state discards its shadow table.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	/* an unreferenced, non-persistent table cannot stay active */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* nothing keeps the table alive: tear it down entirely */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	/* deactivating: flush any addresses still in the table */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* leaving transaction state: discard the shadow table */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1864
1865 void
1866 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1867 {
1868         struct pfr_ktable       *p;
1869
1870         SLIST_FOREACH(p, workq, pfrkt_workq)
1871                 pfr_clstats_ktable(p, tzero, recurse);
1872 }
1873
/*
 * Zero a table's packet/byte/match counters and reset its creation
 * time to tzero.  With recurse set, the statistics of all entries in
 * the table are cleared as well.  The counters themselves are wiped
 * inside a critical section.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
1890
/*
 * Allocate and initialize a new table; tzero is recorded as its
 * creation time.  With attachruleset set, the table is bound to the
 * (possibly freshly created) ruleset of its anchor.  Returns NULL on
 * allocation failure.
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT| PR_ZERO | PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix head per address family */
	KKASSERT(pf_maskhead != NULL);
	if (!rn_inithead((void **)&kt->pfrkt_ip4, pf_maskhead,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6, pf_maskhead,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		/* destroy also releases whichever head was set up */
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1924
1925 void
1926 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1927 {
1928         struct pfr_ktable       *p, *q;
1929
1930         for (p = SLIST_FIRST(workq); p; p = q) {
1931                 q = SLIST_NEXT(p, pfrkt_workq);
1932                 pfr_destroy_ktable(p, flushaddr);
1933         }
1934 }
1935
/*
 * Free a table and everything attached to it: optionally its
 * addresses (flushaddr), its two radix heads, a shadow table if one
 * exists (recursing once), and its ruleset reference.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);

	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
1959
1960 int
1961 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1962 {
1963         int d;
1964
1965         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1966                 return (d);
1967         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1968 }
1969
1970 struct pfr_ktable *
1971 pfr_lookup_table(struct pfr_table *tbl)
1972 {
1973         /* struct pfr_ktable start like a struct pfr_table */
1974         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1975             (struct pfr_ktable *)tbl));
1976 }
1977
/*
 * Test whether address a (family af) matches table kt, updating the
 * table's match/nomatch counters.  An inactive table defers to its
 * root table; a table that is still inactive never matches.  Returns
 * 1 on a non-negated match, 0 otherwise.  Uses the global
 * pfr_sin/pfr_sin6 scratch sockaddrs as lookup keys.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* the radix tree's internal root nodes are not entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2016
/*
 * Account a packet of len bytes against table kt: bump the table's
 * per-direction/per-operation packet and byte counters, plus the
 * matched entry's own counters when the table keeps them
 * (PFR_TFLAG_COUNTERS).  notrule says whether the matching rule used
 * a negated table match; a disagreement with the lookup result is
 * booked as PFR_OP_XPASS.  Uses the global pfr_sin/pfr_sin6 scratch
 * sockaddrs as lookup keys.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	/* an inactive table defers to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	/* lookup result and rule's negation expectation must agree */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		/* per-entry counters are allocated lazily, best effort */
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
2068
/*
 * Find or create the table called name for use by a rule in ruleset
 * rs, and take a PFR_REFCNT_RULE reference on it.  For a table inside
 * an anchor the corresponding root table (same name, empty anchor) is
 * found or created as well.  Returns the table, or NULL on allocation
 * failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* the root table uses the same name, no anchor */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* first rule reference marks the table as referenced */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2104
2105 void
2106 pfr_detach_table(struct pfr_ktable *kt)
2107 {
2108         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2109                 kprintf("pfr_detach_table: refcount = %d.\n",
2110                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2111         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2112                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2113 }
2114
2115 int
2116 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2117     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2118 {
2119         struct pfr_kentry       *ke, *ke2 = NULL;
2120         struct pf_addr          *addr = NULL;
2121         union sockaddr_union     mask;
2122         int                      idx = -1, use_counter = 0;
2123
2124         if (af == AF_INET)
2125                 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2126         else if (af == AF_INET6)
2127                 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2128         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2129                 kt = kt->pfrkt_root;
2130         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2131                 return (-1);
2132
2133         if (pidx != NULL)
2134                 idx = *pidx;
2135         if (counter != NULL && idx >= 0)
2136                 use_counter = 1;
2137         if (idx < 0)
2138                 idx = 0;
2139
2140 _next_block:
2141         ke = pfr_kentry_byidx(kt, idx, af);
2142         if (ke == NULL) {
2143                 kt->pfrkt_nomatch++;
2144                 return (1);
2145         }
2146         pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2147         *raddr = SUNION2PF(&ke->pfrke_sa, af);
2148         *rmask = SUNION2PF(&pfr_mask, af);
2149
2150         if (use_counter) {
2151                 /* is supplied address within block? */
2152                 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2153                         /* no, go to next block in table */
2154                         idx++;
2155                         use_counter = 0;
2156                         goto _next_block;
2157                 }
2158                 PF_ACPY(addr, counter, af);
2159         } else {
2160                 /* use first address of block */
2161                 PF_ACPY(addr, *raddr, af);
2162         }
2163
2164         if (!KENTRY_NETWORK(ke)) {
2165                 /* this is a single IP address - no possible nested block */
2166                 PF_ACPY(counter, addr, af);
2167                 *pidx = idx;
2168                 kt->pfrkt_match++;
2169                 return (0);
2170         }
2171         for (;;) {
2172                 /* we don't want to use a nested block */
2173                 if (af == AF_INET)
2174                         ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2175                             kt->pfrkt_ip4);
2176                 else if (af == AF_INET6)
2177                         ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2178                             kt->pfrkt_ip6);
2179                 /* no need to check KENTRY_RNF_ROOT() here */
2180                 if (ke2 == ke) {
2181                         /* lookup return the same block - perfect */
2182                         PF_ACPY(counter, addr, af);
2183                         *pidx = idx;
2184                         kt->pfrkt_match++;
2185                         return (0);
2186                 }
2187
2188                 /* we need to increase the counter past the nested block */
2189                 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2190                 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2191                 PF_AINC(addr, af);
2192                 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2193                         /* ok, we reached the end of our main block */
2194                         /* go to next block in table */
2195                         idx++;
2196                         use_counter = 0;
2197                         goto _next_block;
2198                 }
2199         }
2200 }
2201
2202 struct pfr_kentry *
2203 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2204 {
2205         struct pfr_walktree     w;
2206
2207         bzero(&w, sizeof(w));
2208         w.pfrw_op = PFRW_POOL_GET;
2209         w.pfrw_cnt = idx;
2210
2211         switch (af) {
2212 #ifdef INET
2213         case AF_INET:
2214                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2215                 return (w.pfrw_kentry);
2216 #endif /* INET */
2217 #ifdef INET6
2218         case AF_INET6:
2219                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2220                 return (w.pfrw_kentry);
2221 #endif /* INET6 */
2222         default:
2223                 return (NULL);
2224         }
2225 }
2226
2227 void
2228 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2229 {
2230         struct pfr_walktree     w;
2231
2232         bzero(&w, sizeof(w));
2233         w.pfrw_op = PFRW_DYNADDR_UPDATE;
2234         w.pfrw_dyn = dyn;
2235
2236         crit_enter();
2237         dyn->pfid_acnt4 = 0;
2238         dyn->pfid_acnt6 = 0;
2239         if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2240                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2241         if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2242                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2243         crit_exit();
2244 }