pf: Update to OpenBSD 4.2
[dragonfly.git] / sys / net / pf / pf_table.c
1 /*      $OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $    */
2
3 /*
4  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
5  *
6  * Copyright (c) 2002 Cedric Berger
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
45 #include <vm/vm_zone.h>
46
47 #include <net/if.h>
48 #include <net/route.h>
49 #include <netinet/in.h>
50 #include <net/pf/pfvar.h>
51
/*
 * Reject the request if the caller passed any flag (within
 * PFR_FLAG_ALLMASK) that is not in 'oklist'.  Expands to a
 * 'return (EINVAL)' in the calling function.
 */
#define ACCEPT_FLAGS(flags, oklist)                     \
        do {                                    \
                if ((flags & ~(oklist)) &       \
                    PFR_FLAG_ALLMASK)           \
                        return (EINVAL);        \
        } while (0)

/*
 * Copy between the caller's buffer and kernel memory.  When the request
 * came in via ioctl (PFR_FLAG_USERIOCTL) the buffer is a userland
 * pointer and copyin/copyout may fail; for in-kernel callers a plain
 * bcopy is used and the expression evaluates to 0 (success).
 */
#define COPYIN(from, to, size, flags)           \
        ((flags & PFR_FLAG_USERIOCTL) ?         \
        copyin((from), (to), (size)) :          \
        (bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)          \
        ((flags & PFR_FLAG_USERIOCTL) ?         \
        copyout((from), (to), (size)) :         \
        (bcopy((from), (to), (size)), 0))

/* Initialize a sockaddr_in / sockaddr_in6 for radix-tree operations. */
#define FILLIN_SIN(sin, addr)                   \
        do {                                    \
                (sin).sin_len = sizeof(sin);    \
                (sin).sin_family = AF_INET;     \
                (sin).sin_addr = (addr);        \
        } while (0)

#define FILLIN_SIN6(sin6, addr)                 \
        do {                                    \
                (sin6).sin6_len = sizeof(sin6); \
                (sin6).sin6_family = AF_INET6;  \
                (sin6).sin6_addr = (addr);      \
        } while (0)

/* Swap two values of the given type. */
#define SWAP(type, a1, a2)                      \
        do {                                    \
                type tmp = a1;                  \
                a1 = a2;                        \
                a2 = tmp;                       \
        } while (0)

/* Pointer to the pf_addr embedded in a sockaddr_union for family 'af'. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?    \
    (struct pf_addr *)&(su)->sin.sin_addr :     \
    (struct pf_addr *)&(su)->sin6.sin6_addr)

/* Address width in bits for the given family (IPv4: 32, else 128). */
#define AF_BITS(af)             (((af)==AF_INET)?32:128)
/* True when the prefix is shorter than a full host address. */
#define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* Radix-tree internal (root) nodes must never be treated as entries. */
#define KENTRY_RNF_ROOT(ke) \
                ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES            (-1)
#define ENQUEUE_UNMARKED_ONLY   (1)
#define INVERT_NEG_FLAG         (1)
103
/*
 * Argument bundle for pfr_walktree(): selects the operation performed
 * on each radix-tree entry and carries the per-operation state.
 */
struct pfr_walktree {
        enum pfrw_op {
                PFRW_MARK,              /* used by pfr_mark_addrs() */
                PFRW_SWEEP,             /* pfr_enqueue_addrs() with sweep set */
                PFRW_ENQUEUE,           /* pfr_enqueue_addrs(), all entries */
                PFRW_GET_ADDRS,         /* pfr_get_addrs() copy-out */
                PFRW_GET_ASTATS,        /* pfr_get_astats() copy-out */
                PFRW_POOL_GET,          /* presumably pfr_kentry_byidx() -- handler not shown here */
                PFRW_DYNADDR_UPDATE     /* dynamic-address refresh (pfi_dynaddr) */
        }        pfrw_op;
        union {
                struct pfr_addr         *pfrw1_addr;
                struct pfr_astats       *pfrw1_astats;
                struct pfr_kentryworkq  *pfrw1_workq;
                struct pfr_kentry       *pfrw1_kentry;
                struct pfi_dynaddr      *pfrw1_dyn;
        }        pfrw_1;
        int      pfrw_free;     /* remaining buffer slots, or entry counter */
        int      pfrw_flags;
};
/* Shorthands for the union members / dual-purpose counter. */
#define pfrw_addr       pfrw_1.pfrw1_addr
#define pfrw_astats     pfrw_1.pfrw1_astats
#define pfrw_workq      pfrw_1.pfrw1_workq
#define pfrw_kentry     pfrw_1.pfrw1_kentry
#define pfrw_dyn        pfrw_1.pfrw1_dyn
#define pfrw_cnt        pfrw_free
130
/* Record an error code in 'rv' and jump to the common cleanup label. */
#define senderr(e)      do { rv = (e); goto _bad; } while (0)

/* Allocation zones; pl2 is used when allocating from interrupt context
 * (see pfr_create_kentry()). */
vm_zone_t                pfr_ktable_pl;
vm_zone_t                pfr_kentry_pl;
vm_zone_t                pfr_kentry_pl2;
/* Prebuilt sockaddr templates, filled in by pfr_initialize(). */
struct sockaddr_in       pfr_sin;
struct sockaddr_in6      pfr_sin6;
union sockaddr_union     pfr_mask;
struct pf_addr           pfr_ffaddr;    /* all-ones address (host mask) */
140
141 void                     pfr_copyout_addr(struct pfr_addr *,
142                             struct pfr_kentry *ke);
143 int                      pfr_validate_addr(struct pfr_addr *);
144 void                     pfr_enqueue_addrs(struct pfr_ktable *,
145                             struct pfr_kentryworkq *, int *, int);
146 void                     pfr_mark_addrs(struct pfr_ktable *);
147 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
148                             struct pfr_addr *, int);
149 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *, int);
150 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
151 void                     pfr_destroy_kentry(struct pfr_kentry *);
152 void                     pfr_insert_kentries(struct pfr_ktable *,
153                             struct pfr_kentryworkq *, long);
154 void                     pfr_remove_kentries(struct pfr_ktable *,
155                             struct pfr_kentryworkq *);
156 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
157                             int);
158 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
159 void                     pfr_prepare_network(union sockaddr_union *, int, int);
160 int                      pfr_route_kentry(struct pfr_ktable *,
161                             struct pfr_kentry *);
162 int                      pfr_unroute_kentry(struct pfr_ktable *,
163                             struct pfr_kentry *);
164 int                      pfr_walktree(struct radix_node *, void *);
165 int                      pfr_validate_table(struct pfr_table *, int, int);
166 int                      pfr_fix_anchor(char *);
167 void                     pfr_commit_ktable(struct pfr_ktable *, long);
168 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
169 void                     pfr_insert_ktable(struct pfr_ktable *);
170 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
171 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
172 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173                             int);
174 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
175 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
176 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
177 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
178 int                      pfr_ktable_compare(struct pfr_ktable *,
179                             struct pfr_ktable *);
180 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
181 void                     pfr_clean_node_mask(struct pfr_ktable *,
182                             struct pfr_kentryworkq *);
183 int                      pfr_table_count(struct pfr_table *, int);
184 int                      pfr_skip_table(struct pfr_table *,
185                             struct pfr_ktable *, int);
186 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
187
188 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
189 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190
/* Global RB tree of all tables and its entry count. */
struct pfr_ktablehead    pfr_ktables;
struct pfr_table         pfr_nulltable; /* zeroed template for scratch tables */
int                      pfr_ktable_cnt;
194
195 void
196 pfr_initialize(void)
197 {
198         pfr_sin.sin_len = sizeof(pfr_sin);
199         pfr_sin.sin_family = AF_INET;
200         pfr_sin6.sin6_len = sizeof(pfr_sin6);
201         pfr_sin6.sin6_family = AF_INET6;
202
203         memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
204 }
205
206 int
207 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
208 {
209         struct pfr_ktable       *kt;
210         struct pfr_kentryworkq   workq;
211
212         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
213         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
214                 return (EINVAL);
215         kt = pfr_lookup_table(tbl);
216         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
217                 return (ESRCH);
218         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
219                 return (EPERM);
220         pfr_enqueue_addrs(kt, &workq, ndel, 0);
221
222         if (!(flags & PFR_FLAG_DUMMY)) {
223                 if (flags & PFR_FLAG_ATOMIC)
224                         crit_enter();
225                 pfr_remove_kentries(kt, &workq);
226                 if (flags & PFR_FLAG_ATOMIC)
227                         crit_exit();
228                 if (kt->pfrkt_cnt) {
229                         kprintf("pfr_clr_addrs: corruption detected (%d).\n",
230                             kt->pfrkt_cnt);
231                         kt->pfrkt_cnt = 0;
232                 }
233         }
234         return (0);
235 }
236
/*
 * Add the 'size' addresses in 'addr' to table 'tbl'.  Duplicates within
 * the request are detected via a scratch table (tmpkt) so each address
 * is added at most once.  *nadd receives the number of entries actually
 * added.  Supports PFR_FLAG_DUMMY (no-op run), PFR_FLAG_ATOMIC (insert
 * inside a critical section) and PFR_FLAG_FEEDBACK (per-address status
 * written back into 'addr').
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
        struct pfr_ktable       *kt, *tmpkt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p, *q;
        struct pfr_addr          ad;
        int                      i, rv, xadd = 0;
        long                     tzero = time_second;

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /* scratch table used to spot duplicates within this request */
        tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
        if (tmpkt == NULL)
                return (ENOMEM);
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, &ad, 1);        /* already in table? */
                q = pfr_lookup_addr(tmpkt, &ad, 1);     /* seen earlier in request? */
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (q != NULL)
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                        else if (p == NULL)
                                ad.pfra_fback = PFR_FB_ADDED;
                        else if (p->pfrke_not != ad.pfra_not)
                                ad.pfra_fback = PFR_FB_CONFLICT;
                        else
                                ad.pfra_fback = PFR_FB_NONE;
                }
                if (p == NULL && q == NULL) {
                        p = pfr_create_kentry(&ad,
                            !(flags & PFR_FLAG_USERIOCTL));
                        if (p == NULL)
                                senderr(ENOMEM);
                        /* route into tmpkt to remember it for this request */
                        if (pfr_route_kentry(tmpkt, p)) {
                                pfr_destroy_kentry(p);
                                ad.pfra_fback = PFR_FB_NONE;
                        } else {
                                SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                                xadd++;
                        }
                }
                if (flags & PFR_FLAG_FEEDBACK)
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
        }
        /* detach the new entries from the scratch table's radix nodes */
        pfr_clean_node_mask(tmpkt, &workq);
        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_insert_kentries(kt, &workq, tzero);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        } else
                pfr_destroy_kentries(&workq);
        if (nadd != NULL)
                *nadd = xadd;
        pfr_destroy_ktable(tmpkt, 0);
        return (0);
_bad:
        pfr_clean_node_mask(tmpkt, &workq);
        pfr_destroy_kentries(&workq);
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        pfr_destroy_ktable(tmpkt, 0);
        return (rv);
}
316
/*
 * Delete the given addresses from table 'tbl'.  *ndel receives the
 * number of entries removed.  Supports PFR_FLAG_DUMMY, PFR_FLAG_ATOMIC
 * and PFR_FLAG_FEEDBACK (per-address status written back to 'addr').
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_kentryworkq   workq;
        struct pfr_kentry       *p;
        struct pfr_addr          ad;
        int                      i, rv, xdel = 0, log = 1;

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /*
         * there are two algorithms to choose from here.
         * with:
         *   n: number of addresses to delete
         *   N: number of addresses in the table
         *
         * one is O(N) and is better for large 'n'
         * one is O(n*LOG(N)) and is better for small 'n'
         *
         * following code tries to decide which one is best.
         */
        /* log ends up as roughly log2(N) + 1 */
        for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
                log++;
        if (size > kt->pfrkt_cnt/log) {
                /* full table scan */
                pfr_mark_addrs(kt);
        } else {
                /* iterate over addresses to delete */
                for (i = 0; i < size; i++) {
                        if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                                return (EFAULT);
                        if (pfr_validate_addr(&ad))
                                return (EINVAL);
                        p = pfr_lookup_addr(kt, &ad, 1);
                        if (p != NULL)
                                p->pfrke_mark = 0;
                }
        }
        SLIST_INIT(&workq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                p = pfr_lookup_addr(kt, &ad, 1);
                if (flags & PFR_FLAG_FEEDBACK) {
                        if (p == NULL)
                                ad.pfra_fback = PFR_FB_NONE;
                        else if (p->pfrke_not != ad.pfra_not)
                                ad.pfra_fback = PFR_FB_CONFLICT;
                        else if (p->pfrke_mark)
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                        else
                                ad.pfra_fback = PFR_FB_DELETED;
                }
                /* mark the entry for removal; the mark also catches
                 * duplicates within the request */
                if (p != NULL && p->pfrke_not == ad.pfra_not &&
                    !p->pfrke_mark) {
                        p->pfrke_mark = 1;
                        SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                        xdel++;
                }
                if (flags & PFR_FLAG_FEEDBACK)
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
        }
        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_remove_kentries(kt, &workq);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        }
        if (ndel != NULL)
                *ndel = xdel;
        return (0);
_bad:
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        return (rv);
}
406
/*
 * Replace the contents of table 'tbl' with the 'size' addresses in
 * 'addr': entries not listed are deleted, missing ones are added, and
 * entries whose negation flag differs are changed.  *nadd, *ndel and
 * *nchange receive the respective counts.  With PFR_FLAG_FEEDBACK the
 * deleted entries are appended to 'addr' (capacity *size2, which is
 * updated to the space required).  Supports PFR_FLAG_DUMMY and
 * PFR_FLAG_ATOMIC.
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
        struct pfr_ktable       *kt, *tmpkt;
        struct pfr_kentryworkq   addq, delq, changeq;
        struct pfr_kentry       *p, *q;
        struct pfr_addr          ad;
        int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
        long                     tzero = time_second;

        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
            PFR_FLAG_FEEDBACK);
        if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
            PFR_FLAG_USERIOCTL))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_flags & PFR_TFLAG_CONST)
                return (EPERM);
        /* scratch table used to spot duplicates within this request */
        tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
        if (tmpkt == NULL)
                return (ENOMEM);
        /* clear marks; whatever stays unmarked gets deleted below */
        pfr_mark_addrs(kt);
        SLIST_INIT(&addq);
        SLIST_INIT(&delq);
        SLIST_INIT(&changeq);
        for (i = 0; i < size; i++) {
                if (COPYIN(addr+i, &ad, sizeof(ad), flags))
                        senderr(EFAULT);
                if (pfr_validate_addr(&ad))
                        senderr(EINVAL);
                ad.pfra_fback = PFR_FB_NONE;
                p = pfr_lookup_addr(kt, &ad, 1);
                if (p != NULL) {
                        if (p->pfrke_mark) {
                                /* address appeared twice in the request */
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                                goto _skip;
                        }
                        p->pfrke_mark = 1;
                        if (p->pfrke_not != ad.pfra_not) {
                                SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
                                ad.pfra_fback = PFR_FB_CHANGED;
                                xchange++;
                        }
                } else {
                        q = pfr_lookup_addr(tmpkt, &ad, 1);
                        if (q != NULL) {
                                ad.pfra_fback = PFR_FB_DUPLICATE;
                                goto _skip;
                        }
                        p = pfr_create_kentry(&ad,
                            !(flags & PFR_FLAG_USERIOCTL));
                        if (p == NULL)
                                senderr(ENOMEM);
                        /* route into tmpkt to remember it for this request */
                        if (pfr_route_kentry(tmpkt, p)) {
                                pfr_destroy_kentry(p);
                                ad.pfra_fback = PFR_FB_NONE;
                        } else {
                                SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
                                ad.pfra_fback = PFR_FB_ADDED;
                                xadd++;
                        }
                }
_skip:
                if (flags & PFR_FLAG_FEEDBACK)
                        if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
                                senderr(EFAULT);
        }
        /* entries still unmarked were not in the request: delete them */
        pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
        if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
                if (*size2 < size+xdel) {
                        /* caller's buffer too small; report needed size */
                        *size2 = size+xdel;
                        senderr(0);
                }
                i = 0;
                SLIST_FOREACH(p, &delq, pfrke_workq) {
                        pfr_copyout_addr(&ad, p);
                        ad.pfra_fback = PFR_FB_DELETED;
                        if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
                                senderr(EFAULT);
                        i++;
                }
        }
        pfr_clean_node_mask(tmpkt, &addq);
        if (!(flags & PFR_FLAG_DUMMY)) {
                if (flags & PFR_FLAG_ATOMIC)
                        crit_enter();
                pfr_insert_kentries(kt, &addq, tzero);
                pfr_remove_kentries(kt, &delq);
                pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
                if (flags & PFR_FLAG_ATOMIC)
                        crit_exit();
        } else
                pfr_destroy_kentries(&addq);
        if (nadd != NULL)
                *nadd = xadd;
        if (ndel != NULL)
                *ndel = xdel;
        if (nchange != NULL)
                *nchange = xchange;
        if ((flags & PFR_FLAG_FEEDBACK) && size2)
                *size2 = size+xdel;
        pfr_destroy_ktable(tmpkt, 0);
        return (0);
_bad:
        pfr_clean_node_mask(tmpkt, &addq);
        pfr_destroy_kentries(&addq);
        if (flags & PFR_FLAG_FEEDBACK)
                pfr_reset_feedback(addr, size, flags);
        pfr_destroy_ktable(tmpkt, 0);
        return (rv);
}
522
523 int
524 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
525         int *nmatch, int flags)
526 {
527         struct pfr_ktable       *kt;
528         struct pfr_kentry       *p;
529         struct pfr_addr          ad;
530         int                      i, xmatch = 0;
531
532         ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
533         if (pfr_validate_table(tbl, 0, 0))
534                 return (EINVAL);
535         kt = pfr_lookup_table(tbl);
536         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
537                 return (ESRCH);
538
539         for (i = 0; i < size; i++) {
540                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
541                         return (EFAULT);
542                 if (pfr_validate_addr(&ad))
543                         return (EINVAL);
544                 if (ADDR_NETWORK(&ad))
545                         return (EINVAL);
546                 p = pfr_lookup_addr(kt, &ad, 0);
547                 if (flags & PFR_FLAG_REPLACE)
548                         pfr_copyout_addr(&ad, p);
549                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
550                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
551                 if (p != NULL && !p->pfrke_not)
552                         xmatch++;
553                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
554                         return (EFAULT);
555         }
556         if (nmatch != NULL)
557                 *nmatch = xmatch;
558         return (0);
559 }
560
561 int
562 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
563         int flags)
564 {
565         struct pfr_ktable       *kt;
566         struct pfr_walktree      w;
567         int                      rv;
568
569         ACCEPT_FLAGS(flags, 0);
570         if (pfr_validate_table(tbl, 0, 0))
571                 return (EINVAL);
572         kt = pfr_lookup_table(tbl);
573         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
574                 return (ESRCH);
575         if (kt->pfrkt_cnt > *size) {
576                 *size = kt->pfrkt_cnt;
577                 return (0);
578         }
579
580         bzero(&w, sizeof(w));
581         w.pfrw_op = PFRW_GET_ADDRS;
582         w.pfrw_addr = addr;
583         w.pfrw_free = kt->pfrkt_cnt;
584         w.pfrw_flags = flags;
585         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
586         if (!rv)
587                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
588         if (rv)
589                 return (rv);
590
591         if (w.pfrw_free) {
592                 kprintf("pfr_get_addrs: corruption detected (%d).\n",
593                     w.pfrw_free);
594                 return (ENOTTY);
595         }
596         *size = kt->pfrkt_cnt;
597         return (0);
598 }
599
/*
 * Copy out the per-address statistics of table 'tbl' into 'addr'.
 * If the buffer (*size entries) is too small only *size is updated to
 * the required count.  PFR_FLAG_ATOMIC wraps the walk in a critical
 * section.  PFR_FLAG_CLSTATS (clear counters after reading) is not
 * accepted by ACCEPT_FLAGS below, so that path is currently dormant.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
        struct pfr_ktable       *kt;
        struct pfr_walktree      w;
        struct pfr_kentryworkq   workq;
        int                      rv;
        long                     tzero = time_second;

        /* XXX PFR_FLAG_CLSTATS disabled */
        ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
        if (pfr_validate_table(tbl, 0, 0))
                return (EINVAL);
        kt = pfr_lookup_table(tbl);
        if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
                return (ESRCH);
        if (kt->pfrkt_cnt > *size) {
                /* caller's buffer too small; report the needed size */
                *size = kt->pfrkt_cnt;
                return (0);
        }

        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_GET_ASTATS;
        w.pfrw_astats = addr;
        w.pfrw_free = kt->pfrkt_cnt;
        w.pfrw_flags = flags;
        if (flags & PFR_FLAG_ATOMIC)
                crit_enter();
        rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
        if (!rv)
                rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
        if (!rv && (flags & PFR_FLAG_CLSTATS)) {
                /* unreachable while CLSTATS is rejected above */
                pfr_enqueue_addrs(kt, &workq, NULL, 0);
                pfr_clstats_kentries(&workq, tzero, 0);
        }
        if (flags & PFR_FLAG_ATOMIC)
                crit_exit();
        if (rv)
                return (rv);

        /* the walk must have consumed exactly pfrkt_cnt slots */
        if (w.pfrw_free) {
                kprintf("pfr_get_astats: corruption detected (%d).\n",
                    w.pfrw_free);
                return (ENOTTY);
        }
        *size = kt->pfrkt_cnt;
        return (0);
}
649
650 int
651 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
652     int *nzero, int flags)
653 {
654         struct pfr_ktable       *kt;
655         struct pfr_kentryworkq   workq;
656         struct pfr_kentry       *p;
657         struct pfr_addr          ad;
658         int                      i, rv, xzero = 0;
659
660         ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
661             PFR_FLAG_FEEDBACK);
662         if (pfr_validate_table(tbl, 0, 0))
663                 return (EINVAL);
664         kt = pfr_lookup_table(tbl);
665         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
666                 return (ESRCH);
667         SLIST_INIT(&workq);
668         for (i = 0; i < size; i++) {
669                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
670                         senderr(EFAULT);
671                 if (pfr_validate_addr(&ad))
672                         senderr(EINVAL);
673                 p = pfr_lookup_addr(kt, &ad, 1);
674                 if (flags & PFR_FLAG_FEEDBACK) {
675                         ad.pfra_fback = (p != NULL) ?
676                             PFR_FB_CLEARED : PFR_FB_NONE;
677                         if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
678                                 senderr(EFAULT);
679                 }
680                 if (p != NULL) {
681                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
682                         xzero++;
683                 }
684         }
685
686         if (!(flags & PFR_FLAG_DUMMY)) {
687                 if (flags & PFR_FLAG_ATOMIC)
688                         crit_enter();
689                 pfr_clstats_kentries(&workq, 0, 0);
690                 if (flags & PFR_FLAG_ATOMIC)
691                         crit_exit();
692         }
693         if (nzero != NULL)
694                 *nzero = xzero;
695         return (0);
696 _bad:
697         if (flags & PFR_FLAG_FEEDBACK)
698                 pfr_reset_feedback(addr, size, flags);
699         return (rv);
700 }
701
/*
 * Validate a pfr_addr supplied by the caller: the address family must
 * be supported, the prefix length must fit the family, every address
 * bit beyond the prefix must be zero, and pfra_not/pfra_fback must
 * hold legal values.  Returns 0 if valid, -1 otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
        int i;

        switch (ad->pfra_af) {
#ifdef INET
        case AF_INET:
                if (ad->pfra_net > 32)
                        return (-1);
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                if (ad->pfra_net > 128)
                        return (-1);
                break;
#endif /* INET6 */
        default:
                return (-1);
        }
        /*
         * Reject set bits beyond the prefix length: first the partial
         * byte, then all remaining whole bytes of the address union.
         * NOTE(review): this byte-wise scan assumes pfra_u is the
         * first member of struct pfr_addr -- confirm against pfvar.h.
         */
        if (ad->pfra_net < 128 &&
                (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
                        return (-1);
        for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
                if (((caddr_t)ad)[i])
                        return (-1);
        if (ad->pfra_not && ad->pfra_not != 1)
                return (-1);
        if (ad->pfra_fback)
                return (-1);
        return (0);
}
735
736 void
737 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
738         int *naddr, int sweep)
739 {
740         struct pfr_walktree     w;
741
742         SLIST_INIT(workq);
743         bzero(&w, sizeof(w));
744         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
745         w.pfrw_workq = workq;
746         if (kt->pfrkt_ip4 != NULL)
747                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
748                         kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
749         if (kt->pfrkt_ip6 != NULL)
750                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
751                         kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
752         if (naddr != NULL)
753                 *naddr = w.pfrw_cnt;
754 }
755
756 void
757 pfr_mark_addrs(struct pfr_ktable *kt)
758 {
759         struct pfr_walktree     w;
760
761         bzero(&w, sizeof(w));
762         w.pfrw_op = PFRW_MARK;
763         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
764                 kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
765         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
766                 kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
767 }
768
769
770 struct pfr_kentry *
771 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
772 {
773         union sockaddr_union     sa, mask;
774         struct radix_node_head  *head = NULL;
775         struct pfr_kentry       *ke;
776
777         bzero(&sa, sizeof(sa));
778         if (ad->pfra_af == AF_INET) {
779                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
780                 head = kt->pfrkt_ip4;
781         } else if ( ad->pfra_af == AF_INET6 ) {
782                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
783                 head = kt->pfrkt_ip6;
784         }
785         if (ADDR_NETWORK(ad)) {
786                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
787                 crit_enter(); /* rn_lookup makes use of globals */
788                 ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
789                     head);
790                 crit_exit();
791                 if (ke && KENTRY_RNF_ROOT(ke))
792                         ke = NULL;
793         } else {
794                 ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
795                 if (ke && KENTRY_RNF_ROOT(ke))
796                         ke = NULL;
797                 if (exact && ke && KENTRY_NETWORK(ke))
798                         ke = NULL;
799         }
800         return (ke);
801 }
802
803 struct pfr_kentry *
804 pfr_create_kentry(struct pfr_addr *ad, int intr)
805 {
806         struct pfr_kentry       *ke;
807
808         if (intr)
809                 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
810         else
811                 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
812         if (ke == NULL)
813                 return (NULL);
814         bzero(ke, sizeof(*ke));
815
816         if (ad->pfra_af == AF_INET)
817                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
818         else if (ad->pfra_af == AF_INET6)
819                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
820         ke->pfrke_af = ad->pfra_af;
821         ke->pfrke_net = ad->pfra_net;
822         ke->pfrke_not = ad->pfra_not;
823         ke->pfrke_intrpool = intr;
824         return (ke);
825 }
826
827 void
828 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
829 {
830         struct pfr_kentry       *p, *q;
831
832         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
833                 q = SLIST_NEXT(p, pfrke_workq);
834                 pfr_destroy_kentry(p);
835         }
836 }
837
/*
 * Return a single kentry to the pool it was allocated from: the
 * interrupt-safe pool when pfrke_intrpool is set (see
 * pfr_create_kentry()), the regular pool otherwise.
 */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}
846
847 void
848 pfr_insert_kentries(struct pfr_ktable *kt,
849     struct pfr_kentryworkq *workq, long tzero)
850 {
851         struct pfr_kentry       *p;
852         int                      rv, n = 0;
853
854         SLIST_FOREACH(p, workq, pfrke_workq) {
855                 rv = pfr_route_kentry(kt, p);
856                 if (rv) {
857                         kprintf("pfr_insert_kentries: cannot route entry "
858                             "(code=%d).\n", rv);
859                         break;
860                 }
861                 p->pfrke_tzero = tzero;
862                 n++;
863         }
864         kt->pfrkt_cnt += n;
865 }
866
867 int
868 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
869 {
870         struct pfr_kentry       *p;
871         int                      rv;
872
873         p = pfr_lookup_addr(kt, ad, 1);
874         if (p != NULL)
875                 return (0);
876         p = pfr_create_kentry(ad, 1);
877         if (p == NULL)
878                 return (EINVAL);
879
880         rv = pfr_route_kentry(kt, p);
881         if (rv)
882                 return (rv);
883
884         p->pfrke_tzero = tzero;
885         kt->pfrkt_cnt++;
886
887         return (0);
888 }
889
890 void
891 pfr_remove_kentries(struct pfr_ktable *kt,
892     struct pfr_kentryworkq *workq)
893 {
894         struct pfr_kentry       *p;
895         int                      n = 0;
896
897         SLIST_FOREACH(p, workq, pfrke_workq) {
898                 pfr_unroute_kentry(kt, p);
899                 n++;
900         }
901         kt->pfrkt_cnt -= n;
902         pfr_destroy_kentries(workq);
903 }
904
905 void
906 pfr_clean_node_mask(struct pfr_ktable *kt,
907     struct pfr_kentryworkq *workq)
908 {
909         struct pfr_kentry       *p;
910
911         SLIST_FOREACH(p, workq, pfrke_workq)
912                 pfr_unroute_kentry(kt, p);
913 }
914
915 void
916 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
917 {
918         struct pfr_kentry       *p;
919
920         SLIST_FOREACH(p, workq, pfrke_workq) {
921                 crit_enter();
922                 if (negchange)
923                         p->pfrke_not = !p->pfrke_not;
924                 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
925                 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
926                 crit_exit();
927                 p->pfrke_tzero = tzero;
928         }
929 }
930
931 void
932 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
933 {
934         struct pfr_addr ad;
935         int             i;
936
937         for (i = 0; i < size; i++) {
938                 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
939                         break;
940                 ad.pfra_fback = PFR_FB_NONE;
941                 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
942                         break;
943         }
944 }
945
946 void
947 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
948 {
949         int     i;
950
951         bzero(sa, sizeof(*sa));
952         if (af == AF_INET) {
953                 sa->sin.sin_len = sizeof(sa->sin);
954                 sa->sin.sin_family = AF_INET;
955                 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
956         } else if (af == AF_INET6) {
957                 sa->sin6.sin6_len = sizeof(sa->sin6);
958                 sa->sin6.sin6_family = AF_INET6;
959                 for (i = 0; i < 4; i++) {
960                         if (net <= 32) {
961                                 sa->sin6.sin6_addr.s6_addr32[i] =
962                                     net ? htonl(-1 << (32-net)) : 0;
963                                 break;
964                         }
965                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
966                         net -= 32;
967                 }
968         }
969 }
970
/*
 * Insert <ke> into the radix tree of <kt> matching its address
 * family.  Network entries are added with an explicit netmask, host
 * entries without one.  Returns 0 on success, -1 when rn_addroute()
 * returns NULL.
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	/* the radix code makes use of globals; serialize access */
	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	crit_exit();

	return (rn == NULL ? -1 : 0);
}
996
/*
 * Remove <ke> from the radix tree of <kt> matching its address
 * family.  Returns 0 on success, -1 (with a console message) when
 * rn_delete() does not find the entry.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	/* the radix code makes use of globals; serialize access */
	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	crit_exit();

	if (rn == NULL) {
		kprintf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1023
1024 void
1025 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1026 {
1027         bzero(ad, sizeof(*ad));
1028         if (ke == NULL)
1029                 return;
1030         ad->pfra_af = ke->pfrke_af;
1031         ad->pfra_net = ke->pfrke_net;
1032         ad->pfra_not = ke->pfrke_not;
1033         if (ad->pfra_af == AF_INET)
1034                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1035         else if (ad->pfra_af == AF_INET6)
1036                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1037 }
1038
/*
 * Radix-tree walker callback, dispatching on the operation stored in
 * the pfr_walktree argument.  A non-zero return aborts the walk.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* clear marks; pfr_mark_addrs() runs this pass */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		/* sweep collects only entries still unmarked */
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy out addresses while user buffer space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			/*
			 * NOTE(review): uses plain copyout() here while
			 * PFRW_GET_ASTATS below uses the COPYOUT() macro
			 * with flags -- confirm this asymmetry is intended.
			 */
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* snapshot the counters inside a critical section */
			crit_enter();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		/* count down to the requested entry index */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			/* only the first IPv4 entry updates the dyn addr */
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			/* likewise for the first IPv6 entry */
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
1117
/*
 * Deactivate all tables matching <filter>.  Tables in the reserved
 * anchor and tables that are not ACTIVE are skipped.  With
 * PFR_FLAG_DUMMY only the would-be count is computed.  The number of
 * deactivated tables is reported through <ndel>.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		/* queue the table with its ACTIVE flag cleared */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1155
/*
 * Create the tables described by the <size> pfr_table entries at
 * <tbl>.  Already-active tables are left alone; existing inactive
 * tables are queued for reactivation with the request's user flags.
 * For anchored tables the corresponding root table is found or
 * created as well.  The number of tables (to be) added is reported
 * through <nadd>.  senderr() jumps to _bad for error unwinding.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet: create it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates within this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* the root may already be queued in this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive table: queue a flag change */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	/* undo the table creations of this request */
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1235
/*
 * Deactivate the tables named by the <size> pfr_table entries at
 * <tbl>.  Only currently active tables are queued; duplicates within
 * the request are skipped.  The deletion count is reported through
 * <ndel>.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip tables already queued by this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1275
/*
 * Copy the definitions of all tables matching <filter> out to <tbl>.
 * On entry *<size> is the capacity of the user buffer; when it is too
 * small the required count is returned through *<size> and nothing is
 * copied.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* user buffer too small: report the required size only */
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		/* walk count disagrees with pfr_table_count() */
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1308
/*
 * Copy the statistics of all tables matching <filter> out to <tbl>.
 * *<size> semantics match pfr_get_tables().  With PFR_FLAG_ATOMIC a
 * single critical section covers the whole walk; otherwise each copy
 * gets its own.  PFR_FLAG_CLSTATS would clear the stats afterwards,
 * but is currently not accepted (see ACCEPT_FLAGS below).
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		/* on failure crit_exit() matches whichever enter is open */
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		/* walk count disagrees with pfr_table_count() */
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1359
/*
 * Zero the statistics of the <size> tables named at <tbl>; with
 * PFR_FLAG_ADDRSTOO the per-address counters are reset as well.
 * <nzero> reports how many of the named tables were found.  With
 * PFR_FLAG_DUMMY only the count is computed.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		/* note: validated without the USERIOCTL flag */
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1393
/*
 * Apply <setflag>/<clrflag> to the user-settable flags of the <size>
 * tables named at <tbl>.  Non-user flags and overlapping set/clr
 * requests are rejected.  Tables whose flag word would not change,
 * and duplicates within the request, are skipped.  A table losing
 * PFR_TFLAG_PERSIST while unreferenced is counted in <ndel>, any
 * other modification in <nchange>.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1447
/*
 * Open a table transaction on the ruleset named by <trs>: any tables
 * still flagged INACTIVE from an earlier, unfinished transaction are
 * queued to have that flag cleared, the ruleset is marked open, and a
 * fresh ticket is returned through <ticket>.  <ndel> reports the
 * number of stale inactive tables found.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		/* dummy run: drop the ruleset again if it is unused */
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1480
/*
 * Define table <tbl> with <size> addresses from <addr> inside the
 * transaction opened on its ruleset (<ticket> must match rs->tticket).
 * The addresses are routed into a detached "shadow" ktable which
 * replaces any previous shadow of the table; missing tables (and, for
 * anchored tables, the root table) are created with the INACTIVE
 * flag.  <nadd> reports new tables, <naddr> the number of shadow
 * addresses.  With PFR_FLAG_DUMMY everything is built and then torn
 * down again.  senderr() jumps to _bad for error unwinding.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* duplicates within the request are silently ignored */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace a previously defined shadow, if any */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		/* dummy run: tear everything down again */
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	/* senderr() jumps here; undo everything built so far */
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1580
1581 int
1582 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1583 {
1584         struct pfr_ktableworkq   workq;
1585         struct pfr_ktable       *p;
1586         struct pf_ruleset       *rs;
1587         int                      xdel = 0;
1588
1589         ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1590         rs = pf_find_ruleset(trs->pfrt_anchor);
1591         if (rs == NULL || !rs->topen || ticket != rs->tticket)
1592                 return (0);
1593         SLIST_INIT(&workq);
1594         RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1595                 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1596                     pfr_skip_table(trs, p, 0))
1597                         continue;
1598                 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1599                 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1600                 xdel++;
1601         }
1602         if (!(flags & PFR_FLAG_DUMMY)) {
1603                 pfr_setflags_ktables(&workq);
1604                 rs->topen = 0;
1605                 pf_remove_if_empty_ruleset(rs);
1606         }
1607         if (ndel != NULL)
1608                 *ndel = xdel;
1609         return (0);
1610 }
1611
/*
 * Commit an open table transaction: every INACTIVE table matching trs
 * is folded into its active counterpart by pfr_commit_ktable().
 * Returns EBUSY when the ticket does not match the open transaction.
 * *nadd / *nchange report how many tables were created vs. replaced.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		/* an already-ACTIVE table is being replaced, not added */
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		/* save the successor first: pfr_commit_ktable() may end up
		 * destroying p via pfr_setflags_ktable() */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1658
/*
 * Fold a committed shadow table into its active counterpart and
 * dispose of the shadow.  Called per table by pfr_ina_commit().
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		/* shadow carries no address list (table-only define);
		 * a newly activated table just gets its stats cleared */
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		/* diff the shadow's address set against kt's */
		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* already present: keep kt's entry, note a
				 * changed negation flag, and discard the
				 * duplicate from the shadow */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* new address: move it over from the shadow */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* entries of kt left unmarked are absent from the new set */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* user flags come from the shadow, set flags from the old table;
	 * the result is forced ACTIVE and stripped of INACTIVE */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1717
1718 int
1719 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1720 {
1721         int i;
1722
1723         if (!tbl->pfrt_name[0])
1724                 return (-1);
1725         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1726                  return (-1);
1727         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1728                 return (-1);
1729         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1730                 if (tbl->pfrt_name[i])
1731                         return (-1);
1732         if (pfr_fix_anchor(tbl->pfrt_anchor))
1733                 return (-1);
1734         if (tbl->pfrt_flags & ~allowedflags)
1735                 return (-1);
1736         return (0);
1737 }
1738
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 *
 * Strips any leading '/' characters in place (the buffer is assumed
 * to be MAXPATHLEN bytes), then verifies that the buffer is fully
 * NUL-terminated and zero-padded.  Returns 0 on success, -1 when the
 * anchor is malformed.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	size_t i;		/* size_t: avoids signed/unsigned compare
				 * against siz below */

	if (anchor[0] == '/') {
		char *path;
		size_t off;

		/* skip over every leading slash */
		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		/* shift the remainder to the front; regions overlap,
		 * bcopy() has memmove() semantics */
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	/* nothing but zero padding may follow the terminator */
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
1767
1768 int
1769 pfr_table_count(struct pfr_table *filter, int flags)
1770 {
1771         struct pf_ruleset *rs;
1772
1773         if (flags & PFR_FLAG_ALLRSETS)
1774                 return (pfr_ktable_cnt);
1775         if (filter->pfrt_anchor[0]) {
1776                 rs = pf_find_ruleset(filter->pfrt_anchor);
1777                 return ((rs != NULL) ? rs->tables : -1);
1778         }
1779         return (pf_main_ruleset.tables);
1780 }
1781
1782 int
1783 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1784 {
1785         if (flags & PFR_FLAG_ALLRSETS)
1786                 return (0);
1787         if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1788                 return (1);
1789         return (0);
1790 }
1791
1792 void
1793 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1794 {
1795         struct pfr_ktable       *p;
1796
1797         SLIST_FOREACH(p, workq, pfrkt_workq)
1798                 pfr_insert_ktable(p);
1799 }
1800
1801 void
1802 pfr_insert_ktable(struct pfr_ktable *kt)
1803 {
1804         RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1805         pfr_ktable_cnt++;
1806         if (kt->pfrkt_root != NULL)
1807                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1808                         pfr_setflags_ktable(kt->pfrkt_root,
1809                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1810 }
1811
1812 void
1813 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1814 {
1815         struct pfr_ktable       *p, *q;
1816
1817         for (p = SLIST_FIRST(workq); p; p = q) {
1818                 q = SLIST_NEXT(p, pfrkt_workq);
1819                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1820         }
1821 }
1822
/*
 * Apply a new flag set to a table.  Depending on the resulting flags
 * this may flush the table's addresses, drop its shadow, or unlink
 * and destroy the table entirely.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	/* a table that is neither referenced nor persistent cannot stay
	 * active, and an inactive table carries no user flags */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* no "set" flag keeps the table alive: unlink and destroy
		 * it, dropping the anchor reference on its root table
		 * (which may cascade into destroying the root, too) */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	/* deactivation flushes all addresses */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* leaving INACTIVE discards any pending shadow table */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1854
1855 void
1856 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1857 {
1858         struct pfr_ktable       *p;
1859
1860         SLIST_FOREACH(p, workq, pfrkt_workq)
1861                 pfr_clstats_ktable(p, tzero, recurse);
1862 }
1863
/*
 * Reset a table's packet/byte/match counters and set its "cleared"
 * timestamp; with recurse, per-address stats are cleared as well.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	/* zero the counters inside a critical section so packet
	 * processing never sees a half-cleared set */
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
1880
/*
 * Allocate and initialize a kernel table.  With attachruleset the
 * table is accounted against (and holds a reference on) the ruleset
 * named by tbl->pfrt_anchor.  Returns NULL on allocation or radix
 * initialization failure.
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	/* PR_NOWAIT: may be called from contexts that must not sleep */
	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix head per address family; key offsets are in bits */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		/* pfr_destroy_ktable also releases the ruleset reference */
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1914
1915 void
1916 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1917 {
1918         struct pfr_ktable       *p, *q;
1919
1920         for (p = SLIST_FIRST(workq); p; p = q) {
1921                 q = SLIST_NEXT(p, pfrkt_workq);
1922                 pfr_destroy_ktable(p, flushaddr);
1923         }
1924 }
1925
/*
 * Tear down a single table: optionally flush its addresses, free both
 * radix heads, recursively destroy a pending shadow, and release the
 * ruleset reference taken in pfr_create_ktable().
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
1948
1949 int
1950 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1951 {
1952         int d;
1953
1954         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1955                 return (d);
1956         return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1957 }
1958
1959 struct pfr_ktable *
1960 pfr_lookup_table(struct pfr_table *tbl)
1961 {
1962         /* struct pfr_ktable start like a struct pfr_table */
1963         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1964             (struct pfr_ktable *)tbl));
1965 }
1966
/*
 * Look up address a in table kt, bumping the table's match/nomatch
 * counter.  Returns 1 when a hits a non-negated entry, 0 otherwise.
 * NOTE(review): uses the global scratch sockaddrs pfr_sin/pfr_sin6 --
 * presumably safe because pf runs serialized; confirm before adding
 * concurrent callers.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	/* inactive anchored tables defer to their root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* the radix root node is not a real table entry */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	/* a negated ("!") entry counts as a non-match */
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2005
/*
 * Account one packet of `len` bytes against table kt (and the matching
 * entry, if any) in the [dir_out][op_pass] counter buckets.  When the
 * lookup result disagrees with the rule's recorded match (notrule),
 * the traffic is booked under PFR_OP_XPASS instead.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	/* inactive anchored tables defer to their root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* the radix root node is not a real table entry */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	/* table state changed since the rule matched: book as XPASS */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2051
/*
 * Find or create the table `name` for ruleset rs and take a rule
 * reference on it.  Tables inside an anchor additionally get a root
 * table in the main ruleset (empty anchor) linked via pfrkt_root.
 * Returns NULL on allocation failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* anchored table: find or create the root table of
			 * the same name in the main ruleset */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* 0 -> 1 rule refcount transition marks the table referenced */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2087
2088 void
2089 pfr_detach_table(struct pfr_ktable *kt)
2090 {
2091         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2092                 kprintf("pfr_detach_table: refcount = %d.\n",
2093                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2094         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2095                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2096 }
2097
/*
 * Pick the next address from table kt for round-robin style pools.
 * *pidx is the block index to resume from; counter is the last
 * address handed out.  Returns 0 with *raddr/*rmask/counter/pidx set,
 * 1 when the table is exhausted, -1 when the table is unusable.
 *
 * NOTE(review): counter is dereferenced unconditionally below despite
 * the NULL check at the top -- callers apparently always pass a valid
 * pointer; confirm before relying on counter == NULL.
 */
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = NULL;
	struct pf_addr		*addr = NULL;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	/* addr aliases the global scratch sockaddr for this af, so the
	 * radix lookups below see the candidate address */
	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup return the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		/* NOTE(review): AF_INET is passed here even when af is
		 * AF_INET6, so the mask union is prepared as an IPv4
		 * sockaddr but read back via SUNION2PF(&mask, af) --
		 * looks wrong for IPv6 tables; check newer pf_table.c
		 * revisions before changing. */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
2180
2181 struct pfr_kentry *
2182 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2183 {
2184         struct pfr_walktree     w;
2185
2186         bzero(&w, sizeof(w));
2187         w.pfrw_op = PFRW_POOL_GET;
2188         w.pfrw_cnt = idx;
2189
2190         switch (af) {
2191 #ifdef INET
2192         case AF_INET:
2193                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2194                 return (w.pfrw_kentry);
2195 #endif /* INET */
2196 #ifdef INET6
2197         case AF_INET6:
2198                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2199                 return (w.pfrw_kentry);
2200 #endif /* INET6 */
2201         default:
2202                 return (NULL);
2203         }
2204 }
2205
/*
 * Recompute a dynaddr's per-family address counts (and whatever else
 * the PFRW_DYNADDR_UPDATE walker maintains) by walking kt's radix
 * trees.  pfid_af == 0 means both families.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	/* reset and rebuild atomically wrt packet processing */
	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}