Rename malloc->kmalloc, free->kfree, and realloc->krealloc. Pass 1
[dragonfly.git] / sys / net / pf / pf_table.c
1 /*      $FreeBSD: src/sys/contrib/pf/net/pf_table.c,v 1.5 2004/07/28 06:14:44 kan Exp $ */
2 /*      $OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $   */
3 /*      $DragonFly: src/sys/net/pf/pf_table.c,v 1.4 2006/09/05 00:55:47 dillon Exp $ */
4
5 /*
6  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
7  *
8  * Copyright (c) 2002 Cedric Berger
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *    - Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *    - Redistributions in binary form must reproduce the above
18  *      copyright notice, this list of conditions and the following
19  *      disclaimer in the documentation and/or other materials provided
20  *      with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  */
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/socket.h>
43 #include <sys/mbuf.h>
44 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <sys/thread2.h>
47 #include <vm/vm_zone.h>
48
49 #include <net/if.h>
50 #include <net/route.h>
51 #include <netinet/in.h>
52 #include <net/pf/pfvar.h>
53
/*
 * Helper macros shared by the table ioctl handlers below.  Each handler
 * has an "int flags" parameter in scope at every expansion site.
 */
/* Reject the request when flags outside "oklist" (within ALLMASK) are set. */
54 #define ACCEPT_FLAGS(oklist)                    \
55         do {                                    \
56                 if ((flags & ~(oklist)) &       \
57                     PFR_FLAG_ALLMASK)           \
58                         return (EINVAL);        \
59         } while (0)
60
/*
 * Copy to/from userland only when the request came in via ioctl
 * (PFR_FLAG_USERIOCTL); kernel-internal callers get a plain bcopy(),
 * which cannot fail — hence the trailing ", 0".
 */
61 #define COPYIN(from, to, size)                  \
62         ((flags & PFR_FLAG_USERIOCTL) ?         \
63         copyin((from), (to), (size)) :          \
64         (bcopy((from), (to), (size)), 0))
65
66 #define COPYOUT(from, to, size)                 \
67         ((flags & PFR_FLAG_USERIOCTL) ?         \
68         copyout((from), (to), (size)) :         \
69         (bcopy((from), (to), (size)), 0))
70
/* Populate a sockaddr_in / sockaddr_in6 with the given address. */
71 #define FILLIN_SIN(sin, addr)                   \
72         do {                                    \
73                 (sin).sin_len = sizeof(sin);    \
74                 (sin).sin_family = AF_INET;     \
75                 (sin).sin_addr = (addr);        \
76         } while (0)
77
78 #define FILLIN_SIN6(sin6, addr)                 \
79         do {                                    \
80                 (sin6).sin6_len = sizeof(sin6); \
81                 (sin6).sin6_family = AF_INET6;  \
82                 (sin6).sin6_addr = (addr);      \
83         } while (0)
84
85 #define SWAP(type, a1, a2)                      \
86         do {                                    \
87                 type tmp = a1;                  \
88                 a1 = a2;                        \
89                 a2 = tmp;                       \
90         } while (0)
91
/* Pointer to the pf_addr embedded in a sockaddr_union, by address family. */
92 #define SUNION2PF(su, af) (((af)==AF_INET) ?    \
93     (struct pf_addr *)&(su)->sin.sin_addr :     \
94     (struct pf_addr *)&(su)->sin6.sin6_addr)
95
/* Address width per family; an entry is a "network" if its prefix is shorter. */
96 #define AF_BITS(af)             (((af)==AF_INET)?32:128)
97 #define ADDR_NETWORK(ad)        ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
98 #define KENTRY_NETWORK(ke)      ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True when the radix node is one of the tree's internal RNF_ROOT nodes. */
99 #define KENTRY_RNF_ROOT(ke) \
100                 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
101
102 #define NO_ADDRESSES            (-1)
103 #define ENQUEUE_UNMARKED_ONLY   (1)
104 #define INVERT_NEG_FLAG         (1)
105
/*
 * Per-walk state handed to pfr_walktree() via the radix tree walker:
 * which operation to perform on every visited entry, plus the
 * operation-specific argument/result in the union.
 */
106 struct pfr_walktree {
107         enum pfrw_op {
108                 PFRW_MARK,
109                 PFRW_SWEEP,
110                 PFRW_ENQUEUE,
111                 PFRW_GET_ADDRS,
112                 PFRW_GET_ASTATS,
113                 PFRW_POOL_GET,
114                 PFRW_DYNADDR_UPDATE
115         }        pfrw_op;
116         union {
117                 struct pfr_addr         *pfrw1_addr;
118                 struct pfr_astats       *pfrw1_astats;
119                 struct pfr_kentryworkq  *pfrw1_workq;
120                 struct pfr_kentry       *pfrw1_kentry;
121                 struct pfi_dynaddr      *pfrw1_dyn;
122         }        pfrw_1;
123         int      pfrw_free;
124         int      pfrw_flags;
125 };
/* Shorthands for the union members above; pfrw_cnt aliases pfrw_free. */
126 #define pfrw_addr       pfrw_1.pfrw1_addr
127 #define pfrw_astats     pfrw_1.pfrw1_astats
128 #define pfrw_workq      pfrw_1.pfrw1_workq
129 #define pfrw_kentry     pfrw_1.pfrw1_kentry
130 #define pfrw_dyn        pfrw_1.pfrw1_dyn
131 #define pfrw_cnt        pfrw_free
132
/* Record an error code in "rv" and jump to the function's cleanup label. */
133 #define senderr(e)      do { rv = (e); goto _bad; } while (0)
134
/* Allocation zones for ktables and kentries (initialized elsewhere). */
135 vm_zone_t                pfr_ktable_pl;
136 vm_zone_t                pfr_kentry_pl;
/* Reusable sockaddr templates / scratch mask set up by pfr_initialize(). */
137 struct sockaddr_in       pfr_sin;
138 struct sockaddr_in6      pfr_sin6;
139 union sockaddr_union     pfr_mask;
/* All-ones address constant, filled with 0xff in pfr_initialize(). */
140 struct pf_addr           pfr_ffaddr;
141
/* Forward declarations of the internal helpers defined below. */
142 void                     pfr_copyout_addr(struct pfr_addr *,
143                             struct pfr_kentry *ke);
144 int                      pfr_validate_addr(struct pfr_addr *);
145 void                     pfr_enqueue_addrs(struct pfr_ktable *,
146                             struct pfr_kentryworkq *, int *, int);
147 void                     pfr_mark_addrs(struct pfr_ktable *);
148 struct pfr_kentry       *pfr_lookup_addr(struct pfr_ktable *,
149                             struct pfr_addr *, int);
150 struct pfr_kentry       *pfr_create_kentry(struct pfr_addr *);
151 void                     pfr_destroy_kentries(struct pfr_kentryworkq *);
152 void                     pfr_destroy_kentry(struct pfr_kentry *);
153 void                     pfr_insert_kentries(struct pfr_ktable *,
154                             struct pfr_kentryworkq *, long);
155 void                     pfr_remove_kentries(struct pfr_ktable *,
156                             struct pfr_kentryworkq *);
157 void                     pfr_clstats_kentries(struct pfr_kentryworkq *, long,
158                             int);
159 void                     pfr_reset_feedback(struct pfr_addr *, int, int);
160 void                     pfr_prepare_network(union sockaddr_union *, int, int);
161 int                      pfr_route_kentry(struct pfr_ktable *,
162                             struct pfr_kentry *);
163 int                      pfr_unroute_kentry(struct pfr_ktable *,
164                             struct pfr_kentry *);
165 int                      pfr_walktree(struct radix_node *, void *);
166 int                      pfr_validate_table(struct pfr_table *, int, int);
167 void                     pfr_commit_ktable(struct pfr_ktable *, long);
168 void                     pfr_insert_ktables(struct pfr_ktableworkq *);
169 void                     pfr_insert_ktable(struct pfr_ktable *);
170 void                     pfr_setflags_ktables(struct pfr_ktableworkq *);
171 void                     pfr_setflags_ktable(struct pfr_ktable *, int);
172 void                     pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173                             int);
174 void                     pfr_clstats_ktable(struct pfr_ktable *, long, int);
175 struct pfr_ktable       *pfr_create_ktable(struct pfr_table *, long, int);
176 void                     pfr_destroy_ktables(struct pfr_ktableworkq *, int);
177 void                     pfr_destroy_ktable(struct pfr_ktable *, int);
178 int                      pfr_ktable_compare(struct pfr_ktable *,
179                             struct pfr_ktable *);
180 struct pfr_ktable       *pfr_lookup_table(struct pfr_table *);
181 void                     pfr_clean_node_mask(struct pfr_ktable *,
182                             struct pfr_kentryworkq *);
183 int                      pfr_table_count(struct pfr_table *, int);
184 int                      pfr_skip_table(struct pfr_table *,
185                             struct pfr_ktable *, int);
186 struct pfr_kentry       *pfr_kentry_byidx(struct pfr_ktable *, int, int);
187
/* Red-black tree of all tables, keyed by pfr_ktable_compare(). */
188 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
189 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190
191 struct pfr_ktablehead    pfr_ktables;
/* Zeroed table template used to create anonymous scratch tables. */
192 struct pfr_table         pfr_nulltable;
193 int                      pfr_ktable_cnt;
194
/*
 * One-time module setup: pre-fill the shared IPv4/IPv6 sockaddr
 * templates and the all-ones address constant pfr_ffaddr.
 */
195 void
196 pfr_initialize(void)
197 {
198         pfr_sin.sin_len = sizeof(pfr_sin);
199         pfr_sin.sin_family = AF_INET;
200         pfr_sin6.sin6_len = sizeof(pfr_sin6);
201         pfr_sin6.sin6_family = AF_INET6;
202
203         memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
204 }
205
/*
 * Remove every address from table "tbl".  *ndel receives the number of
 * entries that were (or, with PFR_FLAG_DUMMY, would be) deleted.
 * Returns 0, or EINVAL/ESRCH/EPERM on validation, lookup, or
 * const-table failure.
 */
206 int
207 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
208 {
209         struct pfr_ktable       *kt;
210         struct pfr_kentryworkq   workq;
211
212         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
213         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
214                 return (EINVAL);
215         kt = pfr_lookup_table(tbl);
216         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
217                 return (ESRCH);
218         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
219                 return (EPERM);
            /* Collect all entries (sweep=0) and report the count via ndel. */
220         pfr_enqueue_addrs(kt, &workq, ndel, 0);
221
222         if (!(flags & PFR_FLAG_DUMMY)) {
223                 if (flags & PFR_FLAG_ATOMIC)
224                         crit_enter();
225                 pfr_remove_kentries(kt, &workq);
226                 if (flags & PFR_FLAG_ATOMIC)
227                         crit_exit();
                    /* After removing everything the count must be zero. */
228                 if (kt->pfrkt_cnt) {
229                         printf("pfr_clr_addrs: corruption detected (%d).\n",
230                             kt->pfrkt_cnt);
231                         kt->pfrkt_cnt = 0;
232                 }
233         }
234         return (0);
235 }
236
237 int
238 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
239     int *nadd, int flags)
240 {
241         struct pfr_ktable       *kt, *tmpkt;
242         struct pfr_kentryworkq   workq;
243         struct pfr_kentry       *p, *q;
244         struct pfr_addr          ad;
245         int                      i, rv, xadd = 0;
246         long                     tzero = time_second;
247
248         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
249         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
250                 return (EINVAL);
251         kt = pfr_lookup_table(tbl);
252         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
253                 return (ESRCH);
254         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
255                 return (EPERM);
256         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
257         if (tmpkt == NULL)
258                 return (ENOMEM);
259         SLIST_INIT(&workq);
260         for (i = 0; i < size; i++) {
261                 if (COPYIN(addr+i, &ad, sizeof(ad)))
262                         senderr(EFAULT);
263                 if (pfr_validate_addr(&ad))
264                         senderr(EINVAL);
265                 p = pfr_lookup_addr(kt, &ad, 1);
266                 q = pfr_lookup_addr(tmpkt, &ad, 1);
267                 if (flags & PFR_FLAG_FEEDBACK) {
268                         if (q != NULL)
269                                 ad.pfra_fback = PFR_FB_DUPLICATE;
270                         else if (p == NULL)
271                                 ad.pfra_fback = PFR_FB_ADDED;
272                         else if (p->pfrke_not != ad.pfra_not)
273                                 ad.pfra_fback = PFR_FB_CONFLICT;
274                         else
275                                 ad.pfra_fback = PFR_FB_NONE;
276                 }
277                 if (p == NULL && q == NULL) {
278                         p = pfr_create_kentry(&ad);
279                         if (p == NULL)
280                                 senderr(ENOMEM);
281                         if (pfr_route_kentry(tmpkt, p)) {
282                                 pfr_destroy_kentry(p);
283                                 ad.pfra_fback = PFR_FB_NONE;
284                         } else {
285                                 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
286                                 xadd++;
287                         }
288                 }
289                 if (flags & PFR_FLAG_FEEDBACK)
290                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
291                                 senderr(EFAULT);
292         }
293         pfr_clean_node_mask(tmpkt, &workq);
294         if (!(flags & PFR_FLAG_DUMMY)) {
295                 if (flags & PFR_FLAG_ATOMIC)
296                         crit_enter();
297                 pfr_insert_kentries(kt, &workq, tzero);
298                 if (flags & PFR_FLAG_ATOMIC)
299                         crit_exit();
300         } else
301                 pfr_destroy_kentries(&workq);
302         if (nadd != NULL)
303                 *nadd = xadd;
304         pfr_destroy_ktable(tmpkt, 0);
305         return (0);
306 _bad:
307         pfr_clean_node_mask(tmpkt, &workq);
308         pfr_destroy_kentries(&workq);
309         if (flags & PFR_FLAG_FEEDBACK)
310                 pfr_reset_feedback(addr, size, flags);
311         pfr_destroy_ktable(tmpkt, 0);
312         return (rv);
313 }
314
/*
 * Delete "size" addresses listed in "addr" from table "tbl".  Entries
 * are marked first (pfr_mark_addrs clears all marks, then each matched
 * entry is marked) so a duplicate within the request is reported as
 * PFR_FB_DUPLICATE rather than deleted twice.  *ndel receives the
 * number of deletions (or would-be deletions with PFR_FLAG_DUMMY).
 */
315 int
316 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
317     int *ndel, int flags)
318 {
319         struct pfr_ktable       *kt;
320         struct pfr_kentryworkq   workq;
321         struct pfr_kentry       *p;
322         struct pfr_addr          ad;
323         int                      i, rv, xdel = 0;
324
325         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
326         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
327                 return (EINVAL);
328         kt = pfr_lookup_table(tbl);
329         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
330                 return (ESRCH);
331         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
332                 return (EPERM);
            /* Clear all pfrke_mark bits so marks below mean "seen in request". */
333         pfr_mark_addrs(kt);
334         SLIST_INIT(&workq);
335         for (i = 0; i < size; i++) {
336                 if (COPYIN(addr+i, &ad, sizeof(ad)))
337                         senderr(EFAULT);
338                 if (pfr_validate_addr(&ad))
339                         senderr(EINVAL);
340                 p = pfr_lookup_addr(kt, &ad, 1);
341                 if (flags & PFR_FLAG_FEEDBACK) {
342                         if (p == NULL)
343                                 ad.pfra_fback = PFR_FB_NONE;
344                         else if (p->pfrke_not != ad.pfra_not)
345                                 ad.pfra_fback = PFR_FB_CONFLICT;
346                         else if (p->pfrke_mark)
347                                 ad.pfra_fback = PFR_FB_DUPLICATE;
348                         else
349                                 ad.pfra_fback = PFR_FB_DELETED;
350                 }
                    /* Queue for removal only on first exact (addr+not) match. */
351                 if (p != NULL && p->pfrke_not == ad.pfra_not &&
352                     !p->pfrke_mark) {
353                         p->pfrke_mark = 1;
354                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
355                         xdel++;
356                 }
357                 if (flags & PFR_FLAG_FEEDBACK)
358                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
359                                 senderr(EFAULT);
360         }
361         if (!(flags & PFR_FLAG_DUMMY)) {
362                 if (flags & PFR_FLAG_ATOMIC)
363                         crit_enter();
364                 pfr_remove_kentries(kt, &workq);
365                 if (flags & PFR_FLAG_ATOMIC)
366                         crit_exit();
367         }
368         if (ndel != NULL)
369                 *ndel = xdel;
370         return (0);
371 _bad:
372         if (flags & PFR_FLAG_FEEDBACK)
373                 pfr_reset_feedback(addr, size, flags);
374         return (rv);
375 }
376
/*
 * Replace the contents of table "tbl" with exactly the "size" addresses
 * in "addr": addresses not yet present are added, present-but-unlisted
 * ones are deleted, and entries whose negation bit differs are changed.
 * With PFR_FLAG_FEEDBACK the deleted entries are additionally copied
 * out after the input array, and *size2 reports the space needed/used.
 * *nadd / *ndel / *nchange receive the respective counts.
 */
377 int
378 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
379     int *size2, int *nadd, int *ndel, int *nchange, int flags)
380 {
381         struct pfr_ktable       *kt, *tmpkt;
382         struct pfr_kentryworkq   addq, delq, changeq;
383         struct pfr_kentry       *p, *q;
384         struct pfr_addr          ad;
385         int                      i, rv, xadd = 0, xdel = 0, xchange = 0;
386         long                     tzero = time_second;
387
388         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
389         if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
390                 return (EINVAL);
391         kt = pfr_lookup_table(tbl);
392         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
393                 return (ESRCH);
394         if (kt->pfrkt_flags & PFR_TFLAG_CONST)
395                 return (EPERM);
            /* Scratch table catches duplicates among the new additions. */
396         tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
397         if (tmpkt == NULL)
398                 return (ENOMEM);
            /* Unmarked entries at the end of the loop are the ones to delete. */
399         pfr_mark_addrs(kt);
400         SLIST_INIT(&addq);
401         SLIST_INIT(&delq);
402         SLIST_INIT(&changeq);
403         for (i = 0; i < size; i++) {
404                 if (COPYIN(addr+i, &ad, sizeof(ad)))
405                         senderr(EFAULT);
406                 if (pfr_validate_addr(&ad))
407                         senderr(EINVAL);
408                 ad.pfra_fback = PFR_FB_NONE;
409                 p = pfr_lookup_addr(kt, &ad, 1);
410                 if (p != NULL) {
411                         if (p->pfrke_mark) {
412                                 ad.pfra_fback = PFR_FB_DUPLICATE;
413                                 goto _skip;
414                         }
415                         p->pfrke_mark = 1;
                            /* Present with opposite negation: flip via changeq. */
416                         if (p->pfrke_not != ad.pfra_not) {
417                                 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
418                                 ad.pfra_fback = PFR_FB_CHANGED;
419                                 xchange++;
420                         }
421                 } else {
422                         q = pfr_lookup_addr(tmpkt, &ad, 1);
423                         if (q != NULL) {
424                                 ad.pfra_fback = PFR_FB_DUPLICATE;
425                                 goto _skip;
426                         }
427                         p = pfr_create_kentry(&ad);
428                         if (p == NULL)
429                                 senderr(ENOMEM);
430                         if (pfr_route_kentry(tmpkt, p)) {
431                                 pfr_destroy_kentry(p);
432                                 ad.pfra_fback = PFR_FB_NONE;
433                         } else {
434                                 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
435                                 ad.pfra_fback = PFR_FB_ADDED;
436                                 xadd++;
437                         }
438                 }
439 _skip:
440                 if (flags & PFR_FLAG_FEEDBACK)
441                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
442                                 senderr(EFAULT);
443         }
            /* Everything still unmarked was not in the request: delete it. */
444         pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
445         if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
                    /* Caller's buffer too small: report needed size, bail out
                     * with rv = 0 (senderr(0) still runs the cleanup path). */
446                 if (*size2 < size+xdel) {
447                         *size2 = size+xdel;
448                         senderr(0);
449                 }
450                 i = 0;
451                 SLIST_FOREACH(p, &delq, pfrke_workq) {
452                         pfr_copyout_addr(&ad, p);
453                         ad.pfra_fback = PFR_FB_DELETED;
454                         if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
455                                 senderr(EFAULT);
456                         i++;
457                 }
458         }
459         pfr_clean_node_mask(tmpkt, &addq);
460         if (!(flags & PFR_FLAG_DUMMY)) {
461                 if (flags & PFR_FLAG_ATOMIC)
462                         crit_enter();
463                 pfr_insert_kentries(kt, &addq, tzero);
464                 pfr_remove_kentries(kt, &delq);
                    /* INVERT_NEG_FLAG: clstats also flips pfrke_not on changeq. */
465                 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
466                 if (flags & PFR_FLAG_ATOMIC)
467                         crit_exit();
468         } else
469                 pfr_destroy_kentries(&addq);
470         if (nadd != NULL)
471                 *nadd = xadd;
472         if (ndel != NULL)
473                 *ndel = xdel;
474         if (nchange != NULL)
475                 *nchange = xchange;
476         if ((flags & PFR_FLAG_FEEDBACK) && size2)
477                 *size2 = size+xdel;
478         pfr_destroy_ktable(tmpkt, 0);
479         return (0);
480 _bad:
481         pfr_clean_node_mask(tmpkt, &addq);
482         pfr_destroy_kentries(&addq);
483         if (flags & PFR_FLAG_FEEDBACK)
484                 pfr_reset_feedback(addr, size, flags);
485         pfr_destroy_ktable(tmpkt, 0);
486         return (rv);
487 }
488
/*
 * Test "size" host addresses against table "tbl" without modifying it.
 * Each pfr_addr gets a MATCH/NOTMATCH/NONE feedback code copied back;
 * with PFR_FLAG_REPLACE the matching entry's address is also written
 * into it.  *nmatch receives the number of positive (non-negated)
 * matches.  Network (prefix) entries in the input are rejected.
 */
489 int
490 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
491         int *nmatch, int flags)
492 {
493         struct pfr_ktable       *kt;
494         struct pfr_kentry       *p;
495         struct pfr_addr          ad;
496         int                      i, xmatch = 0;
497
498         ACCEPT_FLAGS(PFR_FLAG_REPLACE);
499         if (pfr_validate_table(tbl, 0, 0))
500                 return (EINVAL);
501         kt = pfr_lookup_table(tbl);
502         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
503                 return (ESRCH);
504
505         for (i = 0; i < size; i++) {
506                 if (COPYIN(addr+i, &ad, sizeof(ad)))
507                         return (EFAULT);
508                 if (pfr_validate_addr(&ad))
509                         return (EINVAL);
                    /* Only single hosts may be tested, not networks. */
510                 if (ADDR_NETWORK(&ad))
511                         return (EINVAL);
                    /* exact=0: longest-prefix match, not exact lookup. */
512                 p = pfr_lookup_addr(kt, &ad, 0);
513                 if (flags & PFR_FLAG_REPLACE)
514                         pfr_copyout_addr(&ad, p);
515                 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
516                     (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
517                 if (p != NULL && !p->pfrke_not)
518                         xmatch++;
519                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
520                         return (EFAULT);
521         }
522         if (nmatch != NULL)
523                 *nmatch = xmatch;
524         return (0);
525 }
526
/*
 * Copy out all addresses of table "tbl" into the "addr" array.  On
 * entry *size is the array capacity; if the table holds more entries,
 * only the required size is reported back and nothing is copied.  On
 * success *size is set to the number of entries written.
 */
527 int
528 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
529         int flags)
530 {
531         struct pfr_ktable       *kt;
532         struct pfr_walktree      w;
533         int                      rv;
534
535         ACCEPT_FLAGS(0);
536         if (pfr_validate_table(tbl, 0, 0))
537                 return (EINVAL);
538         kt = pfr_lookup_table(tbl);
539         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
540                 return (ESRCH);
            /* Buffer too small: report required size, succeed without copying. */
541         if (kt->pfrkt_cnt > *size) {
542                 *size = kt->pfrkt_cnt;
543                 return (0);
544         }
545
546         bzero(&w, sizeof(w));
547         w.pfrw_op = PFRW_GET_ADDRS;
548         w.pfrw_addr = addr;
            /* pfrw_free counts down as pfr_walktree() fills the array. */
549         w.pfrw_free = kt->pfrkt_cnt;
550         w.pfrw_flags = flags;
551         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
552         if (!rv)
553                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
554         if (rv)
555                 return (rv);
556
            /* Leftover slots mean the walk and pfrkt_cnt disagree. */
557         if (w.pfrw_free) {
558                 printf("pfr_get_addrs: corruption detected (%d).\n",
559                     w.pfrw_free);
560                 return (ENOTTY);
561         }
562         *size = kt->pfrkt_cnt;
563         return (0);
564 }
565
/*
 * Like pfr_get_addrs() but copies out per-address statistics
 * (pfr_astats) instead of bare addresses.  PFR_FLAG_CLSTATS would also
 * zero the counters afterwards, but that flag is currently not in the
 * accepted set (see XXX below), so the clearing branch is inert.
 */
566 int
567 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
568         int flags)
569 {
570         struct pfr_ktable       *kt;
571         struct pfr_walktree      w;
572         struct pfr_kentryworkq   workq;
573         int                      rv;
574         long                     tzero = time_second;
575
576         ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
577         if (pfr_validate_table(tbl, 0, 0))
578                 return (EINVAL);
579         kt = pfr_lookup_table(tbl);
580         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
581                 return (ESRCH);
            /* Buffer too small: report required size, succeed without copying. */
582         if (kt->pfrkt_cnt > *size) {
583                 *size = kt->pfrkt_cnt;
584                 return (0);
585         }
586
587         bzero(&w, sizeof(w));
588         w.pfrw_op = PFRW_GET_ASTATS;
589         w.pfrw_astats = addr;
590         w.pfrw_free = kt->pfrkt_cnt;
591         w.pfrw_flags = flags;
592         if (flags & PFR_FLAG_ATOMIC)
593                 crit_enter();
594         rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
595         if (!rv)
596                 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
597         if (!rv && (flags & PFR_FLAG_CLSTATS)) {
598                 pfr_enqueue_addrs(kt, &workq, NULL, 0);
599                 pfr_clstats_kentries(&workq, tzero, 0);
600         }
601         if (flags & PFR_FLAG_ATOMIC)
602                 crit_exit();
603         if (rv)
604                 return (rv);
605
606         if (w.pfrw_free) {
607                 printf("pfr_get_astats: corruption detected (%d).\n",
608                     w.pfrw_free);
609                 return (ENOTTY);
610         }
611         *size = kt->pfrkt_cnt;
612         return (0);
613 }
614
/*
 * Zero the statistics of the "size" addresses listed in "addr" within
 * table "tbl".  *nzero receives the number of entries whose stats were
 * (or would be, with PFR_FLAG_DUMMY) cleared.  With PFR_FLAG_FEEDBACK
 * each pfr_addr is tagged CLEARED or NONE and copied back out.
 */
615 int
616 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
617     int *nzero, int flags)
618 {
619         struct pfr_ktable       *kt;
620         struct pfr_kentryworkq   workq;
621         struct pfr_kentry       *p;
622         struct pfr_addr          ad;
623         int                      i, rv, xzero = 0;
624
625         ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
626         if (pfr_validate_table(tbl, 0, 0))
627                 return (EINVAL);
628         kt = pfr_lookup_table(tbl);
629         if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
630                 return (ESRCH);
631         SLIST_INIT(&workq);
632         for (i = 0; i < size; i++) {
633                 if (COPYIN(addr+i, &ad, sizeof(ad)))
634                         senderr(EFAULT);
635                 if (pfr_validate_addr(&ad))
636                         senderr(EINVAL);
637                 p = pfr_lookup_addr(kt, &ad, 1);
638                 if (flags & PFR_FLAG_FEEDBACK) {
639                         ad.pfra_fback = (p != NULL) ?
640                             PFR_FB_CLEARED : PFR_FB_NONE;
641                         if (COPYOUT(&ad, addr+i, sizeof(ad)))
642                                 senderr(EFAULT);
643                 }
644                 if (p != NULL) {
645                         SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
646                         xzero++;
647                 }
648         }
649
650         if (!(flags & PFR_FLAG_DUMMY)) {
651                 if (flags & PFR_FLAG_ATOMIC)
652                         crit_enter();
                    /* tzero=0, negchange=0: only reset the counters. */
653                 pfr_clstats_kentries(&workq, 0, 0);
654                 if (flags & PFR_FLAG_ATOMIC)
655                         crit_exit();
656         }
657         if (nzero != NULL)
658                 *nzero = xzero;
659         return (0);
660 _bad:
661         if (flags & PFR_FLAG_FEEDBACK)
662                 pfr_reset_feedback(addr, size, flags);
663         return (rv);
664 }
665
/*
 * Sanity-check a pfr_addr coming from the caller: known address family,
 * prefix length within range, no stray bits set beyond the prefix, a
 * 0/1 negation flag, and an empty feedback field.  Returns 0 if valid,
 * -1 otherwise.
 *
 * NOTE(review): the byte-wise checks below index the struct itself via
 * (caddr_t)ad, which assumes the address union is the first member of
 * struct pfr_addr — confirm against the layout in pfvar.h.
 */
666 int
667 pfr_validate_addr(struct pfr_addr *ad)
668 {
669         int i;
670
671         switch (ad->pfra_af) {
672         case AF_INET:
673                 if (ad->pfra_net > 32)
674                         return (-1);
675                 break;
676         case AF_INET6:
677                 if (ad->pfra_net > 128)
678                         return (-1);
679                 break;
680         default:
681                 return (-1);
682         }
            /* Reject host bits set within the partial byte at the prefix edge. */
683         if (ad->pfra_net < 128 &&
684                 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
685                         return (-1);
            /* All bytes past the prefix must be zero. */
686         for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
687                 if (((caddr_t)ad)[i])
688                         return (-1);
689         if (ad->pfra_not && ad->pfra_not != 1)
690                 return (-1);
            /* Feedback is output-only; callers must pass it in cleared. */
691         if (ad->pfra_fback)
692                 return (-1);
693         return (0);
694 }
695
/*
 * Walk both radix trees of "kt" and collect entries onto "workq":
 * all of them (PFRW_ENQUEUE), or with "sweep" set only the unmarked
 * ones (PFRW_SWEEP).  If "naddr" is non-NULL it receives the number of
 * entries collected.
 */
696 void
697 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
698         int *naddr, int sweep)
699 {
700         struct pfr_walktree     w;
701
702         SLIST_INIT(workq);
703         bzero(&w, sizeof(w));
704         w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
705         w.pfrw_workq = workq;
706         if (kt->pfrkt_ip4 != NULL)
707                 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
708                         printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
709         if (kt->pfrkt_ip6 != NULL)
710                 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
711                         printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
712         if (naddr != NULL)
713                 *naddr = w.pfrw_cnt;
714 }
715
/*
 * Clear the pfrke_mark flag on every entry of "kt" (PFRW_MARK walk over
 * both address-family trees), in preparation for mark-based diffing in
 * pfr_del_addrs()/pfr_set_addrs().
 */
716 void
717 pfr_mark_addrs(struct pfr_ktable *kt)
718 {
719         struct pfr_walktree     w;
720
721         bzero(&w, sizeof(w));
722         w.pfrw_op = PFRW_MARK;
723         if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
724                 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
725         if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
726                 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
727 }
728
729
/*
 * Look up "ad" in table "kt".  Network addresses use rn_lookup() with
 * an explicit netmask; host addresses use rn_match() (longest-prefix),
 * and with "exact" set a network entry matching a host address is
 * rejected.  Returns the matching entry or NULL; internal RNF_ROOT
 * radix nodes are never returned.
 */
730 struct pfr_kentry *
731 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
732 {
733         union sockaddr_union     sa, mask;
734         struct radix_node_head  *head;
735         struct pfr_kentry       *ke;
736
737         bzero(&sa, sizeof(sa));
738         if (ad->pfra_af == AF_INET) {
739                 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
740                 head = kt->pfrkt_ip4;
741         } else {
742                 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
743                 head = kt->pfrkt_ip6;
744         }
745         if (ADDR_NETWORK(ad)) {
746                 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
747                 crit_enter(); /* rn_lookup makes use of globals */
748                 ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
749                     head);
750                 crit_exit();
751                 if (ke && KENTRY_RNF_ROOT(ke))
752                         ke = NULL;
753         } else {
754                 ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
755                 if (ke && KENTRY_RNF_ROOT(ke))
756                         ke = NULL;
                    /* "exact": a host query must not hit a network entry. */
757                 if (exact && ke && KENTRY_NETWORK(ke))
758                         ke = NULL;
759         }
760         return (ke);
761 }
762
763 struct pfr_kentry *
764 pfr_create_kentry(struct pfr_addr *ad)
765 {
766         struct pfr_kentry       *ke;
767
768         ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
769         if (ke == NULL)
770                 return (NULL);
771         bzero(ke, sizeof(*ke));
772
773         if (ad->pfra_af == AF_INET)
774                 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
775         else
776                 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
777         ke->pfrke_af = ad->pfra_af;
778         ke->pfrke_net = ad->pfra_net;
779         ke->pfrke_not = ad->pfra_not;
780         return (ke);
781 }
782
783 void
784 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
785 {
786         struct pfr_kentry       *p, *q;
787
788         for (p = SLIST_FIRST(workq); p != NULL; p = q) {
789                 q = SLIST_NEXT(p, pfrke_workq);
790                 pfr_destroy_kentry(p);
791         }
792 }
793
/*
 * Return a single table entry to the kentry pool.
 */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}
799
/*
 * Route every entry on 'workq' into table 'kt' and stamp it with
 * 'tzero'.  The loop stops at the first routing failure, so only the
 * entries actually inserted are added to the table's address count.
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
819
820 void
821 pfr_remove_kentries(struct pfr_ktable *kt,
822     struct pfr_kentryworkq *workq)
823 {
824         struct pfr_kentry       *p;
825         int                      n = 0;
826
827         SLIST_FOREACH(p, workq, pfrke_workq) {
828                 pfr_unroute_kentry(kt, p);
829                 n++;
830         }
831         kt->pfrkt_cnt -= n;
832         pfr_destroy_kentries(workq);
833 }
834
835 void
836 pfr_clean_node_mask(struct pfr_ktable *kt,
837     struct pfr_kentryworkq *workq)
838 {
839         struct pfr_kentry       *p;
840
841         SLIST_FOREACH(p, workq, pfrke_workq)
842                 pfr_unroute_kentry(kt, p);
843 }
844
/*
 * Clear the per-entry packet/byte counters of every entry on 'workq'
 * and reset its creation time to 'tzero'.  When 'negchange' is set the
 * entry's negation flag is toggled as well.  The counters and the flag
 * are touched inside a critical section because the packet path may
 * update them concurrently.
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		crit_enter();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		crit_exit();
		p->pfrke_tzero = tzero;
	}
}
860
861 void
862 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
863 {
864         struct pfr_addr ad;
865         int             i;
866
867         for (i = 0; i < size; i++) {
868                 if (COPYIN(addr+i, &ad, sizeof(ad)))
869                         break;
870                 ad.pfra_fback = PFR_FB_NONE;
871                 if (COPYOUT(&ad, addr+i, sizeof(ad)))
872                         break;
873         }
874 }
875
876 void
877 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
878 {
879         int     i;
880
881         bzero(sa, sizeof(*sa));
882         if (af == AF_INET) {
883                 sa->sin.sin_len = sizeof(sa->sin);
884                 sa->sin.sin_family = AF_INET;
885                 sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
886         } else {
887                 sa->sin6.sin6_len = sizeof(sa->sin6);
888                 sa->sin6.sin6_family = AF_INET6;
889                 for (i = 0; i < 4; i++) {
890                         if (net <= 32) {
891                                 sa->sin6.sin6_addr.s6_addr32[i] =
892                                     htonl(-1 << (32-net));
893                                 break;
894                         }
895                         sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
896                         net -= 32;
897                 }
898         }
899 }
900
/*
 * Insert entry 'ke' into the radix tree of table 'kt' that matches its
 * address family.  Network entries are inserted with their netmask,
 * host entries without one.  Returns 0 on success, -1 if the radix
 * code rejected the insertion (e.g. duplicate key).  The insertion is
 * performed inside a critical section because the radix code uses
 * global scratch state.
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	crit_exit();

	return (rn == NULL ? -1 : 0);
}
926
/*
 * Remove entry 'ke' from the radix tree of table 'kt' that matches its
 * address family, using the same key/mask combination it was inserted
 * with.  Returns 0 on success, -1 (with a diagnostic) if the radix
 * code did not find the entry.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	crit_exit();

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
953
954 void
955 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
956 {
957         bzero(ad, sizeof(*ad));
958         if (ke == NULL)
959                 return;
960         ad->pfra_af = ke->pfrke_af;
961         ad->pfra_net = ke->pfrke_net;
962         ad->pfra_not = ke->pfrke_not;
963         if (ad->pfra_af == AF_INET)
964                 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
965         else
966                 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
967 }
968
969 int
970 pfr_walktree(struct radix_node *rn, void *arg)
971 {
972         struct pfr_kentry       *ke = (struct pfr_kentry *)rn;
973         struct pfr_walktree     *w = arg;
974         int                      flags = w->pfrw_flags;
975
976         switch (w->pfrw_op) {
977         case PFRW_MARK:
978                 ke->pfrke_mark = 0;
979                 break;
980         case PFRW_SWEEP:
981                 if (ke->pfrke_mark)
982                         break;
983                 /* FALLTHROUGH */
984         case PFRW_ENQUEUE:
985                 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
986                 w->pfrw_cnt++;
987                 break;
988         case PFRW_GET_ADDRS:
989                 if (w->pfrw_free-- > 0) {
990                         struct pfr_addr ad;
991
992                         pfr_copyout_addr(&ad, ke);
993                         if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
994                                 return (EFAULT);
995                         w->pfrw_addr++;
996                 }
997                 break;
998         case PFRW_GET_ASTATS:
999                 if (w->pfrw_free-- > 0) {
1000                         struct pfr_astats as;
1001
1002                         pfr_copyout_addr(&as.pfras_a, ke);
1003
1004                         crit_enter();
1005                         bcopy(ke->pfrke_packets, as.pfras_packets,
1006                             sizeof(as.pfras_packets));
1007                         bcopy(ke->pfrke_bytes, as.pfras_bytes,
1008                             sizeof(as.pfras_bytes));
1009                         crit_exit();
1010                         as.pfras_tzero = ke->pfrke_tzero;
1011
1012                         if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1013                                 return (EFAULT);
1014                         w->pfrw_astats++;
1015                 }
1016                 break;
1017         case PFRW_POOL_GET:
1018                 if (ke->pfrke_not)
1019                         break; /* negative entries are ignored */
1020                 if (!w->pfrw_cnt--) {
1021                         w->pfrw_kentry = ke;
1022                         return (1); /* finish search */
1023                 }
1024                 break;
1025         case PFRW_DYNADDR_UPDATE:
1026                 if (ke->pfrke_af == AF_INET) {
1027                         if (w->pfrw_dyn->pfid_acnt4++ > 0)
1028                                 break;
1029                         pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1030                         w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1031                             &ke->pfrke_sa, AF_INET);
1032                         w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1033                             &pfr_mask, AF_INET);
1034                 } else {
1035                         if (w->pfrw_dyn->pfid_acnt6++ > 0)
1036                                 break;
1037                         pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1038                         w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1039                             &ke->pfrke_sa, AF_INET6);
1040                         w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1041                             &pfr_mask, AF_INET6);
1042                 }
1043                 break;
1044         }
1045         return (0);
1046 }
1047
/*
 * DIOCRCLRTABLES backend: mark every active table matching 'filter'
 * (except those in the reserved anchor) for deactivation and apply the
 * flag change.  With PFR_FLAG_DUMMY only the would-be deletion count is
 * computed.  '*ndel' receives the number of tables affected.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1082
/*
 * DIOCRADDTABLES backend: create the 'size' tables described by the
 * user array 'tbl'.  New tables are collected on 'addq' (along with any
 * implicitly created root tables for anchored tables); tables that
 * already exist but are inactive get their flags updated via 'changeq'.
 * Everything is validated first and only committed at the end, unless
 * PFR_FLAG_DUMMY is set.  '*nadd' receives the number of tables added.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet: create and queue it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates within this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* the root may already be pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive table: reactivate via changeq */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1163
/*
 * DIOCRDELTABLES backend: mark the 'size' active tables described by
 * user array 'tbl' for deactivation and apply the flag change (unless
 * PFR_FLAG_DUMMY).  Duplicates within the request are skipped.
 * '*ndel' receives the number of tables deleted.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1203
/*
 * DIOCRGETTABLES backend: copy the tables matching 'filter' out to the
 * user array 'tbl'.  If the caller's buffer ('*size' entries) is too
 * small, only the required count is reported back in '*size' and
 * nothing is copied.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	/* the walk should have consumed exactly 'n' tables */
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1234
/*
 * DIOCRGETTSTATS backend: copy the statistics of the tables matching
 * 'filter' out to user array 'tbl'.  As in pfr_get_tables(), a too
 * small buffer only reports the required count.  Each record is copied
 * inside a critical section (held across the whole walk when
 * PFR_FLAG_ATOMIC is set, per record otherwise); either way the
 * crit_exit() in the copy-error path matches the section in force.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1283
/*
 * DIOCRCLRTSTATS backend: zero the statistics of the 'size' tables
 * named in user array 'tbl' (and of their addresses too when
 * PFR_FLAG_ADDRSTOO is set).  PFR_FLAG_DUMMY only counts the tables
 * that would be cleared.  '*nzero' receives that count.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1316
/*
 * DIOCRSETTFLAGS backend: set 'setflag' and clear 'clrflag' on the
 * 'size' active tables named in user array 'tbl'.  Only user-settable
 * flags are allowed and set/clear must not overlap.  Clearing PERSIST
 * on an unreferenced table deletes it, which is why such tables count
 * toward '*ndel' rather than '*nchange'.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no-op changes and request duplicates are skipped */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1370
/*
 * DIOCRINABEGIN backend: open an "inactive set" transaction on the
 * ruleset named by 'trs'.  Any tables left over from a previous,
 * unfinished transaction lose their INACTIVE flag (counted in '*ndel'),
 * a fresh ticket is issued in '*ticket', and the ruleset is marked
 * open.  With PFR_FLAG_DUMMY nothing is changed and an empty ruleset
 * created by the lookup is removed again.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1403
/*
 * DIOCRINADEFINE backend: within an open transaction (validated against
 * 'ticket'), stage table 'tbl' with the 'size' addresses in 'addr'.
 * The addresses are built into a detached "shadow" table which replaces
 * any previously staged shadow; the real table (and, for anchored
 * tables, its root table) is created on 'tableq' if it does not exist
 * yet.  Nothing becomes visible to the packet path until
 * pfr_ina_commit().  '*nadd' counts tables added, '*naddr' addresses
 * staged.  With PFR_FLAG_DUMMY all staged state is torn down again.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the staged contents in a detached shadow table */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* duplicates within the request are silently dropped */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1503
/*
 * DIOCRINAROLLBACK backend: abort the open transaction identified by
 * 'ticket' on the ruleset named by 'trs'.  All INACTIVE tables of the
 * ruleset are stripped of that flag (dropping their staged shadows via
 * pfr_setflags_ktables) and the ruleset is closed.  A stale or missing
 * ticket is not an error — there is simply nothing to roll back.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1534
/*
 * DIOCRINACOMMIT backend: atomically commit the open transaction
 * identified by 'ticket' on the ruleset named by 'trs'.  Every INACTIVE
 * table of the ruleset is committed via pfr_commit_ktable() and the
 * ruleset is closed.  Tables that were already ACTIVE count as changed
 * ('*nchange'), the rest as newly added ('*nadd').
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1579
/*
 * Merge a table's shadow (staged) contents into the live table 'kt',
 * then destroy the shadow and recompute the table flags.  Three cases:
 * the shadow holds no addresses, the live table is active (entry-by-entry
 * merge), or the live table is empty (cheap radix-head swap).
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		/* Shadow carries flags only; reset stats on a fresh table. */
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		/* Pull all shadow entries, mark live entries as unseen. */
		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/*
				 * Address already live: keep the live entry,
				 * flip its negation if it changed, and
				 * discard the shadow duplicate.
				 */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* New address: queue for insertion. */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* Live entries never seen in the shadow get deleted. */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	/* User flags come from the shadow, set flags from the live table. */
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1638
1639 int
1640 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1641 {
1642         int i;
1643
1644         if (!tbl->pfrt_name[0])
1645                 return (-1);
1646         if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1647                  return (-1);
1648         if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1649                 return (-1);
1650         for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1651                 if (tbl->pfrt_name[i])
1652                         return (-1);
1653         if (tbl->pfrt_flags & ~allowedflags)
1654                 return (-1);
1655         return (0);
1656 }
1657
1658 int
1659 pfr_table_count(struct pfr_table *filter, int flags)
1660 {
1661         struct pf_ruleset *rs;
1662         struct pf_anchor *ac;
1663
1664         if (flags & PFR_FLAG_ALLRSETS)
1665                 return (pfr_ktable_cnt);
1666         if (filter->pfrt_ruleset[0]) {
1667                 rs = pf_find_ruleset(filter->pfrt_anchor,
1668                     filter->pfrt_ruleset);
1669                 return ((rs != NULL) ? rs->tables : -1);
1670         }
1671         if (filter->pfrt_anchor[0]) {
1672                 ac = pf_find_anchor(filter->pfrt_anchor);
1673                 return ((ac != NULL) ? ac->tables : -1);
1674         }
1675         return (pf_main_ruleset.tables);
1676 }
1677
1678 int
1679 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1680 {
1681         if (flags & PFR_FLAG_ALLRSETS)
1682                 return (0);
1683         if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
1684             PF_ANCHOR_NAME_SIZE))
1685                 return (1);
1686         if (!filter->pfrt_ruleset[0])
1687                 return (0);
1688         if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
1689             PF_RULESET_NAME_SIZE))
1690                 return (1);
1691         return (0);
1692 }
1693
1694 void
1695 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1696 {
1697         struct pfr_ktable       *p;
1698
1699         SLIST_FOREACH(p, workq, pfrkt_workq)
1700                 pfr_insert_ktable(p);
1701 }
1702
1703 void
1704 pfr_insert_ktable(struct pfr_ktable *kt)
1705 {
1706         RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1707         pfr_ktable_cnt++;
1708         if (kt->pfrkt_root != NULL)
1709                 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1710                         pfr_setflags_ktable(kt->pfrkt_root,
1711                             kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1712 }
1713
1714 void
1715 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1716 {
1717         struct pfr_ktable       *p;
1718
1719         SLIST_FOREACH(p, workq, pfrkt_workq)
1720                 pfr_setflags_ktable(p, p->pfrkt_nflags);
1721 }
1722
/*
 * Install a new flag word on a table, enforcing the flag invariants and
 * performing any teardown they imply.  A table with no SETMASK flags
 * left is removed from the tree and destroyed (dropping its root-table
 * anchor reference); a table going inactive loses its addresses and any
 * staged shadow.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	/* ACTIVE requires at least one of REFERENCED/PERSIST. */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	/* User flags are meaningless on an inactive table. */
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* Nothing keeps this table alive: unlink and destroy it. */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			/* Last anchor ref clears REFDANCHOR on the root. */
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	/* Going inactive: flush any addresses still in the table. */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* INACTIVE cleared: discard any staged (uncommitted) shadow. */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1754
1755 void
1756 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1757 {
1758         struct pfr_ktable       *p;
1759
1760         SLIST_FOREACH(p, workq, pfrkt_workq)
1761                 pfr_clstats_ktable(p, tzero, recurse);
1762 }
1763
/*
 * Zero a table's packet/byte/match counters and reset its timestamp to
 * 'tzero'.  With 'recurse' the statistics of every address entry in the
 * table are cleared first.  The counter reset itself runs inside a
 * critical section so readers never see a half-cleared set.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
1780
/*
 * Allocate and initialize a kernel table object from the user-visible
 * descriptor 'tbl'.  With 'attachruleset' the table is linked to (and
 * counted against) its anchor/ruleset, creating the ruleset on demand.
 * Returns NULL on allocation or ruleset failure; partial state is torn
 * down via pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	/* The descriptor is embedded at the start of the ktable. */
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	/* One radix head per address family; bit offsets of the address. */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1817
1818 void
1819 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1820 {
1821         struct pfr_ktable       *p, *q;
1822
1823         for (p = SLIST_FIRST(workq); p; p = q) {
1824                 q = SLIST_NEXT(p, pfrkt_workq);
1825                 pfr_destroy_ktable(p, flushaddr);
1826         }
1827 }
1828
/*
 * Free a kernel table and everything it owns: optionally its address
 * entries, its radix heads, any staged shadow table, and its ruleset
 * accounting (removing the ruleset if it becomes empty).  The ktable
 * itself goes back to the pool.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		/* Detach and free all address entries first. */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	/* An uncommitted shadow is destroyed recursively. */
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
1853
1854 int
1855 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1856 {
1857         int d;
1858
1859         if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1860                 return (d);
1861         if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
1862             PF_ANCHOR_NAME_SIZE)))
1863                 return (d);
1864         return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
1865             PF_RULESET_NAME_SIZE));
1866 }
1867
1868 struct pfr_ktable *
1869 pfr_lookup_table(struct pfr_table *tbl)
1870 {
1871         /* struct pfr_ktable start like a struct pfr_table */
1872         return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1873             (struct pfr_ktable *)tbl));
1874 }
1875
/*
 * Test whether address 'a' (family 'af') matches table 'kt'.
 * Inactive tables defer to their root table; a still-inactive table
 * never matches.  Updates the table's match/nomatch counters.
 * Returns 1 on match (entry found and not negated), 0 otherwise.
 * Uses the global scratch sockaddrs pfr_sin/pfr_sin6 as lookup keys.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* Radix-root nodes are internal, not real entries. */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	/* A negated entry counts as a non-match. */
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
1910
/*
 * Account a packet of 'len' bytes against table 'kt' for address 'a'.
 * Inactive tables defer to their root table.  If the lookup outcome
 * disagrees with 'notrule' (the rule's expectation), the packet is
 * counted under PFR_OP_XPASS instead; per-entry counters are only
 * bumped for genuine (non-XPASS) matches.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		/* Radix-root nodes are internal, not real entries. */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	/* Lookup outcome contradicts the rule: reclassify as XPASS. */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
1950
/*
 * Attach a rule to the table named 'name' inside ruleset 'rs', creating
 * the table (and, for anchored rulesets, its root table in the main
 * ruleset) on demand.  Takes a rule reference; the first reference
 * marks the table REFERENCED.  Returns the table or NULL on allocation
 * failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/*
			 * Anchored table: also ensure a root table of the
			 * same name exists in the main ruleset (anchor and
			 * ruleset fields cleared) and link to it.
			 */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* First rule reference flags the table as referenced. */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
1989
1990 void
1991 pfr_detach_table(struct pfr_ktable *kt)
1992 {
1993         if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
1994                 printf("pfr_detach_table: refcount = %d.\n",
1995                     kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
1996         else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
1997                 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
1998 }
1999
/*
 * Address-pool iterator over a table: starting from block index *pidx
 * (and, if 'counter' lies inside that block, from 'counter'), find the
 * next usable address, skipping over nested (more specific) blocks.
 * On success returns 0 with *raddr/*rmask set to the block and
 * 'counter'/*pidx updated for the next call.  Returns 1 when the table
 * is exhausted, -1 if the table is not active.
 *
 * NOTE: fills the global scratch sockaddrs pfr_sin/pfr_sin6, which the
 * nested-block rn_match() lookups below read back.
 */
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	/* 'addr' aliases the address part of the scratch sockaddr. */
	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	/* Resume from 'counter' only when a previous position exists. */
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match((char *)&pfr_sin, kt->pfrkt_ip4) :
		    rn_match((char *)&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup return the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
2077
2078 struct pfr_kentry *
2079 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2080 {
2081         struct pfr_walktree     w;
2082
2083         bzero(&w, sizeof(w));
2084         w.pfrw_op = PFRW_POOL_GET;
2085         w.pfrw_cnt = idx;
2086
2087         switch (af) {
2088         case AF_INET:
2089                 kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2090                 return (w.pfrw_kentry);
2091         case AF_INET6:
2092                 kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2093                 return (w.pfrw_kentry);
2094         default:
2095                 return (NULL);
2096         }
2097 }
2098
/*
 * Recompute a dynamic address's view of table 'kt': reset its v4/v6
 * address counts and re-walk the radix trees with a DYNADDR_UPDATE
 * walker.  pfid_af selects the family; 0 means both.  The whole update
 * runs in a critical section so lookups never see partial state.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}