Clean up routing code before I parallelize it.
sys/net/pf/pf_table.c
/*	$FreeBSD: src/sys/contrib/pf/net/pf_table.c,v 1.5 2004/07/28 06:14:44 kan Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $	*/
/*	$DragonFly: src/sys/net/pf/pf_table.c,v 1.2 2004/12/14 18:46:08 hsu Exp $	*/

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <vm/vm_zone.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

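/*
 * COPYIN/COPYOUT pick the copy primitive by caller context: requests
 * arriving via ioctl from userland (PFR_FLAG_USERIOCTL) must go
 * through copyin/copyout, while kernel-internal callers hand in
 * kernel buffers that can simply be bcopy'd.
 */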
#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
	int	pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

vm_zone_t		 pfr_ktable_pl;
vm_zone_t		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

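/*
 * Flush all addresses of the given table.  PFR_FLAG_DUMMY makes this
 * a dry run; PFR_FLAG_ATOMIC wraps the removal in splsoftnet() so the
 * packet-matching path never sees a partially emptied table.
 */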
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

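/*
 * Add a batch of addresses to a table.  A scratch table (tmpkt)
 * shadows the batch so duplicates within the request itself are
 * caught before anything is committed; with PFR_FLAG_FEEDBACK each
 * pfr_addr is copied back annotated with its fate (added, duplicate,
 * conflict, ...).
 */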
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

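/*
 * Delete a batch of addresses.  pfr_mark_addrs() first clears the
 * per-entry mark bits; an entry is queued for removal only the first
 * time the request names it, so repeated addresses are reported as
 * duplicates instead of being removed twice.
 */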
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

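/*
 * Replace the table contents with the given address list, using a
 * mark-and-sweep pass: entries named in the request are marked and
 * kept (or queued on changeq if their negation flag flips), new ones
 * are staged in a scratch table, and whatever remains unmarked is
 * swept onto delq and removed.  If the caller provided room (*size2),
 * the deleted entries are appended to the feedback buffer.
 */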
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

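/*
 * Sanity-check an address from userland: the prefix length must fit
 * the address family, every address bit beyond the prefix must be
 * zero (both in the partial byte at the prefix boundary and in all
 * trailing bytes), and the flag fields must hold no stray values.
 */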
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
		    head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

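/*
 * Build a sockaddr-style netmask for a /net prefix; e.g. net = 24
 * with AF_INET yields 255.255.255.0.  For AF_INET6 the mask is
 * filled one 32-bit word at a time until the remaining prefix
 * length fits within the current word.
 */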
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    htonl(-1 << (32-net));
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

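/*
 * Radix-tree walker callback; pfrw_op selects the action per entry:
 * clear the mark bit (MARK), collect unmarked or all entries on a
 * work queue (SWEEP/ENQUEUE), copy addresses or statistics out to
 * the caller (GET_ADDRS/GET_ASTATS), pick the idx'th non-negated
 * entry (POOL_GET), or refresh a dynamic address (DYNADDR_UPDATE).
 */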
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

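/*
 * The pfr_ina_* functions implement two-phase ("inactive") table
 * loading: _begin opens a transaction on a ruleset and hands out a
 * ticket, _define builds shadow tables under that ticket, and
 * _commit/_rollback atomically install or discard them.
 */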
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

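/*
 * Install a shadow table over kt.  Three cases: the shadow carries
 * no address list (only flags change), kt is active and may contain
 * addresses (merge entry by entry so statistics of surviving entries
 * are preserved), or kt holds no addresses (simply swap the radix
 * heads with the shadow).
 */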
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;
	struct pf_anchor *ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

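/*
 * Round-robin address selection for address pools: *pidx names the
 * current block and counter the last address handed out.  If the
 * candidate address falls into a block nested inside the current
 * one, the counter is advanced past the nested block instead, so
 * only top-level blocks feed the pool.
 */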
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match((char *)&pfr_sin, kt->pfrkt_ip4) :
		    rn_match((char *)&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}