Commit | Line | Data |
---|---|---|
ed1f0be2 | 1 | /* $OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $ */ |
02742ec6 JS |
2 | |
3 | /* | |
ed1f0be2 | 4 | * Copyright (c) 2010 The DragonFly Project. All rights reserved. |
02742ec6 JS |
5 | * |
6 | * Copyright (c) 2002 Cedric Berger | |
7 | * All rights reserved. | |
8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
12 | * | |
13 | * - Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | |
15 | * - Redistributions in binary form must reproduce the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer in the documentation and/or other materials provided | |
18 | * with the distribution. | |
19 | * | |
20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
24 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |
28 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
30 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
31 | * POSSIBILITY OF SUCH DAMAGE. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include "opt_inet.h" | |
36 | #include "opt_inet6.h" | |
37 | ||
38 | #include <sys/param.h> | |
39 | #include <sys/systm.h> | |
40 | #include <sys/socket.h> | |
41 | #include <sys/mbuf.h> | |
42 | #include <sys/kernel.h> | |
43 | #include <sys/malloc.h> | |
cc6e5672 | 44 | #include <sys/thread2.h> |
02742ec6 JS |
45 | |
46 | #include <net/if.h> | |
47 | #include <net/route.h> | |
48 | #include <netinet/in.h> | |
49 | #include <net/pf/pfvar.h> | |
50 | ||
/* Reject any flag bit outside 'oklist' (only PFR_FLAG_ALLMASK bits count). */
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

/*
 * Copy between caller and kernel memory: ioctl callers hand us user
 * pointers (copyin/copyout), in-kernel callers plain memory (bcopy).
 */
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

/* Fill a sockaddr_in / sockaddr_in6 from a bare address value. */
#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

/* Pointer to the raw pf_addr inside a sockaddr_union, per address family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
/* True when the prefix is shorter than a full host address. */
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* Radix-tree internal (root) nodes must be ignored by lookups. */
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

/* kmalloc pools replacing OpenBSD's pool(9) allocators. */
static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list");
static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters");
107 | ||
02742ec6 JS |
/*
 * Context handed to the radix-tree walker callback (pfr_walktree);
 * pfrw_op selects what the walker does with each visited entry.
 * NOTE(review): op semantics below inferred from the callers in this
 * file -- confirm against pfr_walktree() itself.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* reset per-entry mark (pfr_mark_addrs) */
		PFRW_SWEEP,		/* collect unmarked entries on workq */
		PFRW_ENQUEUE,		/* collect all entries on workq */
		PFRW_GET_ADDRS,		/* copy addresses to pfrw_addr buffer */
		PFRW_GET_ASTATS,	/* copy address+stats to pfrw_astats */
		PFRW_POOL_GET,		/* select an entry for address pools */
		PFRW_DYNADDR_UPDATE	/* refresh a dynamic address */
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;	/* output slots left; doubles as counter */
	int	pfrw_flags;
};
/* Shorthands for the union members above. */
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free
134 | ||
135 | #define senderr(e) do { rv = (e); goto _bad; } while (0) | |
1186cbc0 JL |
136 | struct malloc_type *pfr_ktable_pl; |
137 | struct malloc_type *pfr_kentry_pl; | |
138 | struct malloc_type *pfr_kentry_pl2; | |
d66d8bc0 | 139 | static struct pf_addr pfr_ffaddr; /* constant after setup */ |
02742ec6 JS |
140 | |
141 | void pfr_copyout_addr(struct pfr_addr *, | |
142 | struct pfr_kentry *ke); | |
143 | int pfr_validate_addr(struct pfr_addr *); | |
144 | void pfr_enqueue_addrs(struct pfr_ktable *, | |
145 | struct pfr_kentryworkq *, int *, int); | |
146 | void pfr_mark_addrs(struct pfr_ktable *); | |
147 | struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *, | |
148 | struct pfr_addr *, int); | |
70224baa | 149 | struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int); |
02742ec6 JS |
150 | void pfr_destroy_kentries(struct pfr_kentryworkq *); |
151 | void pfr_destroy_kentry(struct pfr_kentry *); | |
152 | void pfr_insert_kentries(struct pfr_ktable *, | |
153 | struct pfr_kentryworkq *, long); | |
154 | void pfr_remove_kentries(struct pfr_ktable *, | |
155 | struct pfr_kentryworkq *); | |
156 | void pfr_clstats_kentries(struct pfr_kentryworkq *, long, | |
157 | int); | |
158 | void pfr_reset_feedback(struct pfr_addr *, int, int); | |
159 | void pfr_prepare_network(union sockaddr_union *, int, int); | |
160 | int pfr_route_kentry(struct pfr_ktable *, | |
161 | struct pfr_kentry *); | |
162 | int pfr_unroute_kentry(struct pfr_ktable *, | |
163 | struct pfr_kentry *); | |
164 | int pfr_walktree(struct radix_node *, void *); | |
165 | int pfr_validate_table(struct pfr_table *, int, int); | |
70224baa | 166 | int pfr_fix_anchor(char *); |
02742ec6 JS |
167 | void pfr_commit_ktable(struct pfr_ktable *, long); |
168 | void pfr_insert_ktables(struct pfr_ktableworkq *); | |
169 | void pfr_insert_ktable(struct pfr_ktable *); | |
170 | void pfr_setflags_ktables(struct pfr_ktableworkq *); | |
171 | void pfr_setflags_ktable(struct pfr_ktable *, int); | |
172 | void pfr_clstats_ktables(struct pfr_ktableworkq *, long, | |
173 | int); | |
174 | void pfr_clstats_ktable(struct pfr_ktable *, long, int); | |
175 | struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int); | |
176 | void pfr_destroy_ktables(struct pfr_ktableworkq *, int); | |
177 | void pfr_destroy_ktable(struct pfr_ktable *, int); | |
178 | int pfr_ktable_compare(struct pfr_ktable *, | |
179 | struct pfr_ktable *); | |
180 | struct pfr_ktable *pfr_lookup_table(struct pfr_table *); | |
181 | void pfr_clean_node_mask(struct pfr_ktable *, | |
182 | struct pfr_kentryworkq *); | |
183 | int pfr_table_count(struct pfr_table *, int); | |
184 | int pfr_skip_table(struct pfr_table *, | |
185 | struct pfr_ktable *, int); | |
186 | struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int); | |
187 | ||
188 | RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); | |
189 | RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); | |
190 | ||
191 | struct pfr_ktablehead pfr_ktables; | |
192 | struct pfr_table pfr_nulltable; | |
193 | int pfr_ktable_cnt; | |
194 | ||
/*
 * One-time module setup: initialize the all-ones address used as an
 * exact-match mask (pfr_ffaddr is constant after this).
 */
void
pfr_initialize(void)
{
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
200 | ||
/*
 * Remove every address from table 'tbl'.  The number of removed entries
 * is reported through *ndel.  PFR_FLAG_DUMMY only computes the count;
 * PFR_FLAG_ATOMIC wraps the removal in a critical section.  Returns 0
 * or an errno (EINVAL/ESRCH/EPERM).
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);		/* constant tables are immutable */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		/* removing every entry must leave the count at zero */
		if (kt->pfrkt_cnt) {
			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
231 | ||
/*
 * Add 'size' addresses from 'addr' to table 'tbl'.  A scratch table
 * (tmpkt) catches duplicates within the request itself.  *nadd receives
 * the number of entries actually added.  With PFR_FLAG_FEEDBACK each
 * pfr_addr is copied back with a pfra_fback result code; PFR_FLAG_DUMMY
 * validates everything without committing.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* scratch table used only to detect duplicates in this request */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);	/* already in table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* dup in request? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* detach the collected entries from the scratch table */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
311 | ||
/*
 * Delete the listed addresses from table 'tbl'; *ndel receives the
 * number actually removed.  Entries selected for deletion are flagged
 * via pfrke_mark so a duplicate address in the request is detected, and
 * are collected on a workq that is removed in one pass.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;		/* log ends up ~ log2(table size) */
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;	/* don't collect it twice */
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
401 | ||
/*
 * Replace the contents of table 'tbl' with the given address list.
 * Existing entries that are listed are kept (their negation flag is
 * toggled if it differs), unlisted entries are deleted, and new ones
 * are added.  *nadd/*ndel/*nchange receive the respective counts.
 * With PFR_FLAG_FEEDBACK and a nonzero *size2, the deleted addresses
 * are additionally reported back in addr[size..] and *size2 is set to
 * the total count (or to the required buffer size if too small).
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* scratch table to catch duplicate additions within the request */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	/* reset marks: entries still unmarked at the end get deleted */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;	/* listed: keep this entry */
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* everything still unmarked was not listed: schedule for deletion */
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* buffer too small: report required size, abort */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
517 | ||
/*
 * Test which of the given host addresses match table 'tbl'.  Each
 * pfr_addr's pfra_fback is set to MATCH/NOTMATCH/NONE and *nmatch
 * receives the number of positive (non-negated) matches.  With
 * PFR_FLAG_REPLACE the matching table entry is copied back in place
 * of the query address.  Network (prefix) queries are rejected.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);	/* host addresses only */
		p = pfr_lookup_addr(kt, &ad, 0);	/* best-match lookup */
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
555 | ||
/*
 * Copy all addresses of table 'tbl' into the caller's buffer 'addr'.
 * On entry *size is the buffer capacity in entries; if the table is
 * larger, *size is updated to the required count and 0 is returned
 * without copying.  On success *size is the number of entries written.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;	/* expected to reach 0 after walks */
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	/* walk must have consumed exactly pfrkt_cnt output slots */
	if (w.pfrw_free) {
		kprintf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
594 | ||
/*
 * Like pfr_get_addrs(), but copies out pfr_astats (address plus
 * counters) for every entry.  Note PFR_FLAG_CLSTATS is not in the
 * accepted flag mask, so the clear-after-read branch below is
 * currently unreachable.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
644 | ||
/*
 * Clear the statistics of the listed addresses in 'tbl'.  *nzero gets
 * the number of entries whose stats were (or, with PFR_FLAG_DUMMY,
 * would be) cleared.  With PFR_FLAG_FEEDBACK each pfr_addr is copied
 * back with CLEARED/NONE in pfra_fback.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
696 | ||
/*
 * Sanity-check a pfr_addr coming from the caller: the prefix length
 * must fit the address family, all address bits beyond the prefix must
 * be zero, and pfra_not/pfra_fback must not carry stray values.
 * Returns 0 if valid, -1 otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * Check the partially-masked byte at the prefix boundary: its
	 * low-order host bits must be zero.
	 * NOTE(review): indexes 'ad' directly, which assumes pfra_u is
	 * the first member of struct pfr_addr -- confirm in pfvar.h.
	 */
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	/* every byte past the prefix must be zero as well */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
730 | ||
/*
 * Collect table entries onto 'workq'.  With 'sweep' set
 * (ENQUEUE_UNMARKED_ONLY) only unmarked entries are collected
 * (PFRW_SWEEP), otherwise all of them (PFRW_ENQUEUE).  *naddr, if
 * non-NULL, receives the number collected.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
750 | ||
/*
 * Walk both radix trees with PFRW_MARK to reset every entry's mark bit;
 * the del/set paths then use the mark to track which entries they have
 * visited.  (Exact walker behavior lives in pfr_walktree -- confirm.)
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
763 | ||
764 | ||
/*
 * Look up address 'ad' in table 'kt'.  Network (prefix) addresses use
 * an exact rn_lookup() with the prefix mask; host addresses use
 * best-match rn_match(), and with 'exact' set a network entry does not
 * count as a match.  Radix-internal root nodes are filtered out.
 * Returns the entry or NULL.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
794 | ||
/*
 * Allocate a new table entry and fill it from 'ad'.  'intr' selects the
 * allocation pool (M_PFRKENTRYPL2 vs. M_PFRKENTRYPL, both M_NOWAIT);
 * pfrke_intrpool records the choice so pfr_destroy_kentry() frees the
 * entry back to the right pool.  Returns NULL on allocation failure.
 */
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2, M_NOWAIT|M_ZERO);
	else
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL, M_NOWAIT|M_ZERO|M_NULLOK);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
817 | ||
818 | void | |
819 | pfr_destroy_kentries(struct pfr_kentryworkq *workq) | |
820 | { | |
821 | struct pfr_kentry *p, *q; | |
822 | ||
823 | for (p = SLIST_FIRST(workq); p != NULL; p = q) { | |
824 | q = SLIST_NEXT(p, pfrke_workq); | |
825 | pfr_destroy_kentry(p); | |
826 | } | |
827 | } | |
828 | ||
829 | void | |
830 | pfr_destroy_kentry(struct pfr_kentry *ke) | |
831 | { | |
ed1f0be2 | 832 | if (ke->pfrke_counters) |
1186cbc0 | 833 | kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL); |
70224baa | 834 | if (ke->pfrke_intrpool) |
1186cbc0 | 835 | kfree(ke, M_PFRKENTRYPL2); |
70224baa | 836 | else |
1186cbc0 | 837 | kfree(ke, M_PFRKENTRYPL); |
02742ec6 JS |
838 | } |
839 | ||
/*
 * Route each entry on 'workq' into the table's radix trees and stamp
 * it with 'tzero'.  Stops at the first routing failure (entries routed
 * so far remain in place); pfrkt_cnt is increased by the number
 * actually inserted.
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			kprintf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
859 | ||
70224baa JL |
860 | int |
861 | pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero) | |
862 | { | |
863 | struct pfr_kentry *p; | |
864 | int rv; | |
865 | ||
866 | p = pfr_lookup_addr(kt, ad, 1); | |
867 | if (p != NULL) | |
868 | return (0); | |
869 | p = pfr_create_kentry(ad, 1); | |
870 | if (p == NULL) | |
871 | return (EINVAL); | |
872 | ||
873 | rv = pfr_route_kentry(kt, p); | |
874 | if (rv) | |
875 | return (rv); | |
876 | ||
877 | p->pfrke_tzero = tzero; | |
878 | kt->pfrkt_cnt++; | |
879 | ||
880 | return (0); | |
881 | } | |
882 | ||
02742ec6 JS |
883 | void |
884 | pfr_remove_kentries(struct pfr_ktable *kt, | |
885 | struct pfr_kentryworkq *workq) | |
886 | { | |
887 | struct pfr_kentry *p; | |
888 | int n = 0; | |
889 | ||
890 | SLIST_FOREACH(p, workq, pfrke_workq) { | |
891 | pfr_unroute_kentry(kt, p); | |
892 | n++; | |
893 | } | |
894 | kt->pfrkt_cnt -= n; | |
895 | pfr_destroy_kentries(workq); | |
896 | } | |
897 | ||
898 | void | |
899 | pfr_clean_node_mask(struct pfr_ktable *kt, | |
900 | struct pfr_kentryworkq *workq) | |
901 | { | |
902 | struct pfr_kentry *p; | |
903 | ||
904 | SLIST_FOREACH(p, workq, pfrke_workq) | |
905 | pfr_unroute_kentry(kt, p); | |
906 | } | |
907 | ||
/*
 * Clear the statistics of every kentry on @workq and reset its tzero
 * to @tzero.  If @negchange is set, the entry's "not" flag is inverted
 * as well.  Counter manipulation is done inside a critical section so
 * the packet path does not see a half-freed counter block.
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		crit_enter();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			kfree(p->pfrke_counters, M_PFRKCOUNTERSPL);
			p->pfrke_counters = NULL;
		}
		crit_exit();
		p->pfrke_tzero = tzero;
	}
}
925 | ||
/*
 * Reset the per-address feedback field (pfra_fback) of @size user
 * addresses at @addr to PFR_FB_NONE.  Copy errors abort the loop
 * silently; this is best-effort cleanup after a failed operation.
 */
void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}
940 | ||
941 | void | |
942 | pfr_prepare_network(union sockaddr_union *sa, int af, int net) | |
943 | { | |
944 | int i; | |
945 | ||
946 | bzero(sa, sizeof(*sa)); | |
947 | if (af == AF_INET) { | |
948 | sa->sin.sin_len = sizeof(sa->sin); | |
949 | sa->sin.sin_family = AF_INET; | |
70224baa JL |
950 | sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; |
951 | } else if (af == AF_INET6) { | |
02742ec6 JS |
952 | sa->sin6.sin6_len = sizeof(sa->sin6); |
953 | sa->sin6.sin6_family = AF_INET6; | |
954 | for (i = 0; i < 4; i++) { | |
955 | if (net <= 32) { | |
956 | sa->sin6.sin6_addr.s6_addr32[i] = | |
70224baa | 957 | net ? htonl(-1 << (32-net)) : 0; |
02742ec6 JS |
958 | break; |
959 | } | |
960 | sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; | |
961 | net -= 32; | |
962 | } | |
963 | } | |
964 | } | |
965 | ||
/*
 * Link kentry @ke into the proper radix tree (v4 or v6) of table @kt.
 * Network entries are inserted with an explicit mask, host entries
 * without one.  Returns 0 on success, -1 if rn_addroute() refused the
 * insert (e.g. duplicate key).
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}
987 | ||
/*
 * Remove kentry @ke from the radix tree of table @kt it was routed
 * into.  Mirrors pfr_route_kentry(): network entries are deleted with
 * their mask, host entries without.  Returns 0 on success, -1 (with a
 * console diagnostic) if the node was not found.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		kprintf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1012 | ||
1013 | void | |
1014 | pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) | |
1015 | { | |
1016 | bzero(ad, sizeof(*ad)); | |
1017 | if (ke == NULL) | |
1018 | return; | |
1019 | ad->pfra_af = ke->pfrke_af; | |
1020 | ad->pfra_net = ke->pfrke_net; | |
1021 | ad->pfra_not = ke->pfrke_not; | |
1022 | if (ad->pfra_af == AF_INET) | |
1023 | ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; | |
70224baa | 1024 | else if (ad->pfra_af == AF_INET6) |
02742ec6 JS |
1025 | ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; |
1026 | } | |
1027 | ||
/*
 * Radix-tree walker callback dispatching on the operation stored in the
 * pfr_walktree argument @arg.  Returns 0 to continue the walk, 1 to
 * stop early (PFRW_POOL_GET hit), or EFAULT on a failed copyout.
 */
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	union sockaddr_union	pfr_mask;
	int			 flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		/* clear marks before a sweep */
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		/* collect only entries left unmarked since PFRW_MARK */
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy addresses out while userland buffer space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			/* snapshot counters atomically w.r.t. packet path */
			crit_enter();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		/* record addr/mask of the first entry per address family */
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6){
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
1113 | ||
/*
 * Deactivate all tables matching @filter by clearing their ACTIVE flag
 * via a setflags pass.  Reserved-anchor tables and already-inactive
 * tables are skipped.  The number of tables scheduled for deletion is
 * returned through @ndel.  PFR_FLAG_DUMMY only counts; PFR_FLAG_ATOMIC
 * wraps the flag change in a critical section.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1151 | ||
/*
 * Create @size tables described by the userland array @tbl.  New tables
 * go on @addq; existing-but-inactive tables get their user flags
 * refreshed via @changeq.  Anchored tables also get a root table (found
 * or created).  @nadd returns the number of tables added/changed.
 * On any copy/validation/allocation error everything queued so far is
 * destroyed (senderr jumps to _bad).
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already queued on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* a root may already be pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* table exists but is inactive: refresh user flags */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1231 | ||
/*
 * Delete @size tables named by the userland array @tbl: each active
 * match has its ACTIVE flag cleared through a setflags pass.  @ndel
 * returns the number of tables scheduled.  PFR_FLAG_DUMMY only counts.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* ignore duplicates already queued */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1271 | ||
/*
 * Copy the tables matching @filter out to the userland array @tbl.
 * *size is an in/out parameter: if the match count exceeds the caller's
 * buffer, *size is updated and nothing is copied; otherwise the tables
 * are copied and *size is set to the count.  ENOTTY signals that the
 * tree changed under us (count mismatch).
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1304 | ||
/*
 * Copy the statistics of all tables matching @filter to the userland
 * array @tbl; same in/out *size protocol as pfr_get_tables().  Each
 * copyout is protected by a critical section (entered once around the
 * whole loop under PFR_FLAG_ATOMIC, otherwise per-table).  With
 * PFR_FLAG_CLSTATS the copied tables' stats are then cleared to @tzero.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			/* balances whichever crit_enter() is outstanding */
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1355 | ||
/*
 * Clear the statistics of @size tables named by the userland array
 * @tbl (and of their addresses too, with PFR_FLAG_ADDRSTOO).  @nzero
 * returns the number of tables matched.  PFR_FLAG_DUMMY only counts.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1389 | ||
/*
 * Set @setflag and clear @clrflag on @size tables named by @tbl.  Only
 * user-settable flags are accepted, and set/clear must not overlap.
 * Tables whose flags actually change are queued for a setflags pass;
 * clearing PERSIST on an unreferenced table counts as a deletion
 * (@ndel), any other change counts in @nchange.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;	/* no effective change */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;	/* duplicate input */
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1443 | ||
/*
 * Begin a table transaction on the ruleset named by @trs: discard any
 * pending INACTIVE tables from a previous transaction, bump and return
 * the transaction ticket, and mark the ruleset open.  @ndel returns the
 * number of stale inactive tables flushed.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1476 | ||
/*
 * Stage a table definition inside an open transaction (@ticket must
 * match the ruleset's).  Finds or creates the target ktable (and its
 * anchor root table), then builds a detached "shadow" table holding the
 * @size addresses from userland; the shadow replaces any previously
 * staged shadow and is swapped in at commit time.  @nadd returns the
 * number of tables staged, @naddr the number of addresses.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the shadow table off to the side */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;	/* duplicate address in input */
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any previously staged shadow */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1576 | ||
/*
 * Abort an open table transaction: drop the INACTIVE flag from every
 * staged table in the ruleset and close the transaction.  A missing or
 * mismatched ticket is not an error -- there is simply nothing to roll
 * back.  @ndel returns the number of staged tables discarded.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1607 | ||
/*
 * Commit an open table transaction: every staged (INACTIVE) table in
 * the ruleset is committed via pfr_commit_ktable(), then the
 * transaction is closed.  @nadd counts tables that become newly active,
 * @nchange tables that were already active and get replaced.  EBUSY if
 * the ticket does not match an open transaction.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
	int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		/* commit may unlink p from workq, so save the next pointer */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1654 | ||
/*
 * Make the staged shadow of @kt live.  Three cases:
 *  - shadow has no addresses (NO_ADDRESSES): just clear stats if the
 *    table was not already active;
 *  - table is active: merge shadow addresses into the live radix trees
 *    (add new, delete unmarked old, flip changed negations);
 *  - table is inactive: simply swap the radix tree pointers and counts.
 * Finally the shadow is destroyed and the table's flags recomputed.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* already present; keep q, discard p */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* entries still unmarked were not in the shadow: delete */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1713 | ||
1714 | int | |
1715 | pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) | |
1716 | { | |
1717 | int i; | |
1718 | ||
1719 | if (!tbl->pfrt_name[0]) | |
1720 | return (-1); | |
1721 | if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) | |
1722 | return (-1); | |
1723 | if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) | |
1724 | return (-1); | |
1725 | for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) | |
1726 | if (tbl->pfrt_name[i]) | |
1727 | return (-1); | |
70224baa JL |
1728 | if (pfr_fix_anchor(tbl->pfrt_anchor)) |
1729 | return (-1); | |
02742ec6 JS |
1730 | if (tbl->pfrt_flags & ~allowedflags) |
1731 | return (-1); | |
1732 | return (0); | |
1733 | } | |
1734 | ||
70224baa JL |
/*
 * Rewrite anchors referenced by tables to remove leading slashes and
 * verify that the MAXPATHLEN-sized buffer is NUL-terminated and
 * NUL-padded past the string.  Returns 0 if valid, -1 otherwise.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t	siz = MAXPATHLEN;
	int	i;

	if (*anchor == '/') {
		int	lead = 1;

		/* count the run of leading slashes, then shift it out */
		while (anchor[lead] == '/')
			lead++;
		bcopy(anchor + lead, anchor, siz - lead);
		memset(anchor + (siz - lead), 0, lead);
	}
	if (anchor[siz - 1] != '\0')
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i] != '\0')
			return (-1);
	return (0);
}
1763 | ||
02742ec6 JS |
/*
 * Count the tables visible through @filter: all of them with
 * PFR_FLAG_ALLRSETS, the named anchor's table count if an anchor is
 * given (-1 if the anchor does not exist), otherwise the main
 * ruleset's count.
 */
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
1777 | ||
1778 | int | |
1779 | pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) | |
1780 | { | |
1781 | if (flags & PFR_FLAG_ALLRSETS) | |
1782 | return (0); | |
70224baa | 1783 | if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) |
02742ec6 JS |
1784 | return (1); |
1785 | return (0); | |
1786 | } | |
1787 | ||
1788 | void | |
1789 | pfr_insert_ktables(struct pfr_ktableworkq *workq) | |
1790 | { | |
1791 | struct pfr_ktable *p; | |
1792 | ||
1793 | SLIST_FOREACH(p, workq, pfrkt_workq) | |
1794 | pfr_insert_ktable(p); | |
1795 | } | |
1796 | ||
1797 | void | |
1798 | pfr_insert_ktable(struct pfr_ktable *kt) | |
1799 | { | |
1800 | RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); | |
1801 | pfr_ktable_cnt++; | |
1802 | if (kt->pfrkt_root != NULL) | |
1803 | if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) | |
1804 | pfr_setflags_ktable(kt->pfrkt_root, | |
1805 | kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); | |
1806 | } | |
1807 | ||
1808 | void | |
1809 | pfr_setflags_ktables(struct pfr_ktableworkq *workq) | |
1810 | { | |
70224baa | 1811 | struct pfr_ktable *p, *q; |
02742ec6 | 1812 | |
70224baa JL |
1813 | for (p = SLIST_FIRST(workq); p; p = q) { |
1814 | q = SLIST_NEXT(p, pfrkt_workq); | |
02742ec6 | 1815 | pfr_setflags_ktable(p, p->pfrkt_nflags); |
70224baa | 1816 | } |
02742ec6 JS |
1817 | } |
1818 | ||
/*
 * Transition 'kt' to flag set 'newf' and perform the side effects the
 * new state implies: an unreferenced, non-persistent table loses
 * ACTIVE; a table with no SETMASK flags left is unlinked and
 * destroyed; a deactivated table has its addresses flushed; a table
 * leaving INACTIVE drops its shadow copy.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	/* a table nobody references and that isn't persistent dies */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	/* inactive tables carry no user-settable flags */
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* no set flags remain: unlink and destroy the table */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		/* drop the anchor reference held on the root table, if any */
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	/* table went inactive: flush all of its address entries */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* leaving an inactive transaction: discard the shadow table */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1850 | ||
1851 | void | |
1852 | pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) | |
1853 | { | |
1854 | struct pfr_ktable *p; | |
1855 | ||
1856 | SLIST_FOREACH(p, workq, pfrkt_workq) | |
1857 | pfr_clstats_ktable(p, tzero, recurse); | |
1858 | } | |
1859 | ||
/*
 * Reset the match/packet/byte counters of 'kt' and stamp it with the
 * new zeroing time 'tzero'.  With 'recurse' set, the per-address
 * statistics of every entry in the table are cleared as well.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	/* zero the counters inside a critical section so the packet
	 * path never sees a half-cleared set */
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
1876 | ||
/*
 * Allocate and initialize a new kernel table described by 'tbl'.  When
 * 'attachruleset' is set, the table is bound to (and accounted in) the
 * ruleset of its anchor, creating that ruleset if necessary.  Returns
 * NULL on allocation or radix-head initialization failure; partially
 * constructed state is torn down via pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	/* M_NULLOK: allocation may fail, caller handles NULL */
	kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL, M_NOWAIT|M_ZERO|M_NULLOK);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix tree per address family, sharing the global mask head */
	KKASSERT(pf_maskhead != NULL);
	if (!rn_inithead(&kt->pfrkt_ip4, pf_maskhead,
	    offsetof(struct sockaddr_in, sin_addr)) ||
	    !rn_inithead(&kt->pfrkt_ip6, pf_maskhead,
	    offsetof(struct sockaddr_in6, sin6_addr))) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
1910 | ||
1911 | void | |
1912 | pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) | |
1913 | { | |
1914 | struct pfr_ktable *p, *q; | |
1915 | ||
1916 | for (p = SLIST_FIRST(workq); p; p = q) { | |
1917 | q = SLIST_NEXT(p, pfrkt_workq); | |
1918 | pfr_destroy_ktable(p, flushaddr); | |
1919 | } | |
1920 | } | |
1921 | ||
/*
 * Free 'kt' and everything hanging off it: its address entries (when
 * 'flushaddr' is set), both radix heads, a possible shadow table, and
 * its ruleset account (removing the ruleset if it became empty).
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		/* detach all entries from the radix trees, then free them */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);

	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	/* a pending inactive copy goes down with the table */
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	kfree(kt, M_PFRKTABLEPL);
}
1945 | ||
1946 | int | |
1947 | pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) | |
1948 | { | |
1949 | int d; | |
1950 | ||
1951 | if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) | |
1952 | return (d); | |
70224baa | 1953 | return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); |
02742ec6 JS |
1954 | } |
1955 | ||
1956 | struct pfr_ktable * | |
1957 | pfr_lookup_table(struct pfr_table *tbl) | |
1958 | { | |
1959 | /* struct pfr_ktable start like a struct pfr_table */ | |
1960 | return (RB_FIND(pfr_ktablehead, &pfr_ktables, | |
1961 | (struct pfr_ktable *)tbl)); | |
1962 | } | |
1963 | ||
/*
 * Packet-path lookup: report whether address 'a' (family 'af') matches
 * an entry of table 'kt', honoring per-entry negation, and update the
 * table's match/nomatch counters.  Returns 1 on match, 0 otherwise.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	/* an inactive anchored table falls back to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		/* build a sockaddr_in key and search the v4 radix tree */
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		/* the radix tree's internal root nodes are not entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* same lookup with a sockaddr_in6 key on the v6 tree */
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	/* a negated ("!") entry counts as a non-match */
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2010 | ||
/*
 * Packet-path statistics update: account 'len' bytes moving in
 * direction 'dir_out' with verdict 'op_pass' to table 'kt' and, when
 * per-address counters are enabled (PFR_TFLAG_COUNTERS), to the
 * matching entry.  'notrule' carries the rule's expectation of a
 * negated table match; a disagreement with the actual lookup result
 * is recorded under PFR_OP_XPASS.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	/* an inactive anchored table falls back to its root table */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		/* v4 radix lookup, same key setup as pfr_match_addr() */
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	/* lookup contradicts the rule: log and account it as XPASS */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		/* per-entry counters are allocated lazily on first hit;
		 * a failed M_NOWAIT allocation just skips the update */
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = kmalloc(sizeof(struct pfr_kcounters),
			    M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
2070 | ||
/*
 * Return a referenced kernel table named 'name' for a rule in ruleset
 * 'rs', creating it if it does not exist yet.  A table in an anchored
 * ruleset also gets a root table of the same name in the main ruleset.
 * Takes one PFR_REFCNT_RULE reference; returns NULL on allocation
 * failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* look up / create the root table in the main
			 * ruleset (empty anchor) and link it in */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					/* roll back the anchored table */
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* first rule reference marks the table REFERENCED */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2106 | ||
2107 | void | |
2108 | pfr_detach_table(struct pfr_ktable *kt) | |
2109 | { | |
2110 | if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) | |
4b1cf444 | 2111 | kprintf("pfr_detach_table: refcount = %d.\n", |
02742ec6 JS |
2112 | kt->pfrkt_refcnt[PFR_REFCNT_RULE]); |
2113 | else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) | |
2114 | pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); | |
2115 | } | |
2116 | ||
2117 | int | |
2118 | pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, | |
2119 | struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af) | |
2120 | { | |
70224baa JL |
2121 | struct pfr_kentry *ke, *ke2 = NULL; |
2122 | struct pf_addr *addr = NULL; | |
02742ec6 JS |
2123 | union sockaddr_union mask; |
2124 | int idx = -1, use_counter = 0; | |
d66d8bc0 MD |
2125 | struct sockaddr_in pfr_sin; |
2126 | struct sockaddr_in6 pfr_sin6; | |
2127 | union sockaddr_union pfr_mask; | |
02742ec6 | 2128 | |
70224baa JL |
2129 | if (af == AF_INET) |
2130 | addr = (struct pf_addr *)&pfr_sin.sin_addr; | |
2131 | else if (af == AF_INET6) | |
2132 | addr = (struct pf_addr *)&pfr_sin6.sin6_addr; | |
02742ec6 JS |
2133 | if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) |
2134 | kt = kt->pfrkt_root; | |
2135 | if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) | |
2136 | return (-1); | |
2137 | ||
2138 | if (pidx != NULL) | |
2139 | idx = *pidx; | |
2140 | if (counter != NULL && idx >= 0) | |
2141 | use_counter = 1; | |
2142 | if (idx < 0) | |
2143 | idx = 0; | |
2144 | ||
2145 | _next_block: | |
2146 | ke = pfr_kentry_byidx(kt, idx, af); | |
ed1f0be2 JL |
2147 | if (ke == NULL) { |
2148 | kt->pfrkt_nomatch++; | |
02742ec6 | 2149 | return (1); |
ed1f0be2 | 2150 | } |
02742ec6 JS |
2151 | pfr_prepare_network(&pfr_mask, af, ke->pfrke_net); |
2152 | *raddr = SUNION2PF(&ke->pfrke_sa, af); | |
2153 | *rmask = SUNION2PF(&pfr_mask, af); | |
2154 | ||
2155 | if (use_counter) { | |
2156 | /* is supplied address within block? */ | |
2157 | if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) { | |
2158 | /* no, go to next block in table */ | |
2159 | idx++; | |
2160 | use_counter = 0; | |
2161 | goto _next_block; | |
2162 | } | |
2163 | PF_ACPY(addr, counter, af); | |
2164 | } else { | |
2165 | /* use first address of block */ | |
2166 | PF_ACPY(addr, *raddr, af); | |
2167 | } | |
2168 | ||
2169 | if (!KENTRY_NETWORK(ke)) { | |
2170 | /* this is a single IP address - no possible nested block */ | |
2171 | PF_ACPY(counter, addr, af); | |
2172 | *pidx = idx; | |
ed1f0be2 | 2173 | kt->pfrkt_match++; |
02742ec6 JS |
2174 | return (0); |
2175 | } | |
2176 | for (;;) { | |
2177 | /* we don't want to use a nested block */ | |
70224baa | 2178 | if (af == AF_INET) |
d8449084 | 2179 | ke2 = (struct pfr_kentry *)rn_match(&pfr_sin, |
70224baa JL |
2180 | kt->pfrkt_ip4); |
2181 | else if (af == AF_INET6) | |
d8449084 | 2182 | ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6, |
70224baa | 2183 | kt->pfrkt_ip6); |
02742ec6 JS |
2184 | /* no need to check KENTRY_RNF_ROOT() here */ |
2185 | if (ke2 == ke) { | |
2186 | /* lookup return the same block - perfect */ | |
2187 | PF_ACPY(counter, addr, af); | |
2188 | *pidx = idx; | |
ed1f0be2 | 2189 | kt->pfrkt_match++; |
02742ec6 JS |
2190 | return (0); |
2191 | } | |
2192 | ||
2193 | /* we need to increase the counter past the nested block */ | |
2194 | pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net); | |
2195 | PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af); | |
2196 | PF_AINC(addr, af); | |
2197 | if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) { | |
2198 | /* ok, we reached the end of our main block */ | |
2199 | /* go to next block in table */ | |
2200 | idx++; | |
2201 | use_counter = 0; | |
2202 | goto _next_block; | |
2203 | } | |
2204 | } | |
2205 | } | |
2206 | ||
/*
 * Return the idx'th entry (in radix-walk order) of address family 'af'
 * in table 'kt', or NULL if idx is out of range or the family is
 * unsupported.  Used by pfr_pool_get() for round-robin pools.
 */
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;	/* index consumed by pfr_walktree() */

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
2231 | ||
/*
 * Recompute the dynamic-address state 'dyn' from table 'kt': walk the
 * relevant radix tree(s) (both when dyn->pfid_af is unspecified) and
 * let pfr_walktree() refresh the cached addresses and the v4/v6
 * address counts.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	/* reset and rebuild inside a critical section so the packet
	 * path never sees the counts half-updated */
	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}