1 /*      $OpenBSD: pf_ioctl.c,v 1.209 2008/06/29 08:42:15 mcbride Exp $ */
2 /*add $OpenBSD: pf_ioctl.c,v 1.212 2009/02/15 20:42:33 mbalmer Exp $ */
3
4 /*
5  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
6  *
7  * Copyright (c) 2001 Daniel Hartmeier
8  * Copyright (c) 2002,2003 Henning Brauer
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *    - Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *    - Redistributions in binary form must reproduce the above
18  *      copyright notice, this list of conditions and the following
19  *      disclaimer in the documentation and/or other materials provided
20  *      with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * Effort sponsored in part by the Defense Advanced Research Projects
36  * Agency (DARPA) and Air Force Research Laboratory, Air Force
37  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
38  *
39  */
40
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/conf.h>
47 #include <sys/device.h>
48 #include <sys/mbuf.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/kernel.h>
54 #include <sys/kthread.h>
55 #include <sys/time.h>
56 #include <sys/proc.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
59 #include <sys/lock.h>
60
61 #include <sys/thread2.h>
62
63 #include <net/if.h>
64 #include <net/if_types.h>
65 #include <net/route.h>
66
67 #include <netinet/in.h>
68 #include <netinet/in_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_icmp.h>
73
74 #include <net/pf/pfvar.h>
75 #include <sys/md5.h>
76
77 #include <net/pf/if_pflog.h>
78 #include <net/pf/if_pfsync.h>
79
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
83 #endif /* INET6 */
84
85 #ifdef ALTQ
86 #include <net/altq/altq.h>
87 #endif
88
89 #include <machine/limits.h>
90 #include <net/pfil.h>
91 #include <sys/mutex.h>
92
93 u_int rt_numfibs = RT_NUMFIBS;
94
95 void                     pfattach(void);
96 struct pf_pool          *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
97                             u_int8_t, u_int8_t, u_int8_t);
98
99 void                     pf_mv_pool(struct pf_palist *, struct pf_palist *);
100 void                     pf_empty_pool(struct pf_palist *);
101 #ifdef ALTQ
102 int                      pf_begin_altq(u_int32_t *);
103 int                      pf_rollback_altq(u_int32_t);
104 int                      pf_commit_altq(u_int32_t);
105 int                      pf_enable_altq(struct pf_altq *);
106 int                      pf_disable_altq(struct pf_altq *);
107 #endif /* ALTQ */
108 int                      pf_begin_rules(u_int32_t *, int, const char *);
109 int                      pf_rollback_rules(u_int32_t, int, char *);
110 int                      pf_setup_pfsync_matching(struct pf_ruleset *);
111 void                     pf_hash_rule(MD5_CTX *, struct pf_rule *);
112 void                     pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
113 int                      pf_commit_rules(u_int32_t, int, char *);
114 int                      pf_addr_setup(struct pf_ruleset *,
115                             struct pf_addr_wrap *, sa_family_t);
116 void                     pf_addr_copyout(struct pf_addr_wrap *);
117
118 struct pf_rule           pf_default_rule;
119 struct lock              pf_consistency_lock;
120 struct lock              pf_global_statetbl_lock;
121 #ifdef ALTQ
122 static int               pf_altq_running;
123 #endif
124
125 #define TAGID_MAX        50000
126 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
127                                 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
128
129 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
130 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
131 #endif
132 u_int16_t                tagname2tag(struct pf_tags *, char *);
133 void                     tag2tagname(struct pf_tags *, u_int16_t, char *);
134 void                     tag_unref(struct pf_tags *, u_int16_t);
135 int                      pf_rtlabel_add(struct pf_addr_wrap *);
136 void                     pf_rtlabel_remove(struct pf_addr_wrap *);
137 void                     pf_rtlabel_copyout(struct pf_addr_wrap *);
138
139 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
140
141 static cdev_t   pf_dev;
142
143 static MALLOC_DEFINE(M_PFRULEPL, "pfrulepl", "pf rule pool list");
144 static MALLOC_DEFINE(M_PFALTQPL, "pfaltqpl", "pf altq pool list");
145 static MALLOC_DEFINE(M_PFPOOLADDRPL, "pfpooladdrpl", "pf pool address pool list");
146 static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list");
147
148
149 /*
150  * XXX - These are new and need to be checked when moving to a new version
151  */
152 static void              pf_clear_states(void);
153 static int               pf_clear_tables(void);
154 static void              pf_clear_srcnodes(void);
155 /*
156  * XXX - These are new and need to be checked when moving to a new version
157  */
158  
159 /*
160  * Wrapper functions for pfil(9) hooks
161  */
162 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
163                 int dir);
164 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
165                 int dir);
166 #ifdef INET6
167 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
168                 int dir);
169 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
170                 int dir);
171 #endif
172
173 static int               hook_pf(void);
174 static int               dehook_pf(void);
175 static int               shutdown_pf(void);
176 static int               pf_load(void);
177 static int               pf_unload(void);
178
179 d_open_t        pfopen;
180 d_close_t       pfclose;
181 d_ioctl_t       pfioctl;
182
183 static struct dev_ops pf_ops = {            /* XXX convert to port model */
184         { PF_NAME, 73, 0 },
185         .d_open =       pfopen,
186         .d_close =      pfclose,
187         .d_ioctl =      pfioctl
188 };
189
190 static volatile int pf_pfil_hooked = 0;
191 int pf_end_threads = 0;
192
193 int debug_pfugidhack = 0;
194 SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
195         "Enable/disable pf user/group rules mpsafe hack");
196
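/*
 * pfattach: create the pf kmalloc pools and the mask radix tree, set the
 * pool limits, initialize the rulesets, the default rule and the default
 * timeouts, and start the pfpurge kthread.
 */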
197 void
198 pfattach(void)
199 {
200         u_int32_t *my_timeout = pf_default_rule.timeout;
201         int nn;
202
203         if (!rn_inithead((void **)&pf_maskhead, NULL, 0)) {
204                 kprintf("pf mask radix tree create failed\n");
205                 return;
206         }
207         kmalloc_create(&pf_state_pl, "pf state pool list");
208         kmalloc_raise_limit(pf_state_pl, 0);
209         kmalloc_create(&pf_frent_pl, "pf fragment pool list");
210         kmalloc_raise_limit(pf_frent_pl, 0);
211         kmalloc_create(&pf_cent_pl, "pf cent pool list");
212         kmalloc_raise_limit(pf_cent_pl, 0);
213         
214         pfr_initialize();
215         pfi_initialize();
216         pf_osfp_initialize();
217
218         pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
219         pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
220         pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
221         pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
222         if (ctob(physmem) <= 100*1024*1024)
223                 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
224                     PFR_KENTRY_HIWAT_SMALL;
225
226         for (nn = 0; nn < ncpus; ++nn) {
227                 RB_INIT(&tree_src_tracking[nn]);
228                 RB_INIT(&tree_id[nn]);
229         }
230         RB_INIT(&pf_anchors);
231         pf_init_ruleset(&pf_main_ruleset);
232         TAILQ_INIT(&pf_altqs[0]);
233         TAILQ_INIT(&pf_altqs[1]);
234         TAILQ_INIT(&pf_pabuf);
235         pf_altqs_active = &pf_altqs[0];
236         pf_altqs_inactive = &pf_altqs[1];
237         for (nn = 0; nn < ncpus; ++nn)
238                 TAILQ_INIT(&state_list[nn]);
239
240         /* default rule should never be garbage collected */
241         pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
242         pf_default_rule.action = PF_PASS;
243         pf_default_rule.nr = (uint32_t)(-1);
244         pf_default_rule.rtableid = -1;
245
246         /* initialize default timeouts */
247         my_timeout[PFTM_TCP_FIRST_PACKET] = 120;        /* First TCP packet */
248         my_timeout[PFTM_TCP_OPENING] = 30;              /* No response yet */
249         my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;    /* Established */
250         my_timeout[PFTM_TCP_CLOSING] = 15 * 60;         /* Half closed */
251         my_timeout[PFTM_TCP_FIN_WAIT] = 45;             /* Got both FINs */
252         my_timeout[PFTM_TCP_CLOSED] = 90;               /* Got a RST */
253         my_timeout[PFTM_UDP_FIRST_PACKET] = 60;         /* First UDP packet */
254         my_timeout[PFTM_UDP_SINGLE] = 30;               /* Unidirectional */
255         my_timeout[PFTM_UDP_MULTIPLE] = 60;             /* Bidirectional */
256         my_timeout[PFTM_ICMP_FIRST_PACKET] = 20;        /* First ICMP packet */
257         my_timeout[PFTM_ICMP_ERROR_REPLY] = 10;         /* Got error response */
258         my_timeout[PFTM_OTHER_FIRST_PACKET] = 60;       /* First packet */
259         my_timeout[PFTM_OTHER_SINGLE] = 30;             /* Unidirectional */
260         my_timeout[PFTM_OTHER_MULTIPLE] = 60;           /* Bidirectional */
261         my_timeout[PFTM_FRAG] = 30;                     /* Fragment expire */
262         my_timeout[PFTM_INTERVAL] = 10;                 /* Expire interval */
263         my_timeout[PFTM_SRC_NODE] = 0;          /* Source Tracking */
264         my_timeout[PFTM_TS_DIFF] = 30;          /* Allowed TS diff */
265         my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
266         my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
267         
268         pf_normalize_init();
269         bzero(&pf_status, sizeof(pf_status));
270         pf_status.debug = PF_DEBUG_URGENT;
271         /* XXX do our best to avoid a conflict */
272         pf_status.hostid = karc4random();
273
274         if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
275                 panic("pfpurge thread");
276 }
277
278 int
279 pfopen(struct dev_open_args *ap)
280 {
281         lwkt_gettoken(&pf_token);
282         cdev_t dev = ap->a_head.a_dev;
283         if (minor(dev) >= 1) {
284                 lwkt_reltoken(&pf_token);
285                 return (ENXIO);
286         }
287         lwkt_reltoken(&pf_token);
288         return (0);
289 }
290
291 int
292 pfclose(struct dev_close_args *ap)
293 {
294         lwkt_gettoken(&pf_token);
295         cdev_t dev = ap->a_head.a_dev;
296         if (minor(dev) >= 1) {
297                 lwkt_reltoken(&pf_token);
298                 return (ENXIO);
299         }
300         lwkt_reltoken(&pf_token);
301         return (0);
302 }
303
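/*
 * pf_get_pool: return the address pool of the rule identified by anchor,
 * action and rule number in either the active or inactive ruleset;
 * r_last selects the last rule in the queue, check_ticket verifies the
 * caller's ticket first.
 */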
304 struct pf_pool *
305 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
306     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
307     u_int8_t check_ticket)
308 {
309         struct pf_ruleset       *ruleset;
310         struct pf_rule          *rule;
311         int                      rs_num;
312
313         ruleset = pf_find_ruleset(anchor);
314         if (ruleset == NULL)
315                 return (NULL);
316         rs_num = pf_get_ruleset_number(rule_action);
317         if (rs_num >= PF_RULESET_MAX)
318                 return (NULL);
319         if (active) {
320                 if (check_ticket && ticket !=
321                     ruleset->rules[rs_num].active.ticket)
322                         return (NULL);
323                 if (r_last)
324                         rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
325                             pf_rulequeue);
326                 else
327                         rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
328         } else {
329                 if (check_ticket && ticket !=
330                     ruleset->rules[rs_num].inactive.ticket)
331                         return (NULL);
332                 if (r_last)
333                         rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
334                             pf_rulequeue);
335                 else
336                         rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
337         }
338         if (!r_last) {
339                 while ((rule != NULL) && (rule->nr != rule_number))
340                         rule = TAILQ_NEXT(rule, entries);
341         }
342         if (rule == NULL)
343                 return (NULL);
344
345         return (&rule->rpool);
346 }
347
348 void
349 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
350 {
351         struct pf_pooladdr      *mv_pool_pa;
352
353         while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
354                 TAILQ_REMOVE(poola, mv_pool_pa, entries);
355                 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
356         }
357 }
358
359 void
360 pf_empty_pool(struct pf_palist *poola)
361 {
362         struct pf_pooladdr      *empty_pool_pa;
363
364         while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
365                 pfi_dynaddr_remove(&empty_pool_pa->addr);
366                 pf_tbladdr_remove(&empty_pool_pa->addr);
367                 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
368                 TAILQ_REMOVE(poola, empty_pool_pa, entries);
369                 kfree(empty_pool_pa, M_PFPOOLADDRPL);
370         }
371 }
372
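/*
 * pf_rm_rule: unlink a rule from its queue and drop the references it
 * holds (tags, queue ids, route labels, dynamic addresses, tables,
 * interface); the rule itself is freed only once no states or source
 * nodes reference it any longer.
 */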
373 void
374 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
375 {
376         if (rulequeue != NULL) {
377                 if (rule->states_cur <= 0) {
378                         /*
379                          * XXX - we need to remove the table *before* detaching
380                          * the rule to make sure the table code does not delete
381                          * the anchor under our feet.
382                          */
383                         pf_tbladdr_remove(&rule->src.addr);
384                         pf_tbladdr_remove(&rule->dst.addr);
385                         if (rule->overload_tbl)
386                                 pfr_detach_table(rule->overload_tbl);
387                 }
388                 TAILQ_REMOVE(rulequeue, rule, entries);
389                 rule->entries.tqe_prev = NULL;
390                 rule->nr = -1;
391         }
392
393         if (rule->states_cur > 0 || rule->src_nodes > 0 ||
394             rule->entries.tqe_prev != NULL)
395                 return;
396         pf_tag_unref(rule->tag);
397         pf_tag_unref(rule->match_tag);
398 #ifdef ALTQ
399         if (rule->pqid != rule->qid)
400                 pf_qid_unref(rule->pqid);
401         pf_qid_unref(rule->qid);
402 #endif
403         pf_rtlabel_remove(&rule->src.addr);
404         pf_rtlabel_remove(&rule->dst.addr);
405         pfi_dynaddr_remove(&rule->src.addr);
406         pfi_dynaddr_remove(&rule->dst.addr);
407         if (rulequeue == NULL) {
408                 pf_tbladdr_remove(&rule->src.addr);
409                 pf_tbladdr_remove(&rule->dst.addr);
410                 if (rule->overload_tbl)
411                         pfr_detach_table(rule->overload_tbl);
412         }
413         pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
414         pf_anchor_remove(rule);
415         pf_empty_pool(&rule->rpool.list);
416         kfree(rule, M_PFRULEPL);
417 }
418
419 u_int16_t
420 tagname2tag(struct pf_tags *head, char *tagname)
421 {
422         struct pf_tagname       *tag, *p = NULL;
423         u_int16_t                new_tagid = 1;
424
425         TAILQ_FOREACH(tag, head, entries)
426                 if (strcmp(tagname, tag->name) == 0) {
427                         tag->ref++;
428                         return (tag->tag);
429                 }
430
431         /*
432          * to avoid fragmentation, we do a linear search from the beginning
433          * and take the first free slot we find. if there is none or the list
434          * is empty, append a new entry at the end.
435          */
436
437         /* new entry */
438         if (!TAILQ_EMPTY(head))
439                 for (p = TAILQ_FIRST(head); p != NULL &&
440                     p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
441                         new_tagid = p->tag + 1;
442
443         if (new_tagid > TAGID_MAX)
444                 return (0);
445
446         /* allocate and fill new struct pf_tagname */
447         tag = kmalloc(sizeof(*tag), M_TEMP, M_WAITOK);
448         strlcpy(tag->name, tagname, sizeof(tag->name));
449         tag->tag = new_tagid;
450         tag->ref++;
451
452         if (p != NULL)  /* insert new entry before p */
453                 TAILQ_INSERT_BEFORE(p, tag, entries);
454         else    /* either list empty or no free slot in between */
455                 TAILQ_INSERT_TAIL(head, tag, entries);
456
457         return (tag->tag);
458 }
459
460 void
461 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
462 {
463         struct pf_tagname       *tag;
464
465         TAILQ_FOREACH(tag, head, entries)
466                 if (tag->tag == tagid) {
467                         strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
468                         return;
469                 }
470 }
471
472 void
473 tag_unref(struct pf_tags *head, u_int16_t tag)
474 {
475         struct pf_tagname       *p, *next;
476
477         if (tag == 0)
478                 return;
479
480         for (p = TAILQ_FIRST(head); p != NULL; p = next) {
481                 next = TAILQ_NEXT(p, entries);
482                 if (tag == p->tag) {
483                         if (--p->ref == 0) {
484                                 TAILQ_REMOVE(head, p, entries);
485                                 kfree(p, M_TEMP);
486                         }
487                         break;
488                 }
489         }
490 }
491
492 u_int16_t
493 pf_tagname2tag(char *tagname)
494 {
495         return (tagname2tag(&pf_tags, tagname));
496 }
497
498 void
499 pf_tag2tagname(u_int16_t tagid, char *p)
500 {
501         tag2tagname(&pf_tags, tagid, p);
502 }
503
504 void
505 pf_tag_ref(u_int16_t tag)
506 {
507         struct pf_tagname *t;
508
509         TAILQ_FOREACH(t, &pf_tags, entries)
510                 if (t->tag == tag)
511                         break;
512         if (t != NULL)
513                 t->ref++;
514 }
515
516 void
517 pf_tag_unref(u_int16_t tag)
518 {
519         tag_unref(&pf_tags, tag);
520 }
521
522 int
523 pf_rtlabel_add(struct pf_addr_wrap *a)
524 {
525         return (0);
526 }
527
528 void
529 pf_rtlabel_remove(struct pf_addr_wrap *a)
530 {
531 }
532
533 void
534 pf_rtlabel_copyout(struct pf_addr_wrap *a)
535 {
536         if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
537                 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
538 }
539
540 #ifdef ALTQ
541 u_int32_t
542 pf_qname2qid(char *qname)
543 {
544         return ((u_int32_t)tagname2tag(&pf_qids, qname));
545 }
546
547 void
548 pf_qid2qname(u_int32_t qid, char *p)
549 {
550         tag2tagname(&pf_qids, (u_int16_t)qid, p);
551 }
552
553 void
554 pf_qid_unref(u_int32_t qid)
555 {
556         tag_unref(&pf_qids, (u_int16_t)qid);
557 }
558
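/*
 * pf_begin_altq: flush whatever is left on the inactive altq list and
 * hand out a fresh inactive ticket for a new altq transaction.
 */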
559 int
560 pf_begin_altq(u_int32_t *ticket)
561 {
562         struct pf_altq  *altq;
563         int              error = 0;
564
565         /* Purge the old altq list */
566         while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
567                 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
568                 if (altq->qname[0] == 0) {
569                         /* detach and destroy the discipline */
570                         error = altq_remove(altq);
571                 } else
572                         pf_qid_unref(altq->qid);
573                 kfree(altq, M_PFALTQPL);
574         }
575         if (error)
576                 return (error);
577         *ticket = ++ticket_altqs_inactive;
578         altqs_inactive_open = 1;
579         return (0);
580 }
581
582 int
583 pf_rollback_altq(u_int32_t ticket)
584 {
585         struct pf_altq  *altq;
586         int              error = 0;
587
588         if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
589                 return (0);
590         /* Purge the old altq list */
591         while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
592                 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
593                 if (altq->qname[0] == 0) {
594                         /* detach and destroy the discipline */
595                         error = altq_remove(altq);
596                 } else
597                         pf_qid_unref(altq->qid);
598                 kfree(altq, M_PFALTQPL);
599         }
600         altqs_inactive_open = 0;
601         return (error);
602 }
603
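/*
 * pf_commit_altq: swap the inactive altq list in as the active one,
 * attach the new disciplines and tear down the old ones.
 */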
604 int
605 pf_commit_altq(u_int32_t ticket)
606 {
607         struct pf_altqqueue     *old_altqs;
608         struct pf_altq          *altq;
609         int                      err, error = 0;
610
611         if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
612                 return (EBUSY);
613
614         /* swap altqs, keep the old. */
615         crit_enter();
616         old_altqs = pf_altqs_active;
617         pf_altqs_active = pf_altqs_inactive;
618         pf_altqs_inactive = old_altqs;
619         ticket_altqs_active = ticket_altqs_inactive;
620
621         /* Attach new disciplines */
622         TAILQ_FOREACH(altq, pf_altqs_active, entries) {
623                 if (altq->qname[0] == 0) {
624                         /* attach the discipline */
625                         error = altq_pfattach(altq);
626                         if (error) {
627                                 crit_exit();
628                                 return (error);
629                         }
630                 }
631         }
632
633         /* Purge the old altq list */
634         while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
635                 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
636                 if (altq->qname[0] == 0) {
637                         /* detach and destroy the discipline */
638                         if (pf_altq_running)
639                                 error = pf_disable_altq(altq);
640                         err = altq_pfdetach(altq);
641                         if (err != 0 && error == 0)
642                                 error = err;
643                         err = altq_remove(altq);
644                         if (err != 0 && error == 0)
645                                 error = err;
646                 } else
647                         pf_qid_unref(altq->qid);
648                 kfree(altq, M_PFALTQPL);
649         }
650         crit_exit();
651
652         altqs_inactive_open = 0;
653         return (error);
654 }
655
656 int
657 pf_enable_altq(struct pf_altq *altq)
658 {
659         struct ifnet            *ifp;
660         struct tb_profile        tb;
661         int                      error = 0;
662
663         if ((ifp = ifunit(altq->ifname)) == NULL)
664                 return (EINVAL);
665
666         if (ifp->if_snd.altq_type != ALTQT_NONE)
667                 error = altq_enable(&ifp->if_snd);
668
669         /* set tokenbucket regulator */
670         if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
671                 tb.rate = altq->ifbandwidth;
672                 tb.depth = altq->tbrsize;
673                 crit_enter();
674                 error = tbr_set(&ifp->if_snd, &tb);
675                 crit_exit();
676         }
677
678         return (error);
679 }
680
681 int
682 pf_disable_altq(struct pf_altq *altq)
683 {
684         struct ifnet            *ifp;
685         struct tb_profile        tb;
686         int                      error;
687
688         if ((ifp = ifunit(altq->ifname)) == NULL)
689                 return (EINVAL);
690
691         /*
692  * if the discipline is no longer referenced, it has been overridden
693  * by a new one; in that case, just return.
694          */
695         if (altq->altq_disc != ifp->if_snd.altq_disc)
696                 return (0);
697
698         error = altq_disable(&ifp->if_snd);
699
700         if (error == 0) {
701                 /* clear tokenbucket regulator */
702                 tb.rate = 0;
703                 crit_enter();
704                 error = tbr_set(&ifp->if_snd, &tb);
705                 crit_exit();
706         }
707
708         return (error);
709 }
710 #endif /* ALTQ */
711
712 int
713 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
714 {
715         struct pf_ruleset       *rs;
716         struct pf_rule          *rule;
717
718         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
719                 return (EINVAL);
720         rs = pf_find_or_create_ruleset(anchor);
721         if (rs == NULL)
722                 return (EINVAL);
723         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
724                 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
725                 rs->rules[rs_num].inactive.rcount--;
726         }
727         *ticket = ++rs->rules[rs_num].inactive.ticket;
728         rs->rules[rs_num].inactive.open = 1;
729         return (0);
730 }
731
732 int
733 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
734 {
735         struct pf_ruleset       *rs;
736         struct pf_rule          *rule;
737
738         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
739                 return (EINVAL);
740         rs = pf_find_ruleset(anchor);
741         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
742             rs->rules[rs_num].inactive.ticket != ticket)
743                 return (0);
744         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
745                 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
746                 rs->rules[rs_num].inactive.rcount--;
747         }
748         rs->rules[rs_num].inactive.open = 0;
749         return (0);
750 }
751
752 #define PF_MD5_UPD(st, elm)                                             \
753                 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
754
755 #define PF_MD5_UPD_STR(st, elm)                                         \
756                 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
757
758 #define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
759                 (stor) = htonl((st)->elm);                              \
760                 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
761 } while (0)
762
763 #define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
764                 (stor) = htons((st)->elm);                              \
765                 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
766 } while (0)
767
768 void
769 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
770 {
771         PF_MD5_UPD(pfr, addr.type);
772         switch (pfr->addr.type) {
773                 case PF_ADDR_DYNIFTL:
774                         PF_MD5_UPD(pfr, addr.v.ifname);
775                         PF_MD5_UPD(pfr, addr.iflags);
776                         break;
777                 case PF_ADDR_TABLE:
778                         PF_MD5_UPD(pfr, addr.v.tblname);
779                         break;
780                 case PF_ADDR_ADDRMASK:
781                         /* XXX ignore af? */
782                         PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
783                         PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
784                         break;
785                 case PF_ADDR_RTLABEL:
786                         PF_MD5_UPD(pfr, addr.v.rtlabelname);
787                         break;
788         }
789
790         PF_MD5_UPD(pfr, port[0]);
791         PF_MD5_UPD(pfr, port[1]);
792         PF_MD5_UPD(pfr, neg);
793         PF_MD5_UPD(pfr, port_op);
794 }
795
796 void
797 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
798 {
799         u_int16_t x;
800         u_int32_t y;
801
802         pf_hash_rule_addr(ctx, &rule->src);
803         pf_hash_rule_addr(ctx, &rule->dst);
804         PF_MD5_UPD_STR(rule, label);
805         PF_MD5_UPD_STR(rule, ifname);
806         PF_MD5_UPD_STR(rule, match_tagname);
807         PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
808         PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
809         PF_MD5_UPD_HTONL(rule, prob, y);
810         PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
811         PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
812         PF_MD5_UPD(rule, uid.op);
813         PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
814         PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
815         PF_MD5_UPD(rule, gid.op);
816         PF_MD5_UPD_HTONL(rule, rule_flag, y);
817         PF_MD5_UPD(rule, action);
818         PF_MD5_UPD(rule, direction);
819         PF_MD5_UPD(rule, af);
820         PF_MD5_UPD(rule, quick);
821         PF_MD5_UPD(rule, ifnot);
822         PF_MD5_UPD(rule, match_tag_not);
823         PF_MD5_UPD(rule, natpass);
824         PF_MD5_UPD(rule, keep_state);
825         PF_MD5_UPD(rule, proto);
826         PF_MD5_UPD(rule, type);
827         PF_MD5_UPD(rule, code);
828         PF_MD5_UPD(rule, flags);
829         PF_MD5_UPD(rule, flagset);
830         PF_MD5_UPD(rule, allow_opts);
831         PF_MD5_UPD(rule, rt);
832         PF_MD5_UPD(rule, tos);
833 }
834
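/*
 * pf_commit_rules: swap the inactive rules in as the active ruleset,
 * recalculate the skip steps and purge the previously active rules.
 */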
835 int
836 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
837 {
838         struct pf_ruleset       *rs;
839         struct pf_rule          *rule, **old_array;
840         struct pf_rulequeue     *old_rules;
841         int                      error;
842         u_int32_t                old_rcount;
843
844         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
845                 return (EINVAL);
846         rs = pf_find_ruleset(anchor);
847         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
848             ticket != rs->rules[rs_num].inactive.ticket)
849                 return (EBUSY);
850
851         /* Calculate checksum for the main ruleset */
852         if (rs == &pf_main_ruleset) {
853                 error = pf_setup_pfsync_matching(rs);
854                 if (error != 0)
855                         return (error);
856         }
857
858         /* Swap rules, keep the old. */
859         crit_enter();
860         old_rules = rs->rules[rs_num].active.ptr;
861         old_rcount = rs->rules[rs_num].active.rcount;
862         old_array = rs->rules[rs_num].active.ptr_array;
863
864         rs->rules[rs_num].active.ptr =
865             rs->rules[rs_num].inactive.ptr;
866         rs->rules[rs_num].active.ptr_array =
867             rs->rules[rs_num].inactive.ptr_array;
868         rs->rules[rs_num].active.rcount =
869             rs->rules[rs_num].inactive.rcount;
870         rs->rules[rs_num].inactive.ptr = old_rules;
871         rs->rules[rs_num].inactive.ptr_array = old_array;
872         rs->rules[rs_num].inactive.rcount = old_rcount;
873
874         rs->rules[rs_num].active.ticket =
875             rs->rules[rs_num].inactive.ticket;
876         pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
877
878
879         /* Purge the old rule list. */
880         while ((rule = TAILQ_FIRST(old_rules)) != NULL)
881                 pf_rm_rule(old_rules, rule);
882         if (rs->rules[rs_num].inactive.ptr_array)
883                 kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
884         rs->rules[rs_num].inactive.ptr_array = NULL;
885         rs->rules[rs_num].inactive.rcount = 0;
886         rs->rules[rs_num].inactive.open = 0;
887         pf_remove_if_empty_ruleset(rs);
888         crit_exit();
889         return (0);
890 }
891
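/*
 * pf_setup_pfsync_matching: compute an MD5 checksum over the ruleset
 * (and build the inactive rule pointer arrays) so pfsync peers can
 * detect mismatched rulesets.
 */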
892 int
893 pf_setup_pfsync_matching(struct pf_ruleset *rs)
894 {
895         MD5_CTX                  ctx;
896         struct pf_rule          *rule;
897         int                      rs_cnt;
898         u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];
899
900         MD5Init(&ctx);
901         for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
902                 /* XXX PF_RULESET_SCRUB as well? */
903                 if (rs_cnt == PF_RULESET_SCRUB)
904                         continue;
905
906                 if (rs->rules[rs_cnt].inactive.ptr_array)
907                         kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
908                 rs->rules[rs_cnt].inactive.ptr_array = NULL;
909
910                 if (rs->rules[rs_cnt].inactive.rcount) {
911                         rs->rules[rs_cnt].inactive.ptr_array =
912                             kmalloc(sizeof(caddr_t) *
913                                     rs->rules[rs_cnt].inactive.rcount,
914                                     M_TEMP, M_WAITOK);
915                 }
916
917                 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
918                     entries) {
919                         pf_hash_rule(&ctx, rule);
920                         (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
921                 }
922         }
923
924         MD5Final(digest, &ctx);
925         memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
926         return (0);
927 }
928
929 int
930 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
931     sa_family_t af)
932 {
933         if (pfi_dynaddr_setup(addr, af) ||
934             pf_tbladdr_setup(ruleset, addr))
935                 return (EINVAL);
936
937         return (0);
938 }
939
940 void
941 pf_addr_copyout(struct pf_addr_wrap *addr)
942 {
943         pfi_dynaddr_copyout(addr);
944         pf_tbladdr_copyout(addr);
945         pf_rtlabel_copyout(addr);
946 }
947
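/*
 * pfioctl: handler for /dev/pf ioctls.  Restricted commands are rejected
 * with EPERM at securelevel > 1 and with EACCES when the descriptor was
 * not opened for writing; the rest are dispatched in the switch below.
 */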
948 int
949 pfioctl(struct dev_ioctl_args *ap)
950 {
951         u_long cmd = ap->a_cmd;
952         caddr_t addr = ap->a_data;
953         struct pf_pooladdr      *pa = NULL;
954         struct pf_pool          *pool = NULL;
955         int                      error = 0;
956
957         lwkt_gettoken(&pf_token);
958
959         /* XXX keep in sync with switch() below */
960         if (securelevel > 1) {
961                 switch (cmd) {
962                 case DIOCGETRULES:
963                 case DIOCGETRULE:
964                 case DIOCGETADDRS:
965                 case DIOCGETADDR:
966                 case DIOCGETSTATE:
967                 case DIOCSETSTATUSIF:
968                 case DIOCGETSTATUS:
969                 case DIOCCLRSTATUS:
970                 case DIOCNATLOOK:
971                 case DIOCSETDEBUG:
972                 case DIOCGETSTATES:
973                 case DIOCGETTIMEOUT:
974                 case DIOCCLRRULECTRS:
975                 case DIOCGETLIMIT:
976                 case DIOCGETALTQS:
977                 case DIOCGETALTQ:
978                 case DIOCGETQSTATS:
979                 case DIOCGETRULESETS:
980                 case DIOCGETRULESET:
981                 case DIOCRGETTABLES:
982                 case DIOCRGETTSTATS:
983                 case DIOCRCLRTSTATS:
984                 case DIOCRCLRADDRS:
985                 case DIOCRADDADDRS:
986                 case DIOCRDELADDRS:
987                 case DIOCRSETADDRS:
988                 case DIOCRGETADDRS:
989                 case DIOCRGETASTATS:
990                 case DIOCRCLRASTATS:
991                 case DIOCRTSTADDRS:
992                 case DIOCOSFPGET:
993                 case DIOCGETSRCNODES:
994                 case DIOCCLRSRCNODES:
995                 case DIOCIGETIFACES:
996                 case DIOCSETIFFLAG:
997                 case DIOCCLRIFFLAG:
998                 case DIOCGIFSPEED:
999                         break;
1000                 case DIOCRCLRTABLES:
1001                 case DIOCRADDTABLES:
1002                 case DIOCRDELTABLES:
1003                 case DIOCRSETTFLAGS:
1004                         if (((struct pfioc_table *)addr)->pfrio_flags &
1005                             PFR_FLAG_DUMMY)
1006                                 break; /* dummy operation ok */
1007                         lwkt_reltoken(&pf_token);
1008                         return (EPERM);
1009                 default:
1010                         lwkt_reltoken(&pf_token);
1011                         return (EPERM);
1012                 }
1013         }
1014
1015         if (!(ap->a_fflag & FWRITE)) {
1016                 switch (cmd) {
1017                 case DIOCGETRULES:
1018                 case DIOCGETADDRS:
1019                 case DIOCGETADDR:
1020                 case DIOCGETSTATE:
1021                 case DIOCGETSTATUS:
1022                 case DIOCGETSTATES:
1023                 case DIOCGETTIMEOUT:
1024                 case DIOCGETLIMIT:
1025                 case DIOCGETALTQS:
1026                 case DIOCGETALTQ:
1027                 case DIOCGETQSTATS:
1028                 case DIOCGETRULESETS:
1029                 case DIOCGETRULESET:
1030                 case DIOCNATLOOK:
1031                 case DIOCRGETTABLES:
1032                 case DIOCRGETTSTATS:
1033                 case DIOCRGETADDRS:
1034                 case DIOCRGETASTATS:
1035                 case DIOCRTSTADDRS:
1036                 case DIOCOSFPGET:
1037                 case DIOCGETSRCNODES:
1038                 case DIOCIGETIFACES:
1039                 case DIOCGIFSPEED:
1040                         break;
1041                 case DIOCRCLRTABLES:
1042                 case DIOCRADDTABLES:
1043                 case DIOCRDELTABLES:
1044                 case DIOCRCLRTSTATS:
1045                 case DIOCRCLRADDRS:
1046                 case DIOCRADDADDRS:
1047                 case DIOCRDELADDRS:
1048                 case DIOCRSETADDRS:
1049                 case DIOCRSETTFLAGS:
1050                         if (((struct pfioc_table *)addr)->pfrio_flags &
1051                             PFR_FLAG_DUMMY)
1052                                 break; /* dummy operation ok */
1053                         lwkt_reltoken(&pf_token);
1054                         return (EACCES);
1055                 case DIOCGETRULE:
1056                         if (((struct pfioc_rule *)addr)->action ==
1057                             PF_GET_CLR_CNTR) {
1058                                 lwkt_reltoken(&pf_token);
1059                                 return (EACCES);
1060                         }
1061                         break;
1062                 default:
1063                         lwkt_reltoken(&pf_token);
1064                         return (EACCES);
1065                 }
1066         }
1067
1068         switch (cmd) {
1069         case DIOCSTART:
1070                 if (pf_status.running)
1071                         error = EEXIST;
1072                 else {
1073                         error = hook_pf();
1074                         if (error) {
1075                                 DPFPRINTF(PF_DEBUG_MISC,
1076                                     ("pf: pfil registration fail\n"));
1077                                 break;
1078                         }
1079                         pf_status.running = 1;
1080                         pf_status.since = time_second;
1081                         if (pf_status.stateid == 0) {
1082                                 pf_status.stateid = time_second;
1083                                 pf_status.stateid = pf_status.stateid << 32;
1084                         }
1085                         DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1086                 }
1087                 break;
1088
1089         case DIOCSTOP:
1090                 if (!pf_status.running)
1091                         error = ENOENT;
1092                 else {
1093                         pf_status.running = 0;
1094                         error = dehook_pf();
1095                         if (error) {
1096                                 pf_status.running = 1;
1097                                 DPFPRINTF(PF_DEBUG_MISC,
1098                                         ("pf: pfil unregistration failed\n"));
1099                         }
1100                         pf_status.since = time_second;
1101                         DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1102                 }
1103                 break;
1104
1105         case DIOCADDRULE: {
1106                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
1107                 struct pf_ruleset       *ruleset;
1108                 struct pf_rule          *rule, *tail;
1109                 struct pf_pooladdr      *pa;
1110                 int                      rs_num;
1111
1112                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1113                 ruleset = pf_find_ruleset(pr->anchor);
1114                 if (ruleset == NULL) {
1115                         error = EINVAL;
1116                         break;
1117                 }
1118                 rs_num = pf_get_ruleset_number(pr->rule.action);
1119                 if (rs_num >= PF_RULESET_MAX) {
1120                         error = EINVAL;
1121                         break;
1122                 }
1123                 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1124                         error = EINVAL;
1125                         break;
1126                 }
1127                 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1128                         error = EBUSY;
1129                         break;
1130                 }
1131                 if (pr->pool_ticket != ticket_pabuf) {
1132                         error = EBUSY;
1133                         break;
1134                 }
1135                 rule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK);
1136                 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1137                 rule->cuid = ap->a_cred->cr_ruid;
1138                 rule->cpid = 0;
1139                 rule->anchor = NULL;
1140                 rule->kif = NULL;
1141                 TAILQ_INIT(&rule->rpool.list);
1142                 /* initialize refcounting */
1143                 rule->states_cur = 0;
1144                 rule->src_nodes = 0;
1145                 rule->entries.tqe_prev = NULL;
1146 #ifndef INET
1147                 if (rule->af == AF_INET) {
1148                         kfree(rule, M_PFRULEPL);
1149                         error = EAFNOSUPPORT;
1150                         break;
1151                 }
1152 #endif /* INET */
1153 #ifndef INET6
1154                 if (rule->af == AF_INET6) {
1155                         kfree(rule, M_PFRULEPL);
1156                         error = EAFNOSUPPORT;
1157                         break;
1158                 }
1159 #endif /* INET6 */
1160                 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1161                     pf_rulequeue);
1162                 if (tail)
1163                         rule->nr = tail->nr + 1;
1164                 else
1165                         rule->nr = 0;
1166                 if (rule->ifname[0]) {
1167                         rule->kif = pfi_kif_get(rule->ifname);
1168                         if (rule->kif == NULL) {
1169                                 kfree(rule, M_PFRULEPL);
1170                                 error = EINVAL;
1171                                 break;
1172                         }
1173                         pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1174                 }
1175
1176                 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
1177                         error = EBUSY;
1178
1179 #ifdef ALTQ
1180                 /* set queue IDs */
1181                 if (rule->qname[0] != 0) {
1182                         if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1183                                 error = EBUSY;
1184                         else if (rule->pqname[0] != 0) {
1185                                 if ((rule->pqid =
1186                                     pf_qname2qid(rule->pqname)) == 0)
1187                                         error = EBUSY;
1188                         } else
1189                                 rule->pqid = rule->qid;
1190                 }
1191 #endif
1192                 if (rule->tagname[0])
1193                         if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1194                                 error = EBUSY;
1195                 if (rule->match_tagname[0])
1196                         if ((rule->match_tag =
1197                             pf_tagname2tag(rule->match_tagname)) == 0)
1198                                 error = EBUSY;
1199                 if (rule->rt && !rule->direction)
1200                         error = EINVAL;
1201 #if NPFLOG > 0
1202                 if (!rule->log)
1203                         rule->logif = 0;
1204                 if (rule->logif >= PFLOGIFS_MAX)
1205                         error = EINVAL;
1206 #endif
1207                 if (pf_rtlabel_add(&rule->src.addr) ||
1208                     pf_rtlabel_add(&rule->dst.addr))
1209                         error = EBUSY;
1210                 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1211                         error = EINVAL;
1212                 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1213                         error = EINVAL;
1214                 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1215                         error = EINVAL;
1216                 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1217                         if (pf_tbladdr_setup(ruleset, &pa->addr))
1218                                 error = EINVAL;
1219
1220                 if (rule->overload_tblname[0]) {
1221                         if ((rule->overload_tbl = pfr_attach_table(ruleset,
1222                             rule->overload_tblname)) == NULL)
1223                                 error = EINVAL;
1224                         else
1225                                 rule->overload_tbl->pfrkt_flags |=
1226                                     PFR_TFLAG_ACTIVE;
1227                 }
1228
1229                 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1230                 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1231                     (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1232                     (rule->rt > PF_FASTROUTE)) &&
1233                     (TAILQ_FIRST(&rule->rpool.list) == NULL))
1234                         error = EINVAL;
1235
1236                 if (error) {
1237                         pf_rm_rule(NULL, rule);
1238                         break;
1239                 }
1240                 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1241                 rule->evaluations = rule->packets[0] = rule->packets[1] =
1242                     rule->bytes[0] = rule->bytes[1] = 0;
1243                 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1244                     rule, entries);
1245                 ruleset->rules[rs_num].inactive.rcount++;
1246                 break;
1247         }
1248
1249         case DIOCGETRULES: {
1250                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
1251                 struct pf_ruleset       *ruleset;
1252                 struct pf_rule          *tail;
1253                 int                      rs_num;
1254
1255                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1256                 ruleset = pf_find_ruleset(pr->anchor);
1257                 if (ruleset == NULL) {
1258                         error = EINVAL;
1259                         break;
1260                 }
1261                 rs_num = pf_get_ruleset_number(pr->rule.action);
1262                 if (rs_num >= PF_RULESET_MAX) {
1263                         error = EINVAL;
1264                         break;
1265                 }
1266                 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1267                     pf_rulequeue);
1268                 if (tail)
1269                         pr->nr = tail->nr + 1;
1270                 else
1271                         pr->nr = 0;
1272                 pr->ticket = ruleset->rules[rs_num].active.ticket;
1273                 break;
1274         }
1275
1276         case DIOCGETRULE: {
1277                 struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
1278                 struct pf_ruleset       *ruleset;
1279                 struct pf_rule          *rule;
1280                 int                      rs_num, i;
1281
1282                 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1283                 ruleset = pf_find_ruleset(pr->anchor);
1284                 if (ruleset == NULL) {
1285                         error = EINVAL;
1286                         break;
1287                 }
1288                 rs_num = pf_get_ruleset_number(pr->rule.action);
1289                 if (rs_num >= PF_RULESET_MAX) {
1290                         error = EINVAL;
1291                         break;
1292                 }
1293                 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1294                         error = EBUSY;
1295                         break;
1296                 }
1297                 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1298                 while ((rule != NULL) && (rule->nr != pr->nr))
1299                         rule = TAILQ_NEXT(rule, entries);
1300                 if (rule == NULL) {
1301                         error = EBUSY;
1302                         break;
1303                 }
1304                 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1305                 if (pf_anchor_copyout(ruleset, rule, pr)) {
1306                         error = EBUSY;
1307                         break;
1308                 }
1309                 pf_addr_copyout(&pr->rule.src.addr);
1310                 pf_addr_copyout(&pr->rule.dst.addr);
1311                 for (i = 0; i < PF_SKIP_COUNT; ++i)
1312                         if (rule->skip[i].ptr == NULL)
1313                                 pr->rule.skip[i].nr = (uint32_t)(-1);
1314                         else
1315                                 pr->rule.skip[i].nr =
1316                                     rule->skip[i].ptr->nr;
1317
1318                 if (pr->action == PF_GET_CLR_CNTR) {
1319                         rule->evaluations = 0;
1320                         rule->packets[0] = rule->packets[1] = 0;
1321                         rule->bytes[0] = rule->bytes[1] = 0;
1322                         rule->states_tot = 0;
1323                 }
1324                 break;
1325         }
1326
1327         case DIOCCHANGERULE: {
1328                 struct pfioc_rule       *pcr = (struct pfioc_rule *)addr;
1329                 struct pf_ruleset       *ruleset;
1330                 struct pf_rule          *oldrule = NULL, *newrule = NULL;
1331                 u_int32_t                nr = 0;
1332                 int                      rs_num;
1333
1334                 if (!(pcr->action == PF_CHANGE_REMOVE ||
1335                     pcr->action == PF_CHANGE_GET_TICKET) &&
1336                     pcr->pool_ticket != ticket_pabuf) {
1337                         error = EBUSY;
1338                         break;
1339                 }
1340
1341                 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1342                     pcr->action > PF_CHANGE_GET_TICKET) {
1343                         error = EINVAL;
1344                         break;
1345                 }
1346                 ruleset = pf_find_ruleset(pcr->anchor);
1347                 if (ruleset == NULL) {
1348                         error = EINVAL;
1349                         break;
1350                 }
1351                 rs_num = pf_get_ruleset_number(pcr->rule.action);
1352                 if (rs_num >= PF_RULESET_MAX) {
1353                         error = EINVAL;
1354                         break;
1355                 }
1356
1357                 if (pcr->action == PF_CHANGE_GET_TICKET) {
1358                         pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1359                         break;
1360                 } else {
1361                         if (pcr->ticket !=
1362                             ruleset->rules[rs_num].active.ticket) {
1363                                 error = EINVAL;
1364                                 break;
1365                         }
1366                         if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1367                                 error = EINVAL;
1368                                 break;
1369                         }
1370                 }
1371
1372                 if (pcr->action != PF_CHANGE_REMOVE) {
1373                         newrule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK|M_NULLOK);
1374                         if (newrule == NULL) {
1375                                 error = ENOMEM;
1376                                 break;
1377                         }
1378                         bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1379                         newrule->cuid = ap->a_cred->cr_ruid;
1380                         newrule->cpid = 0;
1381                         TAILQ_INIT(&newrule->rpool.list);
1382                         /* initialize refcounting */
1383                         newrule->states_cur = 0;
1384                         newrule->entries.tqe_prev = NULL;
1385 #ifndef INET
1386                         if (newrule->af == AF_INET) {
1387                                 kfree(newrule, M_PFRULEPL);
1388                                 error = EAFNOSUPPORT;
1389                                 break;
1390                         }
1391 #endif /* INET */
1392 #ifndef INET6
1393                         if (newrule->af == AF_INET6) {
1394                                 kfree(newrule, M_PFRULEPL);
1395                                 error = EAFNOSUPPORT;
1396                                 break;
1397                         }
1398 #endif /* INET6 */
1399                         if (newrule->ifname[0]) {
1400                                 newrule->kif = pfi_kif_get(newrule->ifname);
1401                                 if (newrule->kif == NULL) {
1402                                         kfree(newrule, M_PFRULEPL);
1403                                         error = EINVAL;
1404                                         break;
1405                                 }
1406                                 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1407                         } else
1408                                 newrule->kif = NULL;
1409
1410                         if (newrule->rtableid > 0 &&
1411                             newrule->rtableid > rt_numfibs)
1412                                 error = EBUSY;
1413
1414 #ifdef ALTQ
1415                         /* set queue IDs */
1416                         if (newrule->qname[0] != 0) {
1417                                 if ((newrule->qid =
1418                                     pf_qname2qid(newrule->qname)) == 0)
1419                                         error = EBUSY;
1420                                 else if (newrule->pqname[0] != 0) {
1421                                         if ((newrule->pqid =
1422                                             pf_qname2qid(newrule->pqname)) == 0)
1423                                                 error = EBUSY;
1424                                 } else
1425                                         newrule->pqid = newrule->qid;
1426                         }
1427 #endif /* ALTQ */
1428                         if (newrule->tagname[0])
1429                                 if ((newrule->tag =
1430                                     pf_tagname2tag(newrule->tagname)) == 0)
1431                                         error = EBUSY;
1432                         if (newrule->match_tagname[0])
1433                                 if ((newrule->match_tag = pf_tagname2tag(
1434                                     newrule->match_tagname)) == 0)
1435                                         error = EBUSY;
1436                         if (newrule->rt && !newrule->direction)
1437                                 error = EINVAL;
1438 #if NPFLOG > 0
1439                         if (!newrule->log)
1440                                 newrule->logif = 0;
1441                         if (newrule->logif >= PFLOGIFS_MAX)
1442                                 error = EINVAL;
1443 #endif
1444                         if (pf_rtlabel_add(&newrule->src.addr) ||
1445                             pf_rtlabel_add(&newrule->dst.addr))
1446                                 error = EBUSY;
1447                         if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1448                                 error = EINVAL;
1449                         if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1450                                 error = EINVAL;
1451                         if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1452                                 error = EINVAL;
1453                         TAILQ_FOREACH(pa, &pf_pabuf, entries)
1454                                 if (pf_tbladdr_setup(ruleset, &pa->addr))
1455                                         error = EINVAL;
1456
1457                         if (newrule->overload_tblname[0]) {
1458                                 if ((newrule->overload_tbl = pfr_attach_table(
1459                                     ruleset, newrule->overload_tblname)) ==
1460                                     NULL)
1461                                         error = EINVAL;
1462                                 else
1463                                         newrule->overload_tbl->pfrkt_flags |=
1464                                             PFR_TFLAG_ACTIVE;
1465                         }
1466
1467                         pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1468                         if ((newrule->action == PF_NAT ||
1469                             newrule->action == PF_RDR ||
1470                             newrule->action == PF_BINAT ||
1471                             newrule->rt > PF_FASTROUTE) &&
1472                             !newrule->anchor &&
1473                             TAILQ_FIRST(&newrule->rpool.list) == NULL)
1474                                 error = EINVAL;
1475
1476                         if (error) {
1477                                 pf_rm_rule(NULL, newrule);
1478                                 break;
1479                         }
1480                         newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1481                         newrule->evaluations = 0;
1482                         newrule->packets[0] = newrule->packets[1] = 0;
1483                         newrule->bytes[0] = newrule->bytes[1] = 0;
1484                 }
1485                 pf_empty_pool(&pf_pabuf);
1486
1487                 if (pcr->action == PF_CHANGE_ADD_HEAD)
1488                         oldrule = TAILQ_FIRST(
1489                             ruleset->rules[rs_num].active.ptr);
1490                 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1491                         oldrule = TAILQ_LAST(
1492                             ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1493                 else {
1494                         oldrule = TAILQ_FIRST(
1495                             ruleset->rules[rs_num].active.ptr);
1496                         while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1497                                 oldrule = TAILQ_NEXT(oldrule, entries);
1498                         if (oldrule == NULL) {
1499                                 if (newrule != NULL)
1500                                         pf_rm_rule(NULL, newrule);
1501                                 error = EINVAL;
1502                                 break;
1503                         }
1504                 }
1505
1506                 if (pcr->action == PF_CHANGE_REMOVE) {
1507                         pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1508                         ruleset->rules[rs_num].active.rcount--;
1509                 } else {
1510                         if (oldrule == NULL)
1511                                 TAILQ_INSERT_TAIL(
1512                                     ruleset->rules[rs_num].active.ptr,
1513                                     newrule, entries);
1514                         else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1515                             pcr->action == PF_CHANGE_ADD_BEFORE)
1516                                 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1517                         else
1518                                 TAILQ_INSERT_AFTER(
1519                                     ruleset->rules[rs_num].active.ptr,
1520                                     oldrule, newrule, entries);
1521                         ruleset->rules[rs_num].active.rcount++;
1522                 }
1523
1524                 nr = 0;
1525                 TAILQ_FOREACH(oldrule,
1526                     ruleset->rules[rs_num].active.ptr, entries)
1527                         oldrule->nr = nr++;
1528
1529                 ruleset->rules[rs_num].active.ticket++;
1530
1531                 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1532                 pf_remove_if_empty_ruleset(ruleset);
1533
1534                 break;
1535         }
1536
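        /*
         * The state table is replicated per CPU (tree_id[] and
         * state_list[]).  The state ioctls below therefore migrate the
         * current thread to each CPU in turn with lwkt_setcpu_self()
         * so every per-CPU tree is walked on its owning CPU, then
         * migrate back to the saved globaldata (save_gd) when done.
         */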
1537         case DIOCCLRSTATES: {
1538                 struct pf_state         *s, *nexts;
1539                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1540                 u_int                    killed = 0;
1541                 globaldata_t save_gd = mycpu;
1542                 int nn;
1543
1544                 for (nn = 0; nn < ncpus; ++nn) {
1545                         lwkt_setcpu_self(globaldata_find(nn));
1546                         for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1547                              s; s = nexts) {
1548                                 nexts = RB_NEXT(pf_state_tree_id,
1549                                                 &tree_id[nn], s);
1550
1551                                 if (!psk->psk_ifname[0] ||
1552                                     !strcmp(psk->psk_ifname,
1553                                             s->kif->pfik_name)) {
1554                                         /*
1555                                          * don't send out individual
1556                                          * delete messages
1557                                          */
1558                                         s->sync_flags = PFSTATE_NOSYNC;
1559                                         pf_unlink_state(s);
1560                                         killed++;
1561                                 }
1562                         }
1563                 }
1564                 lwkt_setcpu_self(save_gd);
1565                 psk->psk_killed = killed;
1566                 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1567                 break;
1568         }
1569
1570         case DIOCKILLSTATES: {
1571                 struct pf_state         *s, *nexts;
1572                 struct pf_state_key     *sk;
1573                 struct pf_addr          *srcaddr, *dstaddr;
1574                 u_int16_t                srcport, dstport;
1575                 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1576                 u_int                    killed = 0;
1577                 globaldata_t save_gd = mycpu;
1578                 int nn;
1579
1580                 if (psk->psk_pfcmp.id) {
1581                         if (psk->psk_pfcmp.creatorid == 0)
1582                                 psk->psk_pfcmp.creatorid = pf_status.hostid;
1583                         for (nn = 0; nn < ncpus; ++nn) {
1584                                 lwkt_setcpu_self(globaldata_find(nn));
1585                                 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1586                                         /* send immediate delete of state */
1587                                         pfsync_delete_state(s);
1588                                         s->sync_flags |= PFSTATE_NOSYNC;
1589                                         pf_unlink_state(s);
1590                                         ++psk->psk_killed;
1591                                 }
1592                         }
1593                         lwkt_setcpu_self(save_gd);
1594                         break;
1595                 }
1596
1597                 for (nn = 0; nn < ncpus; ++nn) {
1598                     lwkt_setcpu_self(globaldata_find(nn));
1599                     for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]);
1600                          s; s = nexts) {
1601                             nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
1602                             sk = s->key[PF_SK_WIRE];
1603
1604                             if (s->direction == PF_OUT) {
1605                                     srcaddr = &sk->addr[1];
1606                                     dstaddr = &sk->addr[0];
1607                                     srcport = sk->port[1];
1608                                     dstport = sk->port[0];
1609                             } else {
1610                                     srcaddr = &sk->addr[0];
1611                                     dstaddr = &sk->addr[1];
1612                                     srcport = sk->port[0];
1613                                     dstport = sk->port[1];
1614                             }
1615                             if ((!psk->psk_af || sk->af == psk->psk_af) &&
1616                                 (!psk->psk_proto ||
1617                                  psk->psk_proto == sk->proto) &&
1618                                 PF_MATCHA(psk->psk_src.neg,
1619                                           &psk->psk_src.addr.v.a.addr,
1620                                           &psk->psk_src.addr.v.a.mask,
1621                                           srcaddr, sk->af) &&
1622                                 PF_MATCHA(psk->psk_dst.neg,
1623                                           &psk->psk_dst.addr.v.a.addr,
1624                                           &psk->psk_dst.addr.v.a.mask,
1625                                           dstaddr, sk->af) &&
1626                                 (psk->psk_src.port_op == 0 ||
1627                                  pf_match_port(psk->psk_src.port_op,
1628                                                psk->psk_src.port[0],
1629                                                psk->psk_src.port[1],
1630                                                srcport)) &&
1631                                 (psk->psk_dst.port_op == 0 ||
1632                                  pf_match_port(psk->psk_dst.port_op,
1633                                                psk->psk_dst.port[0],
1634                                                psk->psk_dst.port[1],
1635                                                dstport)) &&
1636                                 (!psk->psk_label[0] ||
1637                                  (s->rule.ptr->label[0] &&
1638                                   !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1639                                 (!psk->psk_ifname[0] ||
1640                                  !strcmp(psk->psk_ifname, s->kif->pfik_name))) {
1641                                     /* send immediate delete of state */
1642                                     pfsync_delete_state(s);
1643                                     s->sync_flags |= PFSTATE_NOSYNC;
1644                                     pf_unlink_state(s);
1645                                     killed++;
1646                             }
1647                     }
1648                 }
1649                 lwkt_setcpu_self(save_gd);
1650                 psk->psk_killed = killed;
1651                 break;
1652         }
1653
1654         case DIOCADDSTATE: {
1655                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
1656                 struct pfsync_state     *sp = &ps->state;
1657
1658                 if (sp->timeout >= PFTM_MAX &&
1659                     sp->timeout != PFTM_UNTIL_PACKET) {
1660                         error = EINVAL;
1661                         break;
1662                 }
1663                 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1664                 break;
1665         }
1666
1667         case DIOCGETSTATE: {
1668                 struct pfioc_state      *ps = (struct pfioc_state *)addr;
1669                 struct pf_state         *s;
1670                 struct pf_state_cmp      id_key;
1671                 globaldata_t save_gd = mycpu;
1672                 int nn;
1673
1674                 bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
1675                 id_key.creatorid = ps->state.creatorid;
1676                 s = NULL;
1677                 for (nn = 0; nn < ncpus; ++nn) {
1678                         lwkt_setcpu_self(globaldata_find(nn));
1679                         s = pf_find_state_byid(&id_key);
1680                         if (s)
1681                                 break;
1682                 }
1683                 if (s) {
1684                         pfsync_state_export(&ps->state, s);
1685                 } else {
1686                         error = ENOENT;
1687                 }
1688                 lwkt_setcpu_self(save_gd);
1689                 break;
1690         }
1691
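        /*
         * DIOCGETSTATES: a ps_len of zero is a size probe; the kernel
         * only reports the space needed for the current state count.
         * Otherwise each state is staged in one kernel bounce buffer
         * (pstore) and copied out a pfsync_state at a time until
         * ps_len is exhausted.  An illustrative (unofficial) userland
         * sketch of the resulting two-pass protocol:
         *
         *      struct pfioc_states ps;
         *
         *      memset(&ps, 0, sizeof(ps));
         *      ioctl(dev, DIOCGETSTATES, &ps);    (size probe)
         *      ps.ps_buf = malloc(ps.ps_len);
         *      ioctl(dev, DIOCGETSTATES, &ps);    (fills ps_buf)
         */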
1692         case DIOCGETSTATES: {
1693                 struct pfioc_states     *ps = (struct pfioc_states *)addr;
1694                 struct pf_state         *state;
1695                 struct pfsync_state     *p, *pstore;
1696                 u_int32_t                nr = 0;
1697                 globaldata_t save_gd = mycpu;
1698                 int nn;
1699
1700                 if (ps->ps_len == 0) {
1701                         nr = pf_status.states;
1702                         ps->ps_len = sizeof(struct pfsync_state) * nr;
1703                         break;
1704                 }
1705
1706                 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1707
1708                 p = ps->ps_states;
1709
1710                 for (nn = 0; nn < ncpus; ++nn) {
1711                         lwkt_setcpu_self(globaldata_find(nn));
1712                         state = TAILQ_FIRST(&state_list[nn]);
1713                         while (state) {
1714                                 if (state->timeout != PFTM_UNLINKED) {
1715                                         if ((nr + 1) * sizeof(*p) >
1716                                             (unsigned)ps->ps_len) {
1717                                                 break;
1718                                         }
1719                                         pfsync_state_export(pstore, state);
1720                                         error = copyout(pstore, p, sizeof(*p));
1721                                         if (error) {
1722                                                 kfree(pstore, M_TEMP);
1723                                                 lwkt_setcpu_self(save_gd);
1724                                                 goto fail;
1725                                         }
1726                                         p++;
1727                                         nr++;
1728                                 }
1729                                 state = TAILQ_NEXT(state, entry_list);
1730                         }
1731                 }
1732                 lwkt_setcpu_self(save_gd);
1733                 ps->ps_len = sizeof(struct pfsync_state) * nr;
1734                 kfree(pstore, M_TEMP);
1735                 break;
1736         }
1737
1738         case DIOCGETSTATUS: {
1739                 struct pf_status *s = (struct pf_status *)addr;
1740                 bcopy(&pf_status, s, sizeof(struct pf_status));
1741                 pfi_update_status(s->ifname, s);
1742                 break;
1743         }
1744
1745         case DIOCSETSTATUSIF: {
1746                 struct pfioc_if *pi = (struct pfioc_if *)addr;
1747
1748                 if (pi->ifname[0] == 0) {
1749                         bzero(pf_status.ifname, IFNAMSIZ);
1750                         break;
1751                 }
1752                 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1753                 break;
1754         }
1755
1756         case DIOCCLRSTATUS: {
1757                 bzero(pf_status.counters, sizeof(pf_status.counters));
1758                 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1759                 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1760                 pf_status.since = time_second;
1761                 if (*pf_status.ifname)
1762                         pfi_update_status(pf_status.ifname, NULL);
1763                 break;
1764         }
1765
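        /*
         * DIOCNATLOOK: given the address/port pair a caller observed,
         * find the matching state and report the addresses and ports
         * on the other side of the translation in rsaddr/rsport and
         * rdaddr/rdport.  Typically used by transparent proxies (e.g.
         * ftp-proxy) to recover the original destination.
         */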
1766         case DIOCNATLOOK: {
1767                 struct pfioc_natlook    *pnl = (struct pfioc_natlook *)addr;
1768                 struct pf_state_key     *sk;
1769                 struct pf_state         *state;
1770                 struct pf_state_key_cmp  key;
1771                 int                      m = 0, direction = pnl->direction;
1772                 int                      sidx, didx;
1773                 globaldata_t save_gd = mycpu;
1774                 int nn;
1775
1776                 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1777                 sidx = (direction == PF_IN) ? 1 : 0;
1778                 didx = (direction == PF_IN) ? 0 : 1;
1779
1780                 if (!pnl->proto ||
1781                     PF_AZERO(&pnl->saddr, pnl->af) ||
1782                     PF_AZERO(&pnl->daddr, pnl->af) ||
1783                     ((pnl->proto == IPPROTO_TCP ||
1784                     pnl->proto == IPPROTO_UDP) &&
1785                     (!pnl->dport || !pnl->sport)))
1786                         error = EINVAL;
1787                 else {
1788                         key.af = pnl->af;
1789                         key.proto = pnl->proto;
1790                         PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1791                         key.port[sidx] = pnl->sport;
1792                         PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1793                         key.port[didx] = pnl->dport;
1794
1795                         state = NULL;
1796                         for (nn = 0; nn < ncpus; ++nn) {
1797                                 lwkt_setcpu_self(globaldata_find(nn));
1798                                 state = pf_find_state_all(&key, direction, &m);
1799                                 if (state || m > 1)
1800                                         break;
1801                                 m = 0;
1802                         }
1803
1804                         if (m > 1) {
1805                                 error = E2BIG;  /* more than one state */
1806                         } else if (state != NULL) {
1807                                 sk = state->key[sidx];
1808                                 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1809                                 pnl->rsport = sk->port[sidx];
1810                                 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1811                                 pnl->rdport = sk->port[didx];
1812                         } else {
1813                                 error = ENOENT;
1814                         }
1815                         lwkt_setcpu_self(save_gd);
1816                 }
1817                 break;
1818         }
1819
1820         case DIOCSETTIMEOUT: {
1821                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1822                 int              old;
1823
1824                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1825                     pt->seconds < 0) {
1826                         error = EINVAL;
1827                         goto fail;
1828                 }
1829                 old = pf_default_rule.timeout[pt->timeout];
1830                 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1831                         pt->seconds = 1;
1832                 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1833                 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1834                         wakeup(pf_purge_thread);
1835                 pt->seconds = old;
1836                 break;
1837         }
1838
1839         case DIOCGETTIMEOUT: {
1840                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1841
1842                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1843                         error = EINVAL;
1844                         goto fail;
1845                 }
1846                 pt->seconds = pf_default_rule.timeout[pt->timeout];
1847                 break;
1848         }
1849
1850         case DIOCGETLIMIT: {
1851                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
1852
1853                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1854                         error = EINVAL;
1855                         goto fail;
1856                 }
1857                 pl->limit = pf_pool_limits[pl->index].limit;
1858                 break;
1859         }
1860
1861         case DIOCSETLIMIT: {
1862                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
1863                 int                      old_limit;
1864
1865                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1866                     pf_pool_limits[pl->index].pp == NULL) {
1867                         error = EINVAL;
1868                         goto fail;
1869                 }
1870
1871                 /* XXX Get an API to set limits on the zone/pool */
1872                 old_limit = pf_pool_limits[pl->index].limit;
1873                 pf_pool_limits[pl->index].limit = pl->limit;
1874                 pl->limit = old_limit;
1875                 break;
1876         }
1877
1878         case DIOCSETDEBUG: {
1879                 u_int32_t       *level = (u_int32_t *)addr;
1880
1881                 pf_status.debug = *level;
1882                 break;
1883         }
1884
1885         case DIOCCLRRULECTRS: {
1886                 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1887                 struct pf_ruleset       *ruleset = &pf_main_ruleset;
1888                 struct pf_rule          *rule;
1889
1890                 TAILQ_FOREACH(rule,
1891                     ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1892                         rule->evaluations = 0;
1893                         rule->packets[0] = rule->packets[1] = 0;
1894                         rule->bytes[0] = rule->bytes[1] = 0;
1895                 }
1896                 break;
1897         }
1898
1899         case DIOCGIFSPEED: {
1900                 struct pf_ifspeed       *psp = (struct pf_ifspeed *)addr;
1901                 struct pf_ifspeed       ps;
1902                 struct ifnet            *ifp;
1903
1904                 if (psp->ifname[0] != 0) {
1905                         /* Can we completely trust user-land? */
1906                         strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1907                         ifp = ifunit(ps.ifname);
1908                         if (ifp)
1909                                 psp->baudrate = ifp->if_baudrate;
1910                         else
1911                                 error = EINVAL;
1912                 } else
1913                         error = EINVAL;
1914                 break;
1915         }
1916 #ifdef ALTQ
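        /*
         * ALTQ management: queue definitions are staged on the
         * inactive list under ticket_altqs_inactive (DIOCADDALTQ) and
         * become active when the ALTQ ruleset is committed via
         * DIOCXCOMMIT.  DIOCSTARTALTQ/DIOCSTOPALTQ below only toggle
         * the schedulers on the interface-level entries (qname empty)
         * of the active list.
         */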
1917         case DIOCSTARTALTQ: {
1918                 struct pf_altq          *altq;
1919
1920                 /* enable all altq interfaces on active list */
1921                 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1922                         if (altq->qname[0] == 0) {
1923                                 error = pf_enable_altq(altq);
1924                                 if (error != 0)
1925                                         break;
1926                         }
1927                 }
1928                 if (error == 0)
1929                         pf_altq_running = 1;
1930                 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1931                 break;
1932         }
1933
1934         case DIOCSTOPALTQ: {
1935                 struct pf_altq          *altq;
1936
1937                 /* disable all altq interfaces on active list */
1938                 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1939                         if (altq->qname[0] == 0) {
1940                                 error = pf_disable_altq(altq);
1941                                 if (error != 0)
1942                                         break;
1943                         }
1944                 }
1945                 if (error == 0)
1946                         pf_altq_running = 0;
1947                 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1948                 break;
1949         }
1950
1951         case DIOCADDALTQ: {
1952                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
1953                 struct pf_altq          *altq, *a;
1954
1955                 if (pa->ticket != ticket_altqs_inactive) {
1956                         error = EBUSY;
1957                         break;
1958                 }
1959                 altq = kmalloc(sizeof(struct pf_altq), M_PFALTQPL, M_WAITOK|M_NULLOK);
1960                 if (altq == NULL) {
1961                         error = ENOMEM;
1962                         break;
1963                 }
1964                 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1965
1966                 /*
1967                  * if this is for a queue, find the discipline and
1968                  * copy the necessary fields
1969                  */
1970                 if (altq->qname[0] != 0) {
1971                         if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1972                                 error = EBUSY;
1973                                 kfree(altq, M_PFALTQPL);
1974                                 break;
1975                         }
1976                         altq->altq_disc = NULL;
1977                         TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1978                                 if (strncmp(a->ifname, altq->ifname,
1979                                     IFNAMSIZ) == 0 && a->qname[0] == 0) {
1980                                         altq->altq_disc = a->altq_disc;
1981                                         break;
1982                                 }
1983                         }
1984                 }
1985
1986                 error = altq_add(altq);
1987                 if (error) {
1988                         kfree(altq, M_PFALTQPL);
1989                         break;
1990                 }
1991
1992                 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1993                 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1994                 break;
1995         }
1996
1997         case DIOCGETALTQS: {
1998                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
1999                 struct pf_altq          *altq;
2000
2001                 pa->nr = 0;
2002                 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2003                         pa->nr++;
2004                 pa->ticket = ticket_altqs_active;
2005                 break;
2006         }
2007
2008         case DIOCGETALTQ: {
2009                 struct pfioc_altq       *pa = (struct pfioc_altq *)addr;
2010                 struct pf_altq          *altq;
2011                 u_int32_t                nr;
2012
2013                 if (pa->ticket != ticket_altqs_active) {
2014                         error = EBUSY;
2015                         break;
2016                 }
2017                 nr = 0;
2018                 altq = TAILQ_FIRST(pf_altqs_active);
2019                 while ((altq != NULL) && (nr < pa->nr)) {
2020                         altq = TAILQ_NEXT(altq, entries);
2021                         nr++;
2022                 }
2023                 if (altq == NULL) {
2024                         error = EBUSY;
2025                         break;
2026                 }
2027                 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2028                 break;
2029         }
2030
2031         case DIOCCHANGEALTQ:
2032                 /* CHANGEALTQ not supported yet! */
2033                 error = ENODEV;
2034                 break;
2035
2036         case DIOCGETQSTATS: {
2037                 struct pfioc_qstats     *pq = (struct pfioc_qstats *)addr;
2038                 struct pf_altq          *altq;
2039                 u_int32_t                nr;
2040                 int                      nbytes;
2041
2042                 if (pq->ticket != ticket_altqs_active) {
2043                         error = EBUSY;
2044                         break;
2045                 }
2046                 nbytes = pq->nbytes;
2047                 nr = 0;
2048                 altq = TAILQ_FIRST(pf_altqs_active);
2049                 while ((altq != NULL) && (nr < pq->nr)) {
2050                         altq = TAILQ_NEXT(altq, entries);
2051                         nr++;
2052                 }
2053                 if (altq == NULL) {
2054                         error = EBUSY;
2055                         break;
2056                 }
2057                 error = altq_getqstats(altq, pq->buf, &nbytes);
2058                 if (error == 0) {
2059                         pq->scheduler = altq->scheduler;
2060                         pq->nbytes = nbytes;
2061                 }
2062                 break;
2063         }
2064 #endif /* ALTQ */
2065
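        /*
         * Pool address staging: DIOCBEGINADDRS empties pf_pabuf and
         * issues a fresh ticket; DIOCADDADDR appends addresses to the
         * buffer under that ticket until a rule ioctl moves the list
         * into the rule's pool (pf_mv_pool above).
         */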
2066         case DIOCBEGINADDRS: {
2067                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2068
2069                 pf_empty_pool(&pf_pabuf);
2070                 pp->ticket = ++ticket_pabuf;
2071                 break;
2072         }
2073
2074         case DIOCADDADDR: {
2075                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2076
2077                 if (pp->ticket != ticket_pabuf) {
2078                         error = EBUSY;
2079                         break;
2080                 }
2081 #ifndef INET
2082                 if (pp->af == AF_INET) {
2083                         error = EAFNOSUPPORT;
2084                         break;
2085                 }
2086 #endif /* INET */
2087 #ifndef INET6
2088                 if (pp->af == AF_INET6) {
2089                         error = EAFNOSUPPORT;
2090                         break;
2091                 }
2092 #endif /* INET6 */
2093                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2094                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2095                     pp->addr.addr.type != PF_ADDR_TABLE) {
2096                         error = EINVAL;
2097                         break;
2098                 }
2099                 pa = kmalloc(sizeof(struct pf_pooladdr), M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2100                 if (pa == NULL) {
2101                         error = ENOMEM;
2102                         break;
2103                 }
2104                 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2105                 if (pa->ifname[0]) {
2106                         pa->kif = pfi_kif_get(pa->ifname);
2107                         if (pa->kif == NULL) {
2108                                 kfree(pa, M_PFPOOLADDRPL);
2109                                 error = EINVAL;
2110                                 break;
2111                         }
2112                         pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2113                 }
2114                 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2115                         pfi_dynaddr_remove(&pa->addr);
2116                         pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2117                         kfree(pa, M_PFPOOLADDRPL);
2118                         error = EINVAL;
2119                         break;
2120                 }
2121                 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2122                 break;
2123         }
2124
2125         case DIOCGETADDRS: {
2126                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2127
2128                 pp->nr = 0;
2129                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2130                     pp->r_num, 0, 1, 0);
2131                 if (pool == NULL) {
2132                         error = EBUSY;
2133                         break;
2134                 }
2135                 TAILQ_FOREACH(pa, &pool->list, entries)
2136                         pp->nr++;
2137                 break;
2138         }
2139
2140         case DIOCGETADDR: {
2141                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
2142                 u_int32_t                nr = 0;
2143
2144                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2145                     pp->r_num, 0, 1, 1);
2146                 if (pool == NULL) {
2147                         error = EBUSY;
2148                         break;
2149                 }
2150                 pa = TAILQ_FIRST(&pool->list);
2151                 while ((pa != NULL) && (nr < pp->nr)) {
2152                         pa = TAILQ_NEXT(pa, entries);
2153                         nr++;
2154                 }
2155                 if (pa == NULL) {
2156                         error = EBUSY;
2157                         break;
2158                 }
2159                 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2160                 pf_addr_copyout(&pp->addr.addr);
2161                 break;
2162         }
2163
2164         case DIOCCHANGEADDR: {
2165                 struct pfioc_pooladdr   *pca = (struct pfioc_pooladdr *)addr;
2166                 struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
2167                 struct pf_ruleset       *ruleset;
2168
2169                 if (pca->action < PF_CHANGE_ADD_HEAD ||
2170                     pca->action > PF_CHANGE_REMOVE) {
2171                         error = EINVAL;
2172                         break;
2173                 }
2174                 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2175                     pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2176                     pca->addr.addr.type != PF_ADDR_TABLE) {
2177                         error = EINVAL;
2178                         break;
2179                 }
2180
2181                 ruleset = pf_find_ruleset(pca->anchor);
2182                 if (ruleset == NULL) {
2183                         error = EBUSY;
2184                         break;
2185                 }
2186                 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2187                     pca->r_num, pca->r_last, 1, 1);
2188                 if (pool == NULL) {
2189                         error = EBUSY;
2190                         break;
2191                 }
2192                 if (pca->action != PF_CHANGE_REMOVE) {
2193                         newpa = kmalloc(sizeof(struct pf_pooladdr),
2194                                 M_PFPOOLADDRPL, M_WAITOK|M_NULLOK);
2195                         if (newpa == NULL) {
2196                                 error = ENOMEM;
2197                                 break;
2198                         }
2199                         bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2200 #ifndef INET
2201                         if (pca->af == AF_INET) {
2202                                 kfree(newpa, M_PFPOOLADDRPL);
2203                                 error = EAFNOSUPPORT;
2204                                 break;
2205                         }
2206 #endif /* INET */
2207 #ifndef INET6
2208                         if (pca->af == AF_INET6) {
2209                                 kfree(newpa, M_PFPOOLADDRPL);
2210                                 error = EAFNOSUPPORT;
2211                                 break;
2212                         }
2213 #endif /* INET6 */
2214                         if (newpa->ifname[0]) {
2215                                 newpa->kif = pfi_kif_get(newpa->ifname);
2216                                 if (newpa->kif == NULL) {
2217                                         kfree(newpa, M_PFPOOLADDRPL);
2218                                         error = EINVAL;
2219                                         break;
2220                                 }
2221                                 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2222                         } else
2223                                 newpa->kif = NULL;
2224                         if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2225                             pf_tbladdr_setup(ruleset, &newpa->addr)) {
2226                                 pfi_dynaddr_remove(&newpa->addr);
2227                                 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2228                                 kfree(newpa, M_PFPOOLADDRPL);
2229                                 error = EINVAL;
2230                                 break;
2231                         }
2232                 }
2233
2234                 if (pca->action == PF_CHANGE_ADD_HEAD)
2235                         oldpa = TAILQ_FIRST(&pool->list);
2236                 else if (pca->action == PF_CHANGE_ADD_TAIL)
2237                         oldpa = TAILQ_LAST(&pool->list, pf_palist);
2238                 else {
2239                         int     i = 0;
2240
2241                         oldpa = TAILQ_FIRST(&pool->list);
2242                         while ((oldpa != NULL) && (i < pca->nr)) {
2243                                 oldpa = TAILQ_NEXT(oldpa, entries);
2244                                 i++;
2245                         }
2246                         if (oldpa == NULL) {
2247                                 error = EINVAL;
2248                                 break;
2249                         }
2250                 }
2251
2252                 if (pca->action == PF_CHANGE_REMOVE) {
2253                         TAILQ_REMOVE(&pool->list, oldpa, entries);
2254                         pfi_dynaddr_remove(&oldpa->addr);
2255                         pf_tbladdr_remove(&oldpa->addr);
2256                         pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2257                         kfree(oldpa, M_PFPOOLADDRPL);
2258                 } else {
2259                         if (oldpa == NULL)
2260                                 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2261                         else if (pca->action == PF_CHANGE_ADD_HEAD ||
2262                             pca->action == PF_CHANGE_ADD_BEFORE)
2263                                 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2264                         else
2265                                 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2266                                     newpa, entries);
2267                 }
2268
2269                 pool->cur = TAILQ_FIRST(&pool->list);
2270                 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2271                     pca->af);
2272                 break;
2273         }
2274
2275         case DIOCGETRULESETS: {
2276                 struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
2277                 struct pf_ruleset       *ruleset;
2278                 struct pf_anchor        *anchor;
2279
2280                 pr->path[sizeof(pr->path) - 1] = 0;
2281                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2282                         error = EINVAL;
2283                         break;
2284                 }
2285                 pr->nr = 0;
2286                 if (ruleset->anchor == NULL) {
2287                         /* XXX kludge for pf_main_ruleset */
2288                         RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2289                                 if (anchor->parent == NULL)
2290                                         pr->nr++;
2291                 } else {
2292                         RB_FOREACH(anchor, pf_anchor_node,
2293                             &ruleset->anchor->children)
2294                                 pr->nr++;
2295                 }
2296                 break;
2297         }
2298
2299         case DIOCGETRULESET: {
2300                 struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
2301                 struct pf_ruleset       *ruleset;
2302                 struct pf_anchor        *anchor;
2303                 u_int32_t                nr = 0;
2304
2305                 pr->path[sizeof(pr->path) - 1] = 0;
2306                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2307                         error = EINVAL;
2308                         break;
2309                 }
2310                 pr->name[0] = 0;
2311                 if (ruleset->anchor == NULL) {
2312                         /* XXX kludge for pf_main_ruleset */
2313                         RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2314                                 if (anchor->parent == NULL && nr++ == pr->nr) {
2315                                         strlcpy(pr->name, anchor->name,
2316                                             sizeof(pr->name));
2317                                         break;
2318                                 }
2319                 } else {
2320                         RB_FOREACH(anchor, pf_anchor_node,
2321                             &ruleset->anchor->children)
2322                                 if (nr++ == pr->nr) {
2323                                         strlcpy(pr->name, anchor->name,
2324                                             sizeof(pr->name));
2325                                         break;
2326                                 }
2327                 }
2328                 if (!pr->name[0])
2329                         error = EBUSY;
2330                 break;
2331         }
2332
2333         case DIOCRCLRTABLES: {
2334                 struct pfioc_table *io = (struct pfioc_table *)addr;
2335
2336                 if (io->pfrio_esize != 0) {
2337                         error = ENODEV;
2338                         break;
2339                 }
2340                 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2341                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
2342                 break;
2343         }
2344
2345         case DIOCRADDTABLES: {
2346                 struct pfioc_table *io = (struct pfioc_table *)addr;
2347
2348                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2349                         error = ENODEV;
2350                         break;
2351                 }
2352                 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2353                     &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2354                 break;
2355         }
2356
2357         case DIOCRDELTABLES: {
2358                 struct pfioc_table *io = (struct pfioc_table *)addr;
2359
2360                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2361                         error = ENODEV;
2362                         break;
2363                 }
2364                 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2365                     &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2366                 break;
2367         }
2368
2369         case DIOCRGETTABLES: {
2370                 struct pfioc_table *io = (struct pfioc_table *)addr;
2371
2372                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2373                         error = ENODEV;
2374                         break;
2375                 }
2376                 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2377                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2378                 break;
2379         }
2380
2381         case DIOCRGETTSTATS: {
2382                 struct pfioc_table *io = (struct pfioc_table *)addr;
2383
2384                 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2385                         error = ENODEV;
2386                         break;
2387                 }
2388                 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2389                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2390                 break;
2391         }
2392
2393         case DIOCRCLRTSTATS: {
2394                 struct pfioc_table *io = (struct pfioc_table *)addr;
2395
2396                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2397                         error = ENODEV;
2398                         break;
2399                 }
2400                 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2401                     &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2402                 break;
2403         }
2404
2405         case DIOCRSETTFLAGS: {
2406                 struct pfioc_table *io = (struct pfioc_table *)addr;
2407
2408                 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2409                         error = ENODEV;
2410                         break;
2411                 }
2412                 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2413                     io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2414                     &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2415                 break;
2416         }
2417
2418         case DIOCRCLRADDRS: {
2419                 struct pfioc_table *io = (struct pfioc_table *)addr;
2420
2421                 if (io->pfrio_esize != 0) {
2422                         error = ENODEV;
2423                         break;
2424                 }
2425                 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2426                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
2427                 break;
2428         }
2429
2430         case DIOCRADDADDRS: {
2431                 struct pfioc_table *io = (struct pfioc_table *)addr;
2432
2433                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2434                         error = ENODEV;
2435                         break;
2436                 }
2437                 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2438                     io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2439                     PFR_FLAG_USERIOCTL);
2440                 break;
2441         }
2442
2443         case DIOCRDELADDRS: {
2444                 struct pfioc_table *io = (struct pfioc_table *)addr;
2445
2446                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2447                         error = ENODEV;
2448                         break;
2449                 }
2450                 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2451                     io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2452                     PFR_FLAG_USERIOCTL);
2453                 break;
2454         }
2455
2456         case DIOCRSETADDRS: {
2457                 struct pfioc_table *io = (struct pfioc_table *)addr;
2458
2459                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2460                         error = ENODEV;
2461                         break;
2462                 }
2463                 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2464                     io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2465                     &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2466                     PFR_FLAG_USERIOCTL, 0);
2467                 break;
2468         }
2469
2470         case DIOCRGETADDRS: {
2471                 struct pfioc_table *io = (struct pfioc_table *)addr;
2472
2473                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2474                         error = ENODEV;
2475                         break;
2476                 }
2477                 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2478                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2479                 break;
2480         }
2481
2482         case DIOCRGETASTATS: {
2483                 struct pfioc_table *io = (struct pfioc_table *)addr;
2484
2485                 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2486                         error = ENODEV;
2487                         break;
2488                 }
2489                 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2490                     &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2491                 break;
2492         }
2493
2494         case DIOCRCLRASTATS: {
2495                 struct pfioc_table *io = (struct pfioc_table *)addr;
2496
2497                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2498                         error = ENODEV;
2499                         break;
2500                 }
2501                 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2502                     io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2503                     PFR_FLAG_USERIOCTL);
2504                 break;
2505         }
2506
2507         case DIOCRTSTADDRS: {
2508                 struct pfioc_table *io = (struct pfioc_table *)addr;
2509
2510                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2511                         error = ENODEV;
2512                         break;
2513                 }
2514                 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2515                     io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2516                     PFR_FLAG_USERIOCTL);
2517                 break;
2518         }
2519
2520         case DIOCRINADEFINE: {
2521                 struct pfioc_table *io = (struct pfioc_table *)addr;
2522
2523                 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2524                         error = ENODEV;
2525                         break;
2526                 }
2527                 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2528                     io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2529                     io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2530                 break;
2531         }
2532
2533         case DIOCOSFPADD: {
2534                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2535                 error = pf_osfp_add(io);
2536                 break;
2537         }
2538
2539         case DIOCOSFPGET: {
2540                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2541                 error = pf_osfp_get(io);
2542                 break;
2543         }
2544
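        /*
         * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement the pfctl
         * transaction interface: the pfioc_trans_e array is copied in
         * one element at a time and each element opens, rolls back or
         * commits an inactive ruleset, ALTQ or table ticket, so an
         * entire configuration can be swapped in atomically.
         */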
2545         case DIOCXBEGIN: {
2546                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
2547                 struct pfioc_trans_e    *ioe;
2548                 struct pfr_table        *table;
2549                 int                      i;
2550
2551                 if (io->esize != sizeof(*ioe)) {
2552                         error = ENODEV;
2553                         goto fail;
2554                 }
2555                 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2556                 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2557                 for (i = 0; i < io->size; i++) {
2558                         if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2559                                 kfree(table, M_TEMP);
2560                                 kfree(ioe, M_TEMP);
2561                                 error = EFAULT;
2562                                 goto fail;
2563                         }
2564                         switch (ioe->rs_num) {
2565 #ifdef ALTQ
2566                         case PF_RULESET_ALTQ:
2567                                 if (ioe->anchor[0]) {
2568                                         kfree(table, M_TEMP);
2569                                         kfree(ioe, M_TEMP);
2570                                         error = EINVAL;
2571                                         goto fail;
2572                                 }
2573                                 if ((error = pf_begin_altq(&ioe->ticket))) {
2574                                         kfree(table, M_TEMP);
2575                                         kfree(ioe, M_TEMP);
2576                                         goto fail;
2577                                 }
2578                                 break;
2579 #endif /* ALTQ */
2580                         case PF_RULESET_TABLE:
2581                                 bzero(table, sizeof(*table));
2582                                 strlcpy(table->pfrt_anchor, ioe->anchor,
2583                                     sizeof(table->pfrt_anchor));
2584                                 if ((error = pfr_ina_begin(table,
2585                                     &ioe->ticket, NULL, 0))) {
2586                                         kfree(table, M_TEMP);
2587                                         kfree(ioe, M_TEMP);
2588                                         goto fail;
2589                                 }
2590                                 break;
2591                         default:
2592                                 if ((error = pf_begin_rules(&ioe->ticket,
2593                                     ioe->rs_num, ioe->anchor))) {
2594                                         kfree(table, M_TEMP);
2595                                         kfree(ioe, M_TEMP);
2596                                         goto fail;
2597                                 }
2598                                 break;
2599                         }
2600                         if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2601                                 kfree(table, M_TEMP);
2602                                 kfree(ioe, M_TEMP);
2603                                 error = EFAULT;
2604                                 goto fail;
2605                         }
2606                 }
2607                 kfree(table, M_TEMP);
2608                 kfree(ioe, M_TEMP);
2609                 break;
2610         }
2611
2612         case DIOCXROLLBACK: {
2613                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
2614                 struct pfioc_trans_e    *ioe;
2615                 struct pfr_table        *table;
2616                 int                      i;
2617
2618                 if (io->esize != sizeof(*ioe)) {
2619                         error = ENODEV;
2620                         goto fail;
2621                 }
2622                 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2623                 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2624                 for (i = 0; i < io->size; i++) {
2625                         if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2626                                 kfree(table, M_TEMP);
2627                                 kfree(ioe, M_TEMP);
2628                                 error = EFAULT;
2629                                 goto fail;
2630                         }
2631                         switch (ioe->rs_num) {
2632 #ifdef ALTQ
2633                         case PF_RULESET_ALTQ:
2634                                 if (ioe->anchor[0]) {
2635                                         kfree(table, M_TEMP);
2636                                         kfree(ioe, M_TEMP);
2637                                         error = EINVAL;
2638                                         goto fail;
2639                                 }
2640                                 if ((error = pf_rollback_altq(ioe->ticket))) {
2641                                         kfree(table, M_TEMP);
2642                                         kfree(ioe, M_TEMP);
2643                                         goto fail; /* really bad */
2644                                 }
2645                                 break;
2646 #endif /* ALTQ */
2647                         case PF_RULESET_TABLE:
2648                                 bzero(table, sizeof(*table));
2649                                 strlcpy(table->pfrt_anchor, ioe->anchor,
2650                                     sizeof(table->pfrt_anchor));
2651                                 if ((error = pfr_ina_rollback(table,
2652                                     ioe->ticket, NULL, 0))) {
2653                                         kfree(table, M_TEMP);
2654                                         kfree(ioe, M_TEMP);
2655                                         goto fail; /* really bad */
2656                                 }
2657                                 break;
2658                         default:
2659                                 if ((error = pf_rollback_rules(ioe->ticket,
2660                                     ioe->rs_num, ioe->anchor))) {
2661                                         kfree(table, M_TEMP);
2662                                         kfree(ioe, M_TEMP);
2663                                         goto fail; /* really bad */
2664                                 }
2665                                 break;
2666                         }
2667                 }
2668                 kfree(table, M_TEMP);
2669                 kfree(ioe, M_TEMP);
2670                 break;
2671         }
2672
2673         case DIOCXCOMMIT: {
2674                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
2675                 struct pfioc_trans_e    *ioe;
2676                 struct pfr_table        *table;
2677                 struct pf_ruleset       *rs;
2678                 int                      i;
2679
2680                 if (io->esize != sizeof(*ioe)) {
2681                         error = ENODEV;
2682                         goto fail;
2683                 }
2684                 ioe = kmalloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2685                 table = kmalloc(sizeof(*table), M_TEMP, M_WAITOK);
2686                 /* first pass: make sure everything will succeed */
2687                 for (i = 0; i < io->size; i++) {
2688                         if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2689                                 kfree(table, M_TEMP);
2690                                 kfree(ioe, M_TEMP);
2691                                 error = EFAULT;
2692                                 goto fail;
2693                         }
2694                         switch (ioe->rs_num) {
2695 #ifdef ALTQ
2696                         case PF_RULESET_ALTQ:
2697                                 if (ioe->anchor[0]) {
2698                                         kfree(table, M_TEMP);
2699                                         kfree(ioe, M_TEMP);
2700                                         error = EINVAL;
2701                                         goto fail;
2702                                 }
2703                                 if (!altqs_inactive_open || ioe->ticket !=
2704                                     ticket_altqs_inactive) {
2705                                         kfree(table, M_TEMP);
2706                                         kfree(ioe, M_TEMP);
2707                                         error = EBUSY;
2708                                         goto fail;
2709                                 }
2710                                 break;
2711 #endif /* ALTQ */
2712                         case PF_RULESET_TABLE:
2713                                 rs = pf_find_ruleset(ioe->anchor);
2714                                 if (rs == NULL || !rs->topen || ioe->ticket !=
2715                                      rs->tticket) {
2716                                         kfree(table, M_TEMP);
2717                                         kfree(ioe, M_TEMP);
2718                                         error = EBUSY;
2719                                         goto fail;
2720                                 }
2721                                 break;
2722                         default:
2723                                 if (ioe->rs_num < 0 || ioe->rs_num >=
2724                                     PF_RULESET_MAX) {
2725                                         kfree(table, M_TEMP);
2726                                         kfree(ioe, M_TEMP);
2727                                         error = EINVAL;
2728                                         goto fail;
2729                                 }
2730                                 rs = pf_find_ruleset(ioe->anchor);
2731                                 if (rs == NULL ||
2732                                     !rs->rules[ioe->rs_num].inactive.open ||
2733                                     rs->rules[ioe->rs_num].inactive.ticket !=
2734                                     ioe->ticket) {
2735                                         kfree(table, M_TEMP);
2736                                         kfree(ioe, M_TEMP);
2737                                         error = EBUSY;
2738                                         goto fail;
2739                                 }
2740                                 break;
2741                         }
2742                 }
2743                 /* now do the commit - no errors should happen here */
2744                 for (i = 0; i < io->size; i++) {
2745                         if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2746                                 kfree(table, M_TEMP);
2747                                 kfree(ioe, M_TEMP);
2748                                 error = EFAULT;
2749                                 goto fail;
2750                         }
2751                         switch (ioe->rs_num) {
2752 #ifdef ALTQ
2753                         case PF_RULESET_ALTQ:
2754                                 if ((error = pf_commit_altq(ioe->ticket))) {
2755                                         kfree(table, M_TEMP);
2756                                         kfree(ioe, M_TEMP);
2757                                         goto fail; /* really bad */
2758                                 }
2759                                 break;
2760 #endif /* ALTQ */
2761                         case PF_RULESET_TABLE:
2762                                 bzero(table, sizeof(*table));
2763                                 strlcpy(table->pfrt_anchor, ioe->anchor,
2764                                     sizeof(table->pfrt_anchor));
2765                                 if ((error = pfr_ina_commit(table, ioe->ticket,
2766                                     NULL, NULL, 0))) {
2767                                         kfree(table, M_TEMP);
2768                                         kfree(ioe, M_TEMP);
2769                                         goto fail; /* really bad */
2770                                 }
2771                                 break;
2772                         default:
2773                                 if ((error = pf_commit_rules(ioe->ticket,
2774                                     ioe->rs_num, ioe->anchor))) {
2775                                         kfree(table, M_TEMP);
2776                                         kfree(ioe, M_TEMP);
2777                                         goto fail; /* really bad */
2778                                 }
2779                                 break;
2780                         }
2781                 }
2782                 kfree(table, M_TEMP);
2783                 kfree(ioe, M_TEMP);
2784                 break;
2785         }
2786
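        /*
         * DIOCGETSRCNODES: copy the source-tracking nodes from every cpu's
         * tree out to userland.  When psn_len is 0 only the required buffer
         * size is reported, so a caller would typically issue the ioctl
         * twice, roughly as below (illustrative sketch only; "dev" is a
         * descriptor opened on the pf device, error handling omitted):
         *
         *      struct pfioc_src_nodes psn = { .psn_len = 0 };
         *      ioctl(dev, DIOCGETSRCNODES, &psn);              size query
         *      psn.psn_src_nodes = malloc(psn.psn_len);
         *      ioctl(dev, DIOCGETSRCNODES, &psn);              fetch nodes
         */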
2787         case DIOCGETSRCNODES: {
2788                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
2789                 struct pf_src_node      *n, *p, *pstore;
2790                 u_int32_t nr = 0;
2791                 int     space = psn->psn_len;
2792                 int     nn;
2793
2794                 if (space == 0) {
2795                         for (nn = 0; nn < ncpus; ++nn) {
2796                                 RB_FOREACH(n, pf_src_tree,
2797                                            &tree_src_tracking[nn]) {
2798                                         nr++;
2799                                 }
2800                         }
2801                         psn->psn_len = sizeof(struct pf_src_node) * nr;
2802                         break;
2803                 }
2804
2805                 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2806
2807                 p = psn->psn_src_nodes;
2808
2809                 /*
2810                  * WARNING: We are not switching cpus so we cannot call
2811                  *          nominal pf.c support routines for cpu-specific
2812                  *          data.
2813                  */
2814                 for (nn = 0; nn < ncpus; ++nn) {
2815                         RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2816                                 int     secs = time_second, diff;
2817
2818                                 if ((nr + 1) * sizeof(*p) >
2819                                     (unsigned)psn->psn_len) {
2820                                         break;
2821                                 }
2822
2823                                 bcopy(n, pstore, sizeof(*pstore));
2824                                 if (n->rule.ptr != NULL)
2825                                         pstore->rule.nr = n->rule.ptr->nr;
2826                                 pstore->creation = secs - pstore->creation;
2827                                 if (pstore->expire > secs)
2828                                         pstore->expire -= secs;
2829                                 else
2830                                         pstore->expire = 0;
2831
2832                                 /* adjust the connection rate estimate */
2833                                 diff = secs - n->conn_rate.last;
2834                                 if (diff >= n->conn_rate.seconds)
2835                                         pstore->conn_rate.count = 0;
2836                                 else
2837                                         pstore->conn_rate.count -=
2838                                             n->conn_rate.count * diff /
2839                                             n->conn_rate.seconds;
2840
2841                                 error = copyout(pstore, p, sizeof(*p));
2842                                 if (error) {
2843                                         kfree(pstore, M_TEMP);
2844                                         goto fail;
2845                                 }
2846                                 p++;
2847                                 nr++;
2848                         }
2849                 }
2850                 psn->psn_len = sizeof(struct pf_src_node) * nr;
2851                 kfree(pstore, M_TEMP);
2852                 break;
2853         }
2854
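        /*
         * DIOCCLRSRCNODES: drop every source-tracking node.  The states are
         * detached from their nodes first (without switching cpus), the
         * nodes are marked expired, and then we migrate to each cpu in turn
         * so pf_purge_expired_src_nodes() can free them.
         */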
2855         case DIOCCLRSRCNODES: {
2856                 struct pf_src_node      *n;
2857                 struct pf_state         *state;
2858                 globaldata_t save_gd = mycpu;
2859                 int nn;
2860
2861                 /*
2862                  * WARNING: We are not switching cpus so we cannot call
2863                  *          nominal pf.c support routines for cpu-specific
2864                  *          data.
2865                  */
2866                 for (nn = 0; nn < ncpus; ++nn) {
2867                         RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
2868                                 state->src_node = NULL;
2869                                 state->nat_src_node = NULL;
2870                         }
2871                         RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
2872                                 n->expire = 1;
2873                                 n->states = 0;
2874                         }
2875                 }
2876
2877                 /*
2878                  * WARNING: Must move to the target cpu for nominal calls
2879                  *          into pf.c
2880                  */
2881                 for (nn = 0; nn < ncpus; ++nn) {
2882                         lwkt_setcpu_self(globaldata_find(nn));
2883                         pf_purge_expired_src_nodes(1);
2884                 }
2885                 lwkt_setcpu_self(save_gd);
2886                 pf_status.src_nodes = 0;
2887                 break;
2888         }
2889
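        /*
         * DIOCKILLSRCNODES: expire only the source nodes whose source and
         * redirection addresses match the masks supplied by userland,
         * unlinking any states that still point at them.  The number of
         * nodes killed is returned in psnk_killed.
         */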
2890         case DIOCKILLSRCNODES: {
2891                 struct pf_src_node      *sn;
2892                 struct pf_state         *s;
2893                 struct pfioc_src_node_kill *psnk =
2894                     (struct pfioc_src_node_kill *)addr;
2895                 u_int                   killed = 0;
2896                 globaldata_t save_gd = mycpu;
2897                 int nn;
2898
2899                 /*
2900                  * WARNING: We are not switching cpus so we cannot call
2901                  *          nominal pf.c support routines for cpu-specific
2902                  *          data.
2903                  */
2904                 for (nn = 0; nn < ncpus; ++nn) {
2905                     RB_FOREACH(sn, pf_src_tree, &tree_src_tracking[nn]) {
2906                         if (PF_MATCHA(psnk->psnk_src.neg,
2907                                 &psnk->psnk_src.addr.v.a.addr,
2908                                 &psnk->psnk_src.addr.v.a.mask,
2909                                 &sn->addr, sn->af) &&
2910                             PF_MATCHA(psnk->psnk_dst.neg,
2911                                 &psnk->psnk_dst.addr.v.a.addr,
2912                                 &psnk->psnk_dst.addr.v.a.mask,
2913                                 &sn->raddr, sn->af)) {
2914                                 /* Handle state to src_node linkage */
2915                                 if (sn->states != 0) {
2916                                         RB_FOREACH(s, pf_state_tree_id,
2917                                             &tree_id[nn]) {
2918                                                 if (s->src_node == sn)
2919                                                         s->src_node = NULL;
2920                                                 if (s->nat_src_node == sn)
2921                                                         s->nat_src_node = NULL;
2922                                         }
2923                                         sn->states = 0;
2924                                 }
2925                                 sn->expire = 1;
2926                                 killed++;
2927                         }
2928                     }
2929                 }
2930                 if (killed > 0) {
2931                         for (nn = 0; nn < ncpus; ++nn) {
2932                                 lwkt_setcpu_self(globaldata_find(nn));
2933                                 pf_purge_expired_src_nodes(1);
2934                         }
2935                         lwkt_setcpu_self(save_gd);
2936                 }
2937
2938                 psnk->psnk_killed = killed;
2939                 break;
2940         }
2941
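        /*
         * DIOCSETHOSTID: set the host id used by pfsync to identify this
         * host; a value of zero asks the kernel to pick a random one.
         */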
2942         case DIOCSETHOSTID: {
2943                 u_int32_t       *hostid = (u_int32_t *)addr;
2944
2945                 if (*hostid == 0)
2946                         pf_status.hostid = karc4random();
2947                 else
2948                         pf_status.hostid = *hostid;
2949                 break;
2950         }
2951
2952         case DIOCOSFPFLUSH:
2953                 crit_enter();
2954                 pf_osfp_flush();
2955                 crit_exit();
2956                 break;
2957
2958         case DIOCIGETIFACES: {
2959                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2960
2961                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2962                         error = ENODEV;
2963                         break;
2964                 }
2965                 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2966                     &io->pfiio_size);
2967                 break;
2968         }
2969
2970         case DIOCSETIFFLAG: {
2971                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2972
2973                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2974                 break;
2975         }
2976
2977         case DIOCCLRIFFLAG: {
2978                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2979
2980                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2981                 break;
2982         }
2983
2984         default:
2985                 error = ENODEV;
2986                 break;
2987         }
2988 fail:
2989         lwkt_reltoken(&pf_token);
2990         return (error);
2991 }
2992
2993 /*
2994  * XXX - Check for version mismatch!!!
2995  */
2996 static void
2997 pf_clear_states(void)
2998 {
2999         struct pf_state         *s, *nexts;
3000         u_int                   killed = 0;
3001         globaldata_t save_gd = mycpu;
3002         int nn;
3003
3004         for (nn = 0; nn < ncpus; ++nn) {
3005                 lwkt_setcpu_self(globaldata_find(nn));
3006                 for (s = RB_MIN(pf_state_tree_id, &tree_id[nn]); s; s = nexts) {
3007                         nexts = RB_NEXT(pf_state_tree_id, &tree_id[nn], s);
3008
3009                         /* don't send out individual delete messages */
3010                         s->sync_flags = PFSTATE_NOSYNC;
3011                         pf_unlink_state(s);
3012                         killed++;
3013                 }
3014
3015         }
3016         lwkt_setcpu_self(save_gd);
3017
3018 #if 0 /* PFSYNC */
3019 /*
3020  * XXX This is called on module unload; we do not want to sync that over?
3021  */
3022         pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3023 #endif
3024 }
3025
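/*
 * Flush the pf tables: a zeroed pfioc_table is handed to pfr_clr_tables(),
 * the same backend the DIOCRCLRTABLES ioctl goes through.
 */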
3026 static int
3027 pf_clear_tables(void)
3028 {
3029         struct pfioc_table io;
3030         int error;
3031
3032         bzero(&io, sizeof(io));
3033
3034         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3035             io.pfrio_flags);
3036
3037         return (error);
3038 }
3039
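/*
 * Shutdown-path variant of DIOCCLRSRCNODES: walk every cpu, detach the
 * states from their source nodes, expire the nodes and purge them while
 * already running on the owning cpu.
 */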
3040 static void
3041 pf_clear_srcnodes(void)
3042 {
3043         struct pf_src_node      *n;
3044         struct pf_state         *state;
3045         globaldata_t save_gd = mycpu;
3046         int nn;
3047
3048         for (nn = 0; nn < ncpus; ++nn) {
3049                 lwkt_setcpu_self(globaldata_find(nn));
3050                 RB_FOREACH(state, pf_state_tree_id, &tree_id[nn]) {
3051                         state->src_node = NULL;
3052                         state->nat_src_node = NULL;
3053                 }
3054                 RB_FOREACH(n, pf_src_tree, &tree_src_tracking[nn]) {
3055                         n->expire = 1;
3056                         n->states = 0;
3057                 }
3058                 pf_purge_expired_src_nodes(0);
3059         }
3060         lwkt_setcpu_self(save_gd);
3061
3062         pf_status.src_nodes = 0;
3063 }
3064
3065 /*
3066  * XXX - Check for version mismatch!!!
3067  */
3068
3069 /*
3070  * Duplicate pfctl -Fa operation to get rid of as much as we can.
3071  */
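/*
 * The flush is performed as a series of empty begin/commit transactions on
 * the SCRUB, FILTER, NAT, BINAT and RDR rulesets, followed by clearing the
 * tables, the ALTQ queues (when compiled in), the states and the source
 * nodes.
 */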
3072 static int
3073 shutdown_pf(void)
3074 {
3075         int error = 0;
3076         u_int32_t t[5];
3077         char nn = '\0';
3078
3079
3080         pf_status.running = 0;
3081         error = dehook_pf();
3082         if (error) {
3083                 pf_status.running = 1;
3084                 DPFPRINTF(PF_DEBUG_MISC,
3085                     ("pf: pfil unregistration failed\n"));
3086                 return(error);
3087         }
3088         do {
3089                 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) {
3090                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3091                         break;
3092                 }
3093                 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) {
3094                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3095                         break;          /* XXX: rollback? */
3096                 }
3097                 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))    != 0) {
3098                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3099                         break;          /* XXX: rollback? */
3100                 }
3101                 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3102                     != 0) {
3103                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3104                         break;          /* XXX: rollback? */
3105                 }
3106                 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3107                     != 0) {
3108                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3109                         break;          /* XXX: rollback? */
3110                 }
3111
3112                 /* XXX: these should always succeed here */
3113                 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3114                 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3115                 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3116                 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3117                 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3118
3119                 if ((error = pf_clear_tables()) != 0)
3120                         break;
3121 #ifdef ALTQ
3122                 if ((error = pf_begin_altq(&t[0])) != 0) {
3123                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3124                         break;
3125                 }
3126                 pf_commit_altq(t[0]);
3127 #endif
3128                 pf_clear_states();
3129                 pf_clear_srcnodes();
3130
3131                 /* status does not use allocated memory so no cleanup is needed */
3132                 /* fingerprints and interfaces have their own cleanup code */
3133         } while(0);
3134         return (error);
3135 }
3136
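/*
 * pfil(9) input hook for AF_INET: run the packet through pf_test() and free
 * the mbuf when pf decides to drop it; the pf_test() result is handed back
 * to the pfil framework.
 */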
3137 static int
3138 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3139 {
3140         /*
3141          * DragonFly's version of pf uses FreeBSD's native host byte ordering
3142          * for ip_len/ip_off. This is why we don't have to change byte order
3143          * like the FreeBSD-5 version does.
3144          */
3145         int chk;
3146
3147         lwkt_gettoken_shared(&pf_token);
3148
3149         chk = pf_test(PF_IN, ifp, m, NULL, NULL);
3150         if (chk && *m) {
3151                 m_freem(*m);
3152                 *m = NULL;
3153         }
3154         lwkt_reltoken(&pf_token);
3155         return chk;
3156 }
3157
3158 static int
3159 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3160 {
3161         /*
3162          * DragonFly's version of pf uses FreeBSD's native host byte ordering
3163          * for ip_len/ip_off. This is why we don't have to change byte order
3164          * like the FreeBSD-5 version does.
3165          */
3166         int chk;
3167
3168         lwkt_gettoken_shared(&pf_token);
3169
3170                 /* We need a proper checksum before we start (see OpenBSD ip_output) */
3171         if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3172                 in_delayed_cksum(*m);
3173                 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3174         }
3175         chk = pf_test(PF_OUT, ifp, m, NULL, NULL);
3176         if (chk && *m) {
3177                 m_freem(*m);
3178                 *m = NULL;
3179         }
3180         lwkt_reltoken(&pf_token);
3181         return chk;
3182 }
3183
3184 #ifdef INET6
3185 static int
3186 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3187 {
3188         /*
3189          * IPv6 is not affected by ip_len/ip_off byte order changes.
3190          */
3191         int chk;
3192
3193         lwkt_gettoken_shared(&pf_token);
3194
3195         chk = pf_test6(PF_IN, ifp, m, NULL, NULL);
3196         if (chk && *m) {
3197                 m_freem(*m);
3198                 *m = NULL;
3199         }
3200         lwkt_reltoken(&pf_token);
3201         return chk;
3202 }
3203
3204 static int
3205 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
3206 {
3207         /*
3208          * IPv6 is not affected by ip_len/ip_off byte order changes.
3209          */
3210         int chk;
3211
3212         lwkt_gettoken_shared(&pf_token);
3213
3214                 /* We need a proper checksum before we start (see OpenBSD ip_output) */
3215         if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3216                 in_delayed_cksum(*m);
3217                 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3218         }
3219         chk = pf_test6(PF_OUT, ifp, m, NULL, NULL);
3220         if (chk && *m) {
3221                 m_freem(*m);
3222                 *m = NULL;
3223         }
3224         lwkt_reltoken(&pf_token);
3225         return chk;
3226 }
3227 #endif /* INET6 */
3228
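/*
 * Register the pf_check*() functions with the pfil(9) heads for AF_INET
 * and, when INET6 is configured, AF_INET6.  Safe to call more than once;
 * it returns immediately while pf_pfil_hooked is already set.
 */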
3229 static int
3230 hook_pf(void)
3231 {
3232         struct pfil_head *pfh_inet;
3233 #ifdef INET6
3234         struct pfil_head *pfh_inet6;
3235 #endif
3236
3237         lwkt_gettoken(&pf_token);
3238
3239         if (pf_pfil_hooked) {
3240                 lwkt_reltoken(&pf_token);
3241                 return (0);
3242         }
3243
3244         pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3245         if (pfh_inet == NULL) {
3246                 lwkt_reltoken(&pf_token);
3247                 return (ENODEV);
3248         }
3249         pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh_inet);
3250         pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh_inet);
3251 #ifdef INET6
3252         pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3253         if (pfh_inet6 == NULL) {
3254                 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3255                 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3256                 lwkt_reltoken(&pf_token);
3257                 return (ENODEV);
3258         }
3259         pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh_inet6);
3260         pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh_inet6);
3261 #endif
3262
3263         pf_pfil_hooked = 1;
3264         lwkt_reltoken(&pf_token);
3265         return (0);
3266 }
3267
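/*
 * Remove the pfil(9) hooks installed by hook_pf().  A no-op when the hooks
 * are not currently registered.
 */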
3268 static int
3269 dehook_pf(void)
3270 {
3271         struct pfil_head *pfh_inet;
3272 #ifdef INET6
3273         struct pfil_head *pfh_inet6;
3274 #endif
3275
3276         lwkt_gettoken(&pf_token);
3277
3278         if (pf_pfil_hooked == 0) {
3279                 lwkt_reltoken(&pf_token);
3280                 return (0);
3281         }
3282
3283         pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3284         if (pfh_inet == NULL) {
3285                 lwkt_reltoken(&pf_token);
3286                 return (ENODEV);
3287         }
3288         pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
3289         pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
3290 #ifdef INET6
3291         pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3292         if (pfh_inet6 == NULL) {
3293                 lwkt_reltoken(&pf_token);
3294                 return (ENODEV);
3295         }
3296         pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
3297         pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
3298 #endif
3299
3300         pf_pfil_hooked = 0;
3301         lwkt_reltoken(&pf_token);
3302         return (0);
3303 }
3304
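/*
 * Module load: create the pf device node (PF_NAME), attach pf and
 * initialize the consistency and global state-table locks.
 */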
3305 static int
3306 pf_load(void)
3307 {
3308         lwkt_gettoken(&pf_token);
3309
3310         pf_dev = make_dev(&pf_ops, 0, 0, 0, 0600, PF_NAME);
3311         pfattach();
3312         lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE);
3313         lockinit(&pf_global_statetbl_lock, "pfglstlk", 0, 0);
3314         lwkt_reltoken(&pf_token);
3315         return (0);
3316 }
3317
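/*
 * rnh_walktree() callback used by pf_unload() to delete and free every
 * node in the pf mask radix tree.
 */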
3318 static int
3319 pf_mask_del(struct radix_node *rn, void *arg)
3320 {
3321         struct radix_node_head *rnh = arg;
3322
3323         rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
3324         Free(rn);
3325         return 0;
3326 }
3327
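/*
 * Module unload: unhook from pfil, flush the whole configuration via
 * shutdown_pf(), stop the purge thread, and release the remaining pf
 * resources (interfaces, OS fingerprints, device ops, locks and
 * allocation zones).
 */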
3328 static int
3329 pf_unload(void)
3330 {
3331         int error;
3332         pf_status.running = 0;
3333
3334         lwkt_gettoken(&pf_token);
3335
3336         error = dehook_pf();
3337         if (error) {
3338                 /*
3339                  * Should not happen!
3340                  * XXX Due to error code ESRCH, kldunload will show
3341                  * a message like 'No such process'.
3342                  */
3343                 kprintf("pfil unregistration failed\n");
3344                 lwkt_reltoken(&pf_token);
3345                 return error;
3346         }
3347         shutdown_pf();
3348         pf_end_threads = 1;
3349         while (pf_end_threads < 2) {
3350                 wakeup_one(pf_purge_thread);
3351                 tsleep(pf_purge_thread, 0, "pftmo", hz);
3352         }
3353         pfi_cleanup();
3354         pf_osfp_flush();
3355         dev_ops_remove_all(&pf_ops);
3356         lockuninit(&pf_consistency_lock);
3357         lwkt_reltoken(&pf_token);
3358
3359         if (pf_maskhead != NULL) {
3360                 pf_maskhead->rnh_walktree(pf_maskhead,
3361                         pf_mask_del, pf_maskhead);
3362                 Free(pf_maskhead);
3363                 pf_maskhead = NULL;
3364         }
3365         kmalloc_destroy(&pf_state_pl);
3366         kmalloc_destroy(&pf_frent_pl);
3367         kmalloc_destroy(&pf_cent_pl);
3368         return 0;
3369 }
3370
3371 static int
3372 pf_modevent(module_t mod, int type, void *data)
3373 {
3374         int error = 0;
3375
3376         lwkt_gettoken(&pf_token);
3377
3378         switch(type) {
3379         case MOD_LOAD:
3380                 error = pf_load();
3381                 break;
3382
3383         case MOD_UNLOAD:
3384                 error = pf_unload();
3385                 break;
3386         default:
3387                 error = EINVAL;
3388                 break;
3389         }
3390         lwkt_reltoken(&pf_token);
3391         return error;
3392 }
3393
3394 static moduledata_t pf_mod = {
3395         "pf",
3396         pf_modevent,
3397         0
3398 };
3399 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
3400 MODULE_VERSION(pf, PF_MODVER);