1 /*      $OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */
2
3 /*
4  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
5  *
6  * Copyright (c) 2001 Daniel Hartmeier
7  * Copyright (c) 2002 - 2008 Henning Brauer
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  *    - Redistributions of source code must retain the above copyright
15  *      notice, this list of conditions and the following disclaimer.
16  *    - Redistributions in binary form must reproduce the above
17  *      copyright notice, this list of conditions and the following
18  *      disclaimer in the documentation and/or other materials provided
19  *      with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  * Effort sponsored in part by the Defense Advanced Research Projects
35  * Agency (DARPA) and Air Force Research Laboratory, Air Force
36  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
37  *
38  */
39
40 #include "opt_inet.h"
41 #include "opt_inet6.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/filio.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/kernel.h>
51 #include <sys/time.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/spinlock.h>
57
58 #include <machine/inttypes.h>
59
60 #include <sys/md5.h>
61
62 #include <net/if.h>
63 #include <net/if_types.h>
64 #include <net/bpf.h>
65 #include <net/netisr2.h>
66 #include <net/route.h>
67
68 #include <netinet/in.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip_var.h>
73 #include <netinet/tcp.h>
74 #include <netinet/tcp_seq.h>
75 #include <netinet/udp.h>
76 #include <netinet/ip_icmp.h>
77 #include <netinet/in_pcb.h>
78 #include <netinet/tcp_timer.h>
79 #include <netinet/tcp_var.h>
80 #include <netinet/udp_var.h>
81 #include <netinet/icmp_var.h>
82 #include <netinet/if_ether.h>
83
84 #include <net/pf/pfvar.h>
85 #include <net/pf/if_pflog.h>
86
87 #include <net/pf/if_pfsync.h>
88
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #include <netinet/icmp6.h>
92 #include <netinet6/nd6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet6/in6_pcb.h>
95 #endif /* INET6 */
96
97 #include <sys/in_cksum.h>
98 #include <sys/ucred.h>
99 #include <machine/limits.h>
100 #include <sys/msgport2.h>
101 #include <sys/spinlock2.h>
102 #include <net/netmsg2.h>
103 #include <net/toeplitz2.h>
104
105 extern int ip_optcopy(struct ip *, struct ip *);
106 extern int debug_pfugidhack;
107
108 /*
109  * pf_token  - shared lock for cpu-localized operations,
110  *             exclusive lock otherwise.
111  *
112  * pf_gtoken - exclusive lock used for initialization.
113  *
114  * pf_spin   - only used to atomically fetch and increment stateid
115  *             on 32-bit systems.
116  */
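/*
 * Typical usage (a sketch, not an exhaustive list of call sites):
 * packet-path code that only touches cpu-localized structures acquires
 * the token shared, e.g.
 *
 *	lwkt_gettoken_shared(&pf_token);
 *	... per-cpu table work ...
 *	lwkt_reltoken(&pf_token);
 *
 * while configuration paths and the purge thread below acquire it
 * exclusively via lwkt_gettoken(&pf_token).
 */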
117 struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
118 struct lwkt_token pf_gtoken = LWKT_TOKEN_INITIALIZER(pf_gtoken);
119 #if __SIZEOF_LONG__ != 8
120 struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin, "pf_spin");
121 #endif
122
123 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
124
125 #define FAIL(code)      { error = (code); goto done; }
126
127 /*
128  * Global variables
129  */
130
131 /* mask radix tree */
132 struct radix_node_head  *pf_maskhead;
133
134 /* state tables */
135 struct pf_state_tree     pf_statetbl[MAXCPU+1]; /* includes one global table */
136
137 struct pf_altqqueue      pf_altqs[2];
138 struct pf_palist         pf_pabuf;
139 struct pf_altqqueue     *pf_altqs_active;
140 struct pf_altqqueue     *pf_altqs_inactive;
141 struct pf_status         pf_status;
142
143 u_int32_t                ticket_altqs_active;
144 u_int32_t                ticket_altqs_inactive;
145 int                      altqs_inactive_open;
146 u_int32_t                ticket_pabuf;
147
148 MD5_CTX                  pf_tcp_secret_ctx;
149 u_char                   pf_tcp_secret[16];
150 int                      pf_tcp_secret_init;
151 int                      pf_tcp_iss_off;
152
153 struct pf_anchor_stackframe {
154         struct pf_ruleset                       *rs;
155         struct pf_rule                          *r;
156         struct pf_anchor_node                   *parent;
157         struct pf_anchor                        *child;
158 } pf_anchor_stack[64];
159
160 struct malloc_type       *pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl;
161 struct malloc_type       *pf_state_pl, *pf_state_key_pl, *pf_state_item_pl;
162 struct malloc_type       *pf_altq_pl;
163
164 void                     pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
165
166 void                     pf_init_threshold(struct pf_threshold *, u_int32_t,
167                             u_int32_t);
168 void                     pf_add_threshold(struct pf_threshold *);
169 int                      pf_check_threshold(struct pf_threshold *);
170
171 void                     pf_change_ap(struct pf_addr *, u_int16_t *,
172                             u_int16_t *, u_int16_t *, struct pf_addr *,
173                             u_int16_t, u_int8_t, sa_family_t);
174 int                      pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
175                             struct tcphdr *, struct pf_state_peer *);
176 #ifdef INET6
177 void                     pf_change_a6(struct pf_addr *, u_int16_t *,
178                             struct pf_addr *, u_int8_t);
179 #endif /* INET6 */
180 void                     pf_change_icmp(struct pf_addr *, u_int16_t *,
181                             struct pf_addr *, struct pf_addr *, u_int16_t,
182                             u_int16_t *, u_int16_t *, u_int16_t *,
183                             u_int16_t *, u_int8_t, sa_family_t);
184 void                     pf_send_tcp(const struct pf_rule *, sa_family_t,
185                             const struct pf_addr *, const struct pf_addr *,
186                             u_int16_t, u_int16_t, u_int32_t, u_int32_t,
187                             u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
188                             u_int16_t, struct ether_header *, struct ifnet *);
189 void                     pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
190                             sa_family_t, struct pf_rule *);
191 struct pf_rule          *pf_match_translation(struct pf_pdesc *, struct mbuf *,
192                             int, int, struct pfi_kif *,
193                             struct pf_addr *, u_int16_t, struct pf_addr *,
194                             u_int16_t, int);
195 struct pf_rule          *pf_get_translation(struct pf_pdesc *, struct mbuf *,
196                             int, int, struct pfi_kif *, struct pf_src_node **,
197                             struct pf_state_key **, struct pf_state_key **,
198                             struct pf_state_key **, struct pf_state_key **,
199                             struct pf_addr *, struct pf_addr *,
200                             u_int16_t, u_int16_t);
201 void                     pf_detach_state(struct pf_state *);
202 int                      pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
203                             struct pf_state_key **, struct pf_state_key **,
204                             struct pf_state_key **, struct pf_state_key **,
205                             struct pf_addr *, struct pf_addr *,
206                             u_int16_t, u_int16_t);
207 void                     pf_state_key_detach(struct pf_state *, int);
208 u_int32_t                pf_tcp_iss(struct pf_pdesc *);
209 int                      pf_test_rule(struct pf_rule **, struct pf_state **,
210                             int, struct pfi_kif *, struct mbuf *, int,
211                             void *, struct pf_pdesc *, struct pf_rule **,
212                             struct pf_ruleset **, struct ifqueue *, struct inpcb *);
213 static __inline int      pf_create_state(struct pf_rule *, struct pf_rule *,
214                             struct pf_rule *, struct pf_pdesc *,
215                             struct pf_src_node *, struct pf_state_key *,
216                             struct pf_state_key *, struct pf_state_key *,
217                             struct pf_state_key *, struct mbuf *, int,
218                             u_int16_t, u_int16_t, int *, struct pfi_kif *,
219                             struct pf_state **, int, u_int16_t, u_int16_t,
220                             int);
221 int                      pf_test_fragment(struct pf_rule **, int,
222                             struct pfi_kif *, struct mbuf *, void *,
223                             struct pf_pdesc *, struct pf_rule **,
224                             struct pf_ruleset **);
225 int                      pf_tcp_track_full(struct pf_state_peer *,
226                             struct pf_state_peer *, struct pf_state **,
227                             struct pfi_kif *, struct mbuf *, int,
228                             struct pf_pdesc *, u_short *, int *);
229 int                     pf_tcp_track_sloppy(struct pf_state_peer *,
230                             struct pf_state_peer *, struct pf_state **,
231                             struct pf_pdesc *, u_short *);
232 int                      pf_test_state_tcp(struct pf_state **, int,
233                             struct pfi_kif *, struct mbuf *, int,
234                             void *, struct pf_pdesc *, u_short *);
235 int                      pf_test_state_udp(struct pf_state **, int,
236                             struct pfi_kif *, struct mbuf *, int,
237                             void *, struct pf_pdesc *);
238 int                      pf_test_state_icmp(struct pf_state **, int,
239                             struct pfi_kif *, struct mbuf *, int,
240                             void *, struct pf_pdesc *, u_short *);
241 int                      pf_test_state_other(struct pf_state **, int,
242                             struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
243 void                     pf_step_into_anchor(int *, struct pf_ruleset **, int,
244                             struct pf_rule **, struct pf_rule **, int *);
245 int                      pf_step_out_of_anchor(int *, struct pf_ruleset **,
246                              int, struct pf_rule **, struct pf_rule **,
247                              int *);
248 void                     pf_hash(struct pf_addr *, struct pf_addr *,
249                             struct pf_poolhashkey *, sa_family_t);
250 int                      pf_map_addr(u_int8_t, struct pf_rule *,
251                             struct pf_addr *, struct pf_addr *,
252                             struct pf_addr *, struct pf_src_node **);
253 int                      pf_get_sport(struct pf_pdesc *,
254                             sa_family_t, u_int8_t, struct pf_rule *,
255                             struct pf_addr *, struct pf_addr *,
256                             u_int16_t, u_int16_t,
257                             struct pf_addr *, u_int16_t *,
258                             u_int16_t, u_int16_t,
259                             struct pf_src_node **);
260 void                     pf_route(struct mbuf **, struct pf_rule *, int,
261                             struct ifnet *, struct pf_state *,
262                             struct pf_pdesc *);
263 void                     pf_route6(struct mbuf **, struct pf_rule *, int,
264                             struct ifnet *, struct pf_state *,
265                             struct pf_pdesc *);
266 u_int8_t                 pf_get_wscale(struct mbuf *, int, u_int16_t,
267                             sa_family_t);
268 u_int16_t                pf_get_mss(struct mbuf *, int, u_int16_t,
269                             sa_family_t);
270 u_int16_t                pf_calc_mss(struct pf_addr *, sa_family_t,
271                                 u_int16_t);
272 void                     pf_set_rt_ifp(struct pf_state *,
273                             struct pf_addr *);
274 int                      pf_check_proto_cksum(struct mbuf *, int, int,
275                             u_int8_t, sa_family_t);
276 struct pf_divert        *pf_get_divert(struct mbuf *);
277 void                     pf_print_state_parts(struct pf_state *,
278                             struct pf_state_key *, struct pf_state_key *);
279 int                      pf_addr_wrap_neq(struct pf_addr_wrap *,
280                             struct pf_addr_wrap *);
281 struct pf_state         *pf_find_state(struct pfi_kif *,
282                             struct pf_state_key_cmp *, u_int, struct mbuf *);
283 int                      pf_src_connlimit(struct pf_state *);
284 int                      pf_check_congestion(struct ifqueue *);
285
286 extern int pf_end_threads;
287
288 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
289         { &pf_state_pl, PFSTATE_HIWAT },
290         { &pf_src_tree_pl, PFSNODE_HIWAT },
291         { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
292         { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
293         { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
294 };
295
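/*
 * STATE_LOOKUP() in brief: find the state entry for the current packet
 * and drop the packet when no state exists or the state is already
 * queued for purging.  On output, a packet that matches a route-to or
 * reply-to state bound to a different interface is passed through here;
 * it is expected to be handled when it goes out on that interface.
 */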
296 #define STATE_LOOKUP(i, k, d, s, m)                                     \
297         do {                                                            \
298                 s = pf_find_state(i, k, d, m);                          \
299                 if (s == NULL || (s)->timeout == PFTM_PURGE)            \
300                         return (PF_DROP);                               \
301                 if (d == PF_OUT &&                                      \
302                     (((s)->rule.ptr->rt == PF_ROUTETO &&                \
303                     (s)->rule.ptr->direction == PF_OUT) ||              \
304                     ((s)->rule.ptr->rt == PF_REPLYTO &&                 \
305                     (s)->rule.ptr->direction == PF_IN)) &&              \
306                     (s)->rt_kif != NULL &&                              \
307                     (s)->rt_kif != i)                                   \
308                         return (PF_PASS);                               \
309         } while (0)
310
311 #define BOUND_IFACE(r, k) \
312         (((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all)
313
314 #define STATE_INC_COUNTERS(s)                           \
315         do {                                            \
316                 atomic_add_int(&s->rule.ptr->states_cur, 1);    \
317                 s->rule.ptr->states_tot++;              \
318                 if (s->anchor.ptr != NULL) {            \
319                         atomic_add_int(&s->anchor.ptr->states_cur, 1);  \
320                         s->anchor.ptr->states_tot++;    \
321                 }                                       \
322                 if (s->nat_rule.ptr != NULL) {          \
323                         atomic_add_int(&s->nat_rule.ptr->states_cur, 1); \
324                         s->nat_rule.ptr->states_tot++;  \
325                 }                                       \
326         } while (0)
327
328 #define STATE_DEC_COUNTERS(s)                           \
329         do {                                            \
330                 if (s->nat_rule.ptr != NULL)            \
331                         atomic_add_int(&s->nat_rule.ptr->states_cur, -1); \
332                 if (s->anchor.ptr != NULL)              \
333                         atomic_add_int(&s->anchor.ptr->states_cur, -1); \
334                 atomic_add_int(&s->rule.ptr->states_cur, -1);           \
335         } while (0)
336
337 static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list");
338 static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list");
339 static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list");
340 static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl", "pf state item pool list");
341
342 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
343 static __inline int pf_state_compare_key(struct pf_state_key *,
344         struct pf_state_key *);
345 static __inline int pf_state_compare_id(struct pf_state *,
346         struct pf_state *);
347
348 struct pf_src_tree tree_src_tracking[MAXCPU];
349 struct pf_state_tree_id tree_id[MAXCPU];
350 struct pf_state_queue state_list[MAXCPU];
351
352 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
353 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
354 RB_GENERATE(pf_state_tree_id, pf_state,
355     entry_id, pf_state_compare_id);
356
357 static __inline int
358 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
359 {
360         int     diff;
361
362         if (a->rule.ptr > b->rule.ptr)
363                 return (1);
364         if (a->rule.ptr < b->rule.ptr)
365                 return (-1);
366         if ((diff = a->af - b->af) != 0)
367                 return (diff);
368         switch (a->af) {
369 #ifdef INET
370         case AF_INET:
371                 if (a->addr.addr32[0] > b->addr.addr32[0])
372                         return (1);
373                 if (a->addr.addr32[0] < b->addr.addr32[0])
374                         return (-1);
375                 break;
376 #endif /* INET */
377 #ifdef INET6
378         case AF_INET6:
379                 if (a->addr.addr32[3] > b->addr.addr32[3])
380                         return (1);
381                 if (a->addr.addr32[3] < b->addr.addr32[3])
382                         return (-1);
383                 if (a->addr.addr32[2] > b->addr.addr32[2])
384                         return (1);
385                 if (a->addr.addr32[2] < b->addr.addr32[2])
386                         return (-1);
387                 if (a->addr.addr32[1] > b->addr.addr32[1])
388                         return (1);
389                 if (a->addr.addr32[1] < b->addr.addr32[1])
390                         return (-1);
391                 if (a->addr.addr32[0] > b->addr.addr32[0])
392                         return (1);
393                 if (a->addr.addr32[0] < b->addr.addr32[0])
394                         return (-1);
395                 break;
396 #endif /* INET6 */
397         }
398         return (0);
399 }
400
401 u_int32_t
402 pf_state_hash(struct pf_state_key *sk)
403 {
404         u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));
405         if (hv == 0)    /* disallow 0 */
406                 hv = 1;
407         return(hv);
408 }
409
410 #ifdef INET6
411 void
412 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
413 {
414         switch (af) {
415 #ifdef INET
416         case AF_INET:
417                 dst->addr32[0] = src->addr32[0];
418                 break;
419 #endif /* INET */
420         case AF_INET6:
421                 dst->addr32[0] = src->addr32[0];
422                 dst->addr32[1] = src->addr32[1];
423                 dst->addr32[2] = src->addr32[2];
424                 dst->addr32[3] = src->addr32[3];
425                 break;
426         }
427 }
428 #endif /* INET6 */
429
430 void
431 pf_init_threshold(struct pf_threshold *threshold,
432     u_int32_t limit, u_int32_t seconds)
433 {
434         threshold->limit = limit * PF_THRESHOLD_MULT;
435         threshold->seconds = seconds;
436         threshold->count = 0;
437         threshold->last = time_second;
438 }
439
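/*
 * Rate-threshold arithmetic (sketch): counts are kept in fixed point,
 * scaled by PF_THRESHOLD_MULT.  Each call to pf_add_threshold() first
 * ages the accumulated count linearly over the configured window
 * (count -= count * diff / seconds, or a full reset once diff >= seconds)
 * and then adds one scaled unit.  For a "10 per 5 seconds" limit the
 * stored limit is 10 * PF_THRESHOLD_MULT, so pf_check_threshold() only
 * trips when more than about 10 additions survive the 5 second decay.
 */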
440 void
441 pf_add_threshold(struct pf_threshold *threshold)
442 {
443         u_int32_t t = time_second, diff = t - threshold->last;
444
445         if (diff >= threshold->seconds)
446                 threshold->count = 0;
447         else
448                 threshold->count -= threshold->count * diff /
449                     threshold->seconds;
450         threshold->count += PF_THRESHOLD_MULT;
451         threshold->last = t;
452 }
453
454 int
455 pf_check_threshold(struct pf_threshold *threshold)
456 {
457         return (threshold->count > threshold->limit);
458 }
459
460 int
461 pf_src_connlimit(struct pf_state *state)
462 {
463         int bad = 0;
464         int cpu = mycpu->gd_cpuid;
465
466         state->src_node->conn++;
467         state->src.tcp_est = 1;
468         pf_add_threshold(&state->src_node->conn_rate);
469
470         if (state->rule.ptr->max_src_conn &&
471             state->rule.ptr->max_src_conn <
472             state->src_node->conn) {
473                 pf_status.lcounters[LCNT_SRCCONN]++;
474                 bad++;
475         }
476
477         if (state->rule.ptr->max_src_conn_rate.limit &&
478             pf_check_threshold(&state->src_node->conn_rate)) {
479                 pf_status.lcounters[LCNT_SRCCONNRATE]++;
480                 bad++;
481         }
482
483         if (!bad)
484                 return 0;
485
486         if (state->rule.ptr->overload_tbl) {
487                 struct pfr_addr p;
488                 u_int32_t       killed = 0;
489
490                 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
491                 if (pf_status.debug >= PF_DEBUG_MISC) {
492                         kprintf("pf_src_connlimit: blocking address ");
493                         pf_print_host(&state->src_node->addr, 0,
494                             state->key[PF_SK_WIRE]->af);
495                 }
496
497                 bzero(&p, sizeof(p));
498                 p.pfra_af = state->key[PF_SK_WIRE]->af;
499                 switch (state->key[PF_SK_WIRE]->af) {
500 #ifdef INET
501                 case AF_INET:
502                         p.pfra_net = 32;
503                         p.pfra_ip4addr = state->src_node->addr.v4;
504                         break;
505 #endif /* INET */
506 #ifdef INET6
507                 case AF_INET6:
508                         p.pfra_net = 128;
509                         p.pfra_ip6addr = state->src_node->addr.v6;
510                         break;
511 #endif /* INET6 */
512                 }
513
514                 pfr_insert_kentry(state->rule.ptr->overload_tbl,
515                     &p, time_second);
516
517                 /* kill existing states if that's required. */
518                 if (state->rule.ptr->flush) {
519                         struct pf_state_key *sk;
520                         struct pf_state *st;
521
522                         pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
523                         RB_FOREACH(st, pf_state_tree_id, &tree_id[cpu]) {
524                                 sk = st->key[PF_SK_WIRE];
525                                 /*
526                                  * Kill states from this source.  (Only those
527                                  * from the same rule if PF_FLUSH_GLOBAL is not
528                                  * set).  (Only on current cpu).
529                                  */
530                                 if (sk->af ==
531                                     state->key[PF_SK_WIRE]->af &&
532                                     ((state->direction == PF_OUT &&
533                                     PF_AEQ(&state->src_node->addr,
534                                         &sk->addr[0], sk->af)) ||
535                                     (state->direction == PF_IN &&
536                                     PF_AEQ(&state->src_node->addr,
537                                         &sk->addr[1], sk->af))) &&
538                                     ((state->rule.ptr->flush &
539                                     PF_FLUSH_GLOBAL) ||
540                                     state->rule.ptr == st->rule.ptr)) {
541                                         st->timeout = PFTM_PURGE;
542                                         st->src.state = st->dst.state =
543                                             TCPS_CLOSED;
544                                         killed++;
545                                 }
546                         }
547                         if (pf_status.debug >= PF_DEBUG_MISC)
548                                 kprintf(", %u states killed", killed);
549                 }
550                 if (pf_status.debug >= PF_DEBUG_MISC)
551                         kprintf("\n");
552         }
553
554         /* kill this state */
555         state->timeout = PFTM_PURGE;
556         state->src.state = state->dst.state = TCPS_CLOSED;
557
558         return 1;
559 }
560
561 int
562 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
563     struct pf_addr *src, sa_family_t af)
564 {
565         struct pf_src_node      k;
566         int cpu = mycpu->gd_cpuid;
567
568         if (*sn == NULL) {
569                 k.af = af;
570                 PF_ACPY(&k.addr, src, af);
571                 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
572                     rule->rpool.opts & PF_POOL_STICKYADDR)
573                         k.rule.ptr = rule;
574                 else
575                         k.rule.ptr = NULL;
576                 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
577                 *sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
578         }
579         if (*sn == NULL) {
580                 if (!rule->max_src_nodes ||
581                     rule->src_nodes < rule->max_src_nodes)
582                         (*sn) = kmalloc(sizeof(struct pf_src_node),
583                                         M_PFSRCTREEPL, M_NOWAIT|M_ZERO);
584                 else
585                         pf_status.lcounters[LCNT_SRCNODES]++;
586                 if ((*sn) == NULL)
587                         return (-1);
588
589                 pf_init_threshold(&(*sn)->conn_rate,
590                     rule->max_src_conn_rate.limit,
591                     rule->max_src_conn_rate.seconds);
592
593                 (*sn)->af = af;
594                 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
595                     rule->rpool.opts & PF_POOL_STICKYADDR)
596                         (*sn)->rule.ptr = rule;
597                 else
598                         (*sn)->rule.ptr = NULL;
599                 PF_ACPY(&(*sn)->addr, src, af);
600                 if (RB_INSERT(pf_src_tree,
601                     &tree_src_tracking[cpu], *sn) != NULL) {
602                         if (pf_status.debug >= PF_DEBUG_MISC) {
603                                 kprintf("pf: src_tree insert failed: ");
604                                 pf_print_host(&(*sn)->addr, 0, af);
605                                 kprintf("\n");
606                         }
607                         kfree(*sn, M_PFSRCTREEPL);
608                         return (-1);
609                 }
610
611                 /*
612                  * Atomic op required to increment src_nodes in the rule
613                  * because we hold a shared token here (decrements will use
614                  * an exclusive token).
615                  */
616                 (*sn)->creation = time_second;
617                 (*sn)->ruletype = rule->action;
618                 if ((*sn)->rule.ptr != NULL)
619                         atomic_add_int(&(*sn)->rule.ptr->src_nodes, 1);
620                 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
621                 atomic_add_int(&pf_status.src_nodes, 1);
622         } else {
623                 if (rule->max_src_states &&
624                     (*sn)->states >= rule->max_src_states) {
625                         pf_status.lcounters[LCNT_SRCSTATES]++;
626                         return (-1);
627                 }
628         }
629         return (0);
630 }
631
632 /* state table stuff */
633
634 static __inline int
635 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
636 {
637         int     diff;
638
639         if ((diff = a->proto - b->proto) != 0)
640                 return (diff);
641         if ((diff = a->af - b->af) != 0)
642                 return (diff);
643         switch (a->af) {
644 #ifdef INET
645         case AF_INET:
646                 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
647                         return (1);
648                 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
649                         return (-1);
650                 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
651                         return (1);
652                 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
653                         return (-1);
654                 break;
655 #endif /* INET */
656 #ifdef INET6
657         case AF_INET6:
658                 if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
659                         return (1);
660                 if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
661                         return (-1);
662                 if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
663                         return (1);
664                 if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
665                         return (-1);
666                 if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
667                         return (1);
668                 if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
669                         return (-1);
670                 if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
671                         return (1);
672                 if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
673                         return (-1);
674                 if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
675                         return (1);
676                 if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
677                         return (-1);
678                 if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
679                         return (1);
680                 if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
681                         return (-1);
682                 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
683                         return (1);
684                 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
685                         return (-1);
686                 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
687                         return (1);
688                 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
689                         return (-1);
690                 break;
691 #endif /* INET6 */
692         }
693
694         if ((diff = a->port[0] - b->port[0]) != 0)
695                 return (diff);
696         if ((diff = a->port[1] - b->port[1]) != 0)
697                 return (diff);
698
699         return (0);
700 }
701
702 static __inline int
703 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
704 {
705         if (a->id > b->id)
706                 return (1);
707         if (a->id < b->id)
708                 return (-1);
709         if (a->creatorid > b->creatorid)
710                 return (1);
711         if (a->creatorid < b->creatorid)
712                 return (-1);
713
714         return (0);
715 }
716
717 int
718 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
719 {
720         struct pf_state_item    *si;
721         struct pf_state_key     *cur;
722         int cpu;
723         int error;
724
725         /*
726          * PFSTATE_STACK_GLOBAL is set for translations when the translated
727          * address/port is not localized to the same cpu that the untranslated
728          * address/port is on.  The wire pf_state_key is managed on the global
729          * statetbl tree for this case.
730          *
731          * However, it appears that RDR translations can wind up with
732          * a reversed WIRE/STACK specification, so atm we do not distinguish
733          * the direction.
734          */
735         if (s->state_flags & PFSTATE_STACK_GLOBAL) {
736                 cpu = MAXCPU;
737                 lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
738         } else {
739                 cpu = mycpu->gd_cpuid;
740         }
741
742         KKASSERT(s->key[idx] == NULL);  /* XXX handle this? */
743
744         if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl[cpu], sk)) != NULL) {
745                 /* key exists. check for same kif, if none, add to key */
746                 TAILQ_FOREACH(si, &cur->states, entry)
747                         if (si->s->kif == s->kif &&
748                             si->s->direction == s->direction) {
749                                 if (pf_status.debug >= PF_DEBUG_MISC) {
750                                         kprintf(
751                                             "pf: %s key attach failed on %s: ",
752                                             (idx == PF_SK_WIRE) ?
753                                             "wire" : "stack",
754                                             s->kif->pfik_name);
755                                         pf_print_state_parts(s,
756                                             (idx == PF_SK_WIRE) ? sk : NULL,
757                                             (idx == PF_SK_STACK) ? sk : NULL);
758                                         kprintf("\n");
759                                 }
760                                 kfree(sk, M_PFSTATEKEYPL);
761                                 error = -1;
762                                 goto failed;    /* collision! */
763                         }
764                 kfree(sk, M_PFSTATEKEYPL);
765
766                 s->key[idx] = cur;
767         } else {
768                 s->key[idx] = sk;
769         }
770
771         if ((si = kmalloc(sizeof(struct pf_state_item),
772                           M_PFSTATEITEMPL, M_NOWAIT)) == NULL) {
773                 pf_state_key_detach(s, idx);
774                 error = -1;
775                 goto failed;    /* allocation failure */
776         }
777         si->s = s;
778
779         /* list is sorted, if-bound states before floating */
780         if (s->kif == pfi_all)
781                 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
782         else
783                 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
784
785         error = 0;
786 failed:
787         if (s->state_flags & PFSTATE_STACK_GLOBAL)
788                 lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
789         return error;
790 }
791
792 /*
793  * NOTE: Can only be called indirectly via the purge thread with pf_token
794  *       exclusively locked.
795  */
796 void
797 pf_detach_state(struct pf_state *s)
798 {
799         if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
800                 s->key[PF_SK_WIRE] = NULL;
801
802         if (s->key[PF_SK_STACK] != NULL)
803                 pf_state_key_detach(s, PF_SK_STACK);
804
805         if (s->key[PF_SK_WIRE] != NULL)
806                 pf_state_key_detach(s, PF_SK_WIRE);
807 }
808
809 /*
810  * NOTE: Can only be called indirectly via the purge thread with pf_token
811  *       exclusively locked.
812  */
813 void
814 pf_state_key_detach(struct pf_state *s, int idx)
815 {
816         struct pf_state_item    *si;
817         int cpu;
818
819         /*
820          * PFSTATE_STACK_GLOBAL is set for translations when the translated
821          * address/port is not localized to the same cpu that the untranslated
822          * address/port is on.  The wire pf_state_key is managed on the global
823          * statetbl tree for this case.
824          */
825         if (s->state_flags & PFSTATE_STACK_GLOBAL) {
826                 cpu = MAXCPU;
827                 lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
828         } else {
829                 cpu = mycpu->gd_cpuid;
830         }
831
832         si = TAILQ_FIRST(&s->key[idx]->states);
833         while (si && si->s != s)
834                 si = TAILQ_NEXT(si, entry);
835
836         if (si) {
837                 TAILQ_REMOVE(&s->key[idx]->states, si, entry);
838                 kfree(si, M_PFSTATEITEMPL);
839         }
840
841         if (TAILQ_EMPTY(&s->key[idx]->states)) {
842                 RB_REMOVE(pf_state_tree, &pf_statetbl[cpu], s->key[idx]);
843                 if (s->key[idx]->reverse)
844                         s->key[idx]->reverse->reverse = NULL;
845                 if (s->key[idx]->inp)
846                         s->key[idx]->inp->inp_pf_sk = NULL;
847                 kfree(s->key[idx], M_PFSTATEKEYPL);
848         }
849         s->key[idx] = NULL;
850
851         if (s->state_flags & PFSTATE_STACK_GLOBAL)
852                 lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
853 }
854
855 struct pf_state_key *
856 pf_alloc_state_key(int pool_flags)
857 {
858         struct pf_state_key     *sk;
859
860         sk = kmalloc(sizeof(struct pf_state_key), M_PFSTATEKEYPL, pool_flags);
861         if (sk) {
862                 TAILQ_INIT(&sk->states);
863         }
864         return (sk);
865 }
866
867 int
868 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
869         struct pf_state_key **skw, struct pf_state_key **sks,
870         struct pf_state_key **skp, struct pf_state_key **nkp,
871         struct pf_addr *saddr, struct pf_addr *daddr,
872         u_int16_t sport, u_int16_t dport)
873 {
874         KKASSERT((*skp == NULL && *nkp == NULL));
875
876         if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
877                 return (ENOMEM);
878
879         PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
880         PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
881         (*skp)->port[pd->sidx] = sport;
882         (*skp)->port[pd->didx] = dport;
883         (*skp)->proto = pd->proto;
884         (*skp)->af = pd->af;
885
886         if (nr != NULL) {
887                 if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
888                         return (ENOMEM); /* caller must handle cleanup */
889
890                 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
891                 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
892                 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
893                 (*nkp)->port[0] = (*skp)->port[0];
894                 (*nkp)->port[1] = (*skp)->port[1];
895                 (*nkp)->proto = pd->proto;
896                 (*nkp)->af = pd->af;
897         } else
898                 *nkp = *skp;
899
900         if (pd->dir == PF_IN) {
901                 *skw = *skp;
902                 *sks = *nkp;
903         } else {
904                 *sks = *skp;
905                 *skw = *nkp;
906         }
907         return (0);
908 }
909
910 /*
911  * Insert pf_state with one or two state keys (allowing a reverse path lookup
912  * which is used by NAT).  In the NAT case skw is the initiator (?) and
913  * sks is the target.
914  */
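/*
 * Rough illustration (inferred from pf_state_key_setup(), not stated in
 * the comment above): the wire key holds the addresses/ports as they
 * appear on the wire and the stack key holds them as the local stack
 * sees them, so for a translated connection the two keys differ and the
 * key's ->reverse linkage lets the return direction be found without a
 * second tree lookup.
 */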
915 int
916 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
917                 struct pf_state_key *sks, struct pf_state *s)
918 {
919         int cpu = mycpu->gd_cpuid;
920
921         s->kif = kif;
922         s->cpuid = cpu;
923
924         if (skw == sks) {
925                 if (pf_state_key_attach(skw, s, PF_SK_WIRE))
926                         return (-1);
927                 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
928         } else {
929                 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
930                         kfree(sks, M_PFSTATEKEYPL);
931                         return (-1);
932                 }
933                 if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
934                         pf_state_key_detach(s, PF_SK_WIRE);
935                         return (-1);
936                 }
937         }
938
939         if (s->id == 0 && s->creatorid == 0) {
940                 u_int64_t sid;
941
942 #if __SIZEOF_LONG__ == 8
943                 sid = atomic_fetchadd_long(&pf_status.stateid, 1);
944 #else
945                 spin_lock(&pf_spin);
946                 sid = pf_status.stateid++;
947                 spin_unlock(&pf_spin);
948 #endif
949                 s->id = htobe64(sid);
950                 s->creatorid = pf_status.hostid;
951         }
952
953         /*
954          * Calculate hash code for altq
955          */
956         s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));
957
958         if (RB_INSERT(pf_state_tree_id, &tree_id[cpu], s) != NULL) {
959                 if (pf_status.debug >= PF_DEBUG_MISC) {
960                         kprintf("pf: state insert failed: "
961                             "id: %016jx creatorid: %08x",
962                               (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
963                         if (s->sync_flags & PFSTATE_FROMSYNC)
964                                 kprintf(" (from sync)");
965                         kprintf("\n");
966                 }
967                 pf_detach_state(s);
968                 return (-1);
969         }
970         TAILQ_INSERT_TAIL(&state_list[cpu], s, entry_list);
971         pf_status.fcounters[FCNT_STATE_INSERT]++;
972         atomic_add_int(&pf_status.states, 1);
973         pfi_kif_ref(kif, PFI_KIF_REF_STATE);
974         pfsync_insert_state(s);
975         return (0);
976 }
977
978 struct pf_state *
979 pf_find_state_byid(struct pf_state_cmp *key)
980 {
981         int cpu = mycpu->gd_cpuid;
982
983         pf_status.fcounters[FCNT_STATE_SEARCH]++;
984
985         return (RB_FIND(pf_state_tree_id, &tree_id[cpu],
986                         (struct pf_state *)key));
987 }
988
989 /*
990  * WARNING! May return a state structure that was localized to another cpu;
991  *          destruction is typically protected by the caller's pf_token.
992  *          The element can only be destroyed by the purge thread.
993  */
994 struct pf_state *
995 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
996               struct mbuf *m)
997 {
998         struct pf_state_key     *skey = (void *)key;
999         struct pf_state_key     *sk;
1000         struct pf_state_item    *si;
1001         struct pf_state *s;
1002         int cpu = mycpu->gd_cpuid;
1003         int globalstl = 0;
1004
1005         pf_status.fcounters[FCNT_STATE_SEARCH]++;
1006
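        /*
         * On the outbound pass the mbuf may carry the state key found on
         * the inbound pass; if that key already has its ->reverse pointer
         * cached, the RB tree lookup can be skipped entirely.  Otherwise
         * fall back to the per-cpu tree and then the global table, and
         * remember the pairing for the next packet.
         */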
1007         if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
1008             ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse) {
1009                 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
1010         } else {
1011                 sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
1012                 if (sk == NULL) {
1013                         lockmgr(&pf_global_statetbl_lock, LK_SHARED);
1014                         sk = RB_FIND(pf_state_tree, &pf_statetbl[MAXCPU], skey);
1015                         if (sk == NULL) {
1016                                 lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
1017                                 return (NULL);
1018                         }
1019                         globalstl = 1;
1020                 }
1021                 if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
1022                         ((struct pf_state_key *)
1023                             m->m_pkthdr.pf.statekey)->reverse = sk;
1024                         sk->reverse = m->m_pkthdr.pf.statekey;
1025                 }
1026         }
1027         if (dir == PF_OUT)
1028                 m->m_pkthdr.pf.statekey = NULL;
1029
1030         /* list is sorted, if-bound states before floating ones */
1031         TAILQ_FOREACH(si, &sk->states, entry) {
1032                 if ((si->s->kif == pfi_all || si->s->kif == kif) &&
1033                     sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1034                     si->s->key[PF_SK_STACK])) {
1035                         break;
1036                 }
1037         }
1038
1039         /*
1040          * Extract state before potentially releasing the global statetbl
1041          * lock.  Ignore the state if the create is still in-progress as
1042          * it can be deleted out from under us by the owning localized cpu.
1043          * However, if CREATEINPROG is not set, state can only be deleted
1044          * by the purge thread which we are protected from via our shared
1045          * pf_token.
1046          */
1047         if (si) {
1048                 s = si->s;
1049                 if (s && (s->state_flags & PFSTATE_CREATEINPROG))
1050                         s = NULL;
1051         } else {
1052                 s = NULL;
1053         }
1054         if (globalstl)
1055                 lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
1056         return s;
1057 }
1058
1059 /*
1060  * WARNING! May return a state structure that was localized to another cpu;
1061  *          destruction is typically protected by the caller's pf_token.
1062  */
1063 struct pf_state *
1064 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1065 {
1066         struct pf_state_key     *skey = (void *)key;
1067         struct pf_state_key     *sk;
1068         struct pf_state_item    *si, *ret = NULL;
1069         struct pf_state         *s;
1070         int cpu = mycpu->gd_cpuid;
1071         int globalstl = 0;
1072
1073         pf_status.fcounters[FCNT_STATE_SEARCH]++;
1074
1075         sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
1076         if (sk == NULL) {
1077                 lockmgr(&pf_global_statetbl_lock, LK_SHARED);
1078                 sk = RB_FIND(pf_state_tree, &pf_statetbl[MAXCPU], skey);
1079                 globalstl = 1;
1080         }
1081         if (sk != NULL) {
1082                 TAILQ_FOREACH(si, &sk->states, entry)
1083                         if (dir == PF_INOUT ||
1084                             (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
1085                             si->s->key[PF_SK_STACK]))) {
1086                                 if (more == NULL) {
1087                                         ret = si;
1088                                         break;
1089                                 }
1090                                 if (ret)
1091                                         (*more)++;
1092                                 else
1093                                         ret = si;
1094                         }
1095         }
1096
1097         /*
1098          * Extract state before potentially releasing the global statetbl
1099          * lock.  Ignore the state if the create is still in-progress as
1100          * it can be deleted out from under us by the owning localized cpu.
1101          * However, if CREATEINPROG is not set, state can only be deleted
1102          * by the purge thread which we are protected from via our shared
1103          * pf_token.
1104          */
1105         if (ret) {
1106                 s = ret->s;
1107                 if (s && (s->state_flags & PFSTATE_CREATEINPROG))
1108                         s = NULL;
1109         } else {
1110                 s = NULL;
1111         }
1112         if (globalstl)
1113                 lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
1114         return s;
1115 }
1116
1117 /* END state table stuff */
1118
1119 void
1120 pf_purge_thread(void *v)
1121 {
1122         globaldata_t save_gd = mycpu;
1123         int nloops = 0;
1124         int locked = 0;
1125         int nn;
1126         int endingit;
1127
1128         for (;;) {
1129                 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
1130
1131                 endingit = pf_end_threads;
1132
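                /*
                 * Migrate onto each cpu in turn so that every per-cpu
                 * state table, id tree and source-node tree is purged by
                 * its owning cpu; the original cpu is restored at the end
                 * of the sweep.
                 */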
1133                 for (nn = 0; nn < ncpus; ++nn) {
1134                         lwkt_setcpu_self(globaldata_find(nn));
1135
1136                         lwkt_gettoken(&pf_token);
1137                         lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
1138                         crit_enter();
1139
1140                         /*
1141                          * process a fraction of the state table every second
1142                          */
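                        /*
                         * e.g. with 20000 states and a PFTM_INTERVAL of
                         * 10 seconds (the usual default) this checks about
                         * 2001 states per one-second wakeup, so the whole
                         * table is scanned roughly once per interval.
                         */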
1143                         if (!pf_purge_expired_states(
1144                                 1 + (pf_status.states /
1145                                      pf_default_rule.timeout[
1146                                         PFTM_INTERVAL]), 0)) {
1147                                 pf_purge_expired_states(
1148                                         1 + (pf_status.states /
1149                                              pf_default_rule.timeout[
1150                                                 PFTM_INTERVAL]), 1);
1151                         }
1152
1153                         /*
1154                          * purge other expired types every PFTM_INTERVAL
1155                          * seconds
1156                          */
1157                         if (++nloops >=
1158                             pf_default_rule.timeout[PFTM_INTERVAL]) {
1159                                 pf_purge_expired_fragments();
1160                                 if (!pf_purge_expired_src_nodes(locked)) {
1161                                         pf_purge_expired_src_nodes(1);
1162                                 }
1163                                 nloops = 0;
1164                         }
1165
1166                         /*
1167                          * If terminating the thread, clean everything out
1168                          * (on all cpus).
1169                          */
1170                         if (endingit) {
1171                                 pf_purge_expired_states(pf_status.states, 0);
1172                                 pf_purge_expired_fragments();
1173                                 pf_purge_expired_src_nodes(1);
1174                         }
1175
1176                         crit_exit();
1177                         lockmgr(&pf_consistency_lock, LK_RELEASE);
1178                         lwkt_reltoken(&pf_token);
1179                 }
1180                 lwkt_setcpu_self(save_gd);
1181                 if (endingit)
1182                         break;
1183         }
1184
1185         /*
1186          * Thread termination
1187          */
1188         pf_end_threads++;
1189         wakeup(pf_purge_thread);
1190         kthread_exit();
1191 }
1192
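/*
 * Adaptive timeout scaling, worked example using the formula below: with
 * adaptive.start 6000, adaptive.end 12000 and 9000 states installed, a
 * 60 second timeout is scaled by (12000 - 9000) / (12000 - 6000) and
 * becomes 30 seconds; once the state count reaches adaptive.end, states
 * expire immediately.
 */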
1193 u_int32_t
1194 pf_state_expires(const struct pf_state *state)
1195 {
1196         u_int32_t       timeout;
1197         u_int32_t       start;
1198         u_int32_t       end;
1199         u_int32_t       states;
1200
1201         /* handle all PFTM_* > PFTM_MAX here */
1202         if (state->timeout == PFTM_PURGE)
1203                 return (time_second);
1204         if (state->timeout == PFTM_UNTIL_PACKET)
1205                 return (0);
1206         KKASSERT(state->timeout != PFTM_UNLINKED);
1207         KKASSERT(state->timeout < PFTM_MAX);
1208         timeout = state->rule.ptr->timeout[state->timeout];
1209         if (!timeout)
1210                 timeout = pf_default_rule.timeout[state->timeout];
1211         start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1212         if (start) {
1213                 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1214                 states = state->rule.ptr->states_cur;
1215         } else {
1216                 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1217                 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1218                 states = pf_status.states;
1219         }
1220         if (end && states > start && start < end) {
1221                 if (states < end)
1222                         return (state->expire + timeout * (end - states) /
1223                             (end - start));
1224                 else
1225                         return (time_second);
1226         }
1227         return (state->expire + timeout);
1228 }
1229
1230 /*
1231  * (called with exclusive pf_token)
1232  */
1233 int
1234 pf_purge_expired_src_nodes(int waslocked)
1235 {
1236         struct pf_src_node *cur, *next;
1237         int locked = waslocked;
1238         int cpu = mycpu->gd_cpuid;
1239
1240         for (cur = RB_MIN(pf_src_tree, &tree_src_tracking[cpu]);
1241              cur;
1242              cur = next) {
1243                 next = RB_NEXT(pf_src_tree, &tree_src_tracking[cpu], cur);
1244
1245                 if (cur->states <= 0 && cur->expire <= time_second) {
1246                          if (!locked) {
1247                                  lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
1248                                  next = RB_NEXT(pf_src_tree,
1249                                      &tree_src_tracking[cpu], cur);
1250                                  locked = 1;
1251                          }
1252                          if (cur->rule.ptr != NULL) {
1253                                  cur->rule.ptr->src_nodes--;
1254                                  if (cur->rule.ptr->states_cur <= 0 &&
1255                                      cur->rule.ptr->max_src_nodes <= 0)
1256                                          pf_rm_rule(NULL, cur->rule.ptr);
1257                          }
1258                          RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], cur);
1259                          pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1260                          atomic_add_int(&pf_status.src_nodes, -1);
1261                          kfree(cur, M_PFSRCTREEPL);
1262                 }
1263         }
1264         if (locked && !waslocked)
1265                 lockmgr(&pf_consistency_lock, LK_RELEASE);
1266         return(1);
1267 }
1268
1269 void
1270 pf_src_tree_remove_state(struct pf_state *s)
1271 {
1272         u_int32_t timeout;
1273
1274         if (s->src_node != NULL) {
1275                 if (s->src.tcp_est)
1276                         --s->src_node->conn;
1277                 if (--s->src_node->states <= 0) {
1278                         timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1279                         if (!timeout) {
1280                                 timeout =
1281                                     pf_default_rule.timeout[PFTM_SRC_NODE];
1282                         }
1283                         s->src_node->expire = time_second + timeout;
1284                 }
1285         }
1286         if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1287                 if (--s->nat_src_node->states <= 0) {
1288                         timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1289                         if (!timeout)
1290                                 timeout =
1291                                     pf_default_rule.timeout[PFTM_SRC_NODE];
1292                         s->nat_src_node->expire = time_second + timeout;
1293                 }
1294         }
1295         s->src_node = s->nat_src_node = NULL;
1296 }
1297
1298 /* callers should be at crit_enter() */
1299 void
1300 pf_unlink_state(struct pf_state *cur)
1301 {
1302         int cpu = mycpu->gd_cpuid;
1303
1304         if (cur->src.state == PF_TCPS_PROXY_DST) {
1305                 /* XXX wire key the right one? */
1306                 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1307                     &cur->key[PF_SK_WIRE]->addr[1],
1308                     &cur->key[PF_SK_WIRE]->addr[0],
1309                     cur->key[PF_SK_WIRE]->port[1],
1310                     cur->key[PF_SK_WIRE]->port[0],
1311                     cur->src.seqhi, cur->src.seqlo + 1,
1312                     TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1313         }
1314         RB_REMOVE(pf_state_tree_id, &tree_id[cpu], cur);
1315         if (cur->creatorid == pf_status.hostid)
1316                 pfsync_delete_state(cur);
1317         cur->timeout = PFTM_UNLINKED;
1318         pf_src_tree_remove_state(cur);
1319         pf_detach_state(cur);
1320 }
1321
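/*
 * Per-cpu scan cursor for pf_purge_expired_states(); pf_free_state()
 * advances it when the state being freed is the saved cursor.
 */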
1322 static struct pf_state  *purge_cur[MAXCPU];
1323
1324 /*
1325  * callers should be at crit_enter() and hold pf_consistency_lock exclusively.
1326  * pf_token must also be held exclusively.
1327  */
1328 void
1329 pf_free_state(struct pf_state *cur)
1330 {
1331         int cpu = mycpu->gd_cpuid;
1332
1333         KKASSERT(cur->cpuid == cpu);
1334
1335         if (pfsyncif != NULL &&
1336             (pfsyncif->sc_bulk_send_next == cur ||
1337             pfsyncif->sc_bulk_terminator == cur))
1338                 return;
1339         KKASSERT(cur->timeout == PFTM_UNLINKED);
1340         if (--cur->rule.ptr->states_cur <= 0 &&
1341             cur->rule.ptr->src_nodes <= 0)
1342                 pf_rm_rule(NULL, cur->rule.ptr);
1343         if (cur->nat_rule.ptr != NULL) {
1344                 if (--cur->nat_rule.ptr->states_cur <= 0 &&
1345                         cur->nat_rule.ptr->src_nodes <= 0) {
1346                         pf_rm_rule(NULL, cur->nat_rule.ptr);
1347                 }
1348         }
1349         if (cur->anchor.ptr != NULL) {
1350                 if (--cur->anchor.ptr->states_cur <= 0)
1351                         pf_rm_rule(NULL, cur->anchor.ptr);
1352         }
1353         pf_normalize_tcp_cleanup(cur);
1354         pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1355
1356         /*
1357          * We may be freeing pf_purge_expired_states()'s saved scan entry,
1358          * adjust it if necessary.
1359          */
1360         if (purge_cur[cpu] == cur) {
1361                 kprintf("PURGE CONFLICT\n");
1362                 purge_cur[cpu] = TAILQ_NEXT(purge_cur[cpu], entry_list);
1363         }
1364         TAILQ_REMOVE(&state_list[cpu], cur, entry_list);
1365         if (cur->tag)
1366                 pf_tag_unref(cur->tag);
1367         kfree(cur, M_PFSTATEPL);
1368         pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1369         atomic_add_int(&pf_status.states, -1);
1370 }
1371
1372 int
1373 pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
1374 {
1375         struct pf_state         *cur;
1376         int locked = waslocked;
1377         int cpu = mycpu->gd_cpuid;
1378
1379         while (maxcheck--) {
1380                 /*
1381                  * Wrap to start of list when we hit the end
1382                  */
1383                 cur = purge_cur[cpu];
1384                 if (cur == NULL) {
1385                         cur = TAILQ_FIRST(&state_list[cpu]);
1386                         if (cur == NULL)
1387                                 break;  /* list empty */
1388                 }
1389
1390                 /*
1391                  * Setup next (purge_cur) while we process this one.  If
1392                  * we block and something else deletes purge_cur,
1393                  * pf_free_state() will adjust it further ahead.
1394                  */
1395                 purge_cur[cpu] = TAILQ_NEXT(cur, entry_list);
1396
1397                 if (cur->timeout == PFTM_UNLINKED) {
1398                         /* free unlinked state */
1399                         if (! locked) {
1400                                 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
1401                                 locked = 1;
1402                         }
1403                         pf_free_state(cur);
1404                 } else if (pf_state_expires(cur) <= time_second) {
1405                         /* unlink and free expired state */
1406                         pf_unlink_state(cur);
1407                         if (! locked) {
1408                                 if (!lockmgr(&pf_consistency_lock, LK_EXCLUSIVE))
1409                                         return (0);
1410                                 locked = 1;
1411                         }
1412                         pf_free_state(cur);
1413                 }
1414         }
1415
1416         if (locked)
1417                 lockmgr(&pf_consistency_lock, LK_RELEASE);
1418         return (1);
1419 }
1420
1421 int
1422 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1423 {
1424         if (aw->type != PF_ADDR_TABLE)
1425                 return (0);
1426         if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1427                 return (1);
1428         return (0);
1429 }
1430
1431 void
1432 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1433 {
1434         if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1435                 return;
1436         pfr_detach_table(aw->p.tbl);
1437         aw->p.tbl = NULL;
1438 }
1439
1440 void
1441 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1442 {
1443         struct pfr_ktable *kt = aw->p.tbl;
1444
1445         if (aw->type != PF_ADDR_TABLE || kt == NULL)
1446                 return;
1447         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1448                 kt = kt->pfrkt_root;
1449         aw->p.tbl = NULL;
1450         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1451                 kt->pfrkt_cnt : -1;
1452 }
1453
1454 void
1455 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1456 {
1457         switch (af) {
1458 #ifdef INET
1459         case AF_INET: {
1460                 u_int32_t a = ntohl(addr->addr32[0]);
1461                 kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1462                     (a>>8)&255, a&255);
1463                 if (p) {
1464                         p = ntohs(p);
1465                         kprintf(":%u", p);
1466                 }
1467                 break;
1468         }
1469 #endif /* INET */
1470 #ifdef INET6
1471         case AF_INET6: {
1472                 u_int16_t b;
1473                 u_int8_t i, curstart = 255, curend = 0,
1474                     maxstart = 0, maxend = 0;
1475                 for (i = 0; i < 8; i++) {
1476                         if (!addr->addr16[i]) {
1477                                 if (curstart == 255)
1478                                         curstart = i;
1479                                 else
1480                                         curend = i;
1481                         } else {
1482                                 if (curstart != 255) {
1483                                         if ((curend - curstart) >
1484                                             (maxend - maxstart)) {
1485                                                 maxstart = curstart;
1486                                                 maxend = curend;
1487                                                 curstart = 255;
1488                                         }
1489                                 }
1490                         }
1491                 }
1492                 for (i = 0; i < 8; i++) {
1493                         if (i >= maxstart && i <= maxend) {
1494                                 if (maxend != 7) {
1495                                         if (i == maxstart)
1496                                                 kprintf(":");
1497                                 } else {
1498                                         if (i == maxend)
1499                                                 kprintf(":");
1500                                 }
1501                         } else {
1502                                 b = ntohs(addr->addr16[i]);
1503                                 kprintf("%x", b);
1504                                 if (i < 7)
1505                                         kprintf(":");
1506                         }
1507                 }
1508                 if (p) {
1509                         p = ntohs(p);
1510                         kprintf("[%u]", p);
1511                 }
1512                 break;
1513         }
1514 #endif /* INET6 */
1515         }
1516 }
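
/*
 * Illustrative sketch (not part of pf): a minimal use of the debug printer
 * above.  The sample address, port, and expected output are assumptions for
 * demonstration only.
 */
#if 0
static void
pf_print_host_example(void)
{
        struct pf_addr a;

        /* 192.0.2.1, port 80 (both in network byte order) */
        a.v4.s_addr = htonl(0xc0000201);
        pf_print_host(&a, htons(80), AF_INET);  /* prints "192.0.2.1:80" */
        kprintf("\n");
}
#endif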
1517
1518 void
1519 pf_print_state(struct pf_state *s)
1520 {
1521         pf_print_state_parts(s, NULL, NULL);
1522 }
1523
1524 void
1525 pf_print_state_parts(struct pf_state *s,
1526     struct pf_state_key *skwp, struct pf_state_key *sksp)
1527 {
1528         struct pf_state_key *skw, *sks;
1529         u_int8_t proto, dir;
1530
1531         /* Do our best to fill these, but they're skipped if NULL */
1532         skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1533         sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1534         proto = skw ? skw->proto : (sks ? sks->proto : 0);
1535         dir = s ? s->direction : 0;
1536
1537         switch (proto) {
1538         case IPPROTO_TCP:
1539                 kprintf("TCP ");
1540                 break;
1541         case IPPROTO_UDP:
1542                 kprintf("UDP ");
1543                 break;
1544         case IPPROTO_ICMP:
1545                 kprintf("ICMP ");
1546                 break;
1547         case IPPROTO_ICMPV6:
1548                 kprintf("ICMPV6 ");
1549                 break;
1550         default:
1551                 kprintf("%u ", proto);
1552                 break;
1553         }
1554         switch (dir) {
1555         case PF_IN:
1556                 kprintf(" in");
1557                 break;
1558         case PF_OUT:
1559                 kprintf(" out");
1560                 break;
1561         }
1562         if (skw) {
1563                 kprintf(" wire: ");
1564                 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1565                 kprintf(" ");
1566                 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1567         }
1568         if (sks) {
1569                 kprintf(" stack: ");
1570                 if (sks != skw) {
1571                         pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1572                         kprintf(" ");
1573                         pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1574                 } else
1575                         kprintf("-");
1576         }
1577         if (s) {
1578                 if (proto == IPPROTO_TCP) {
1579                         kprintf(" [lo=%u high=%u win=%u modulator=%u",
1580                             s->src.seqlo, s->src.seqhi,
1581                             s->src.max_win, s->src.seqdiff);
1582                         if (s->src.wscale && s->dst.wscale)
1583                                 kprintf(" wscale=%u",
1584                                     s->src.wscale & PF_WSCALE_MASK);
1585                         kprintf("]");
1586                         kprintf(" [lo=%u high=%u win=%u modulator=%u",
1587                             s->dst.seqlo, s->dst.seqhi,
1588                             s->dst.max_win, s->dst.seqdiff);
1589                         if (s->src.wscale && s->dst.wscale)
1590                                 kprintf(" wscale=%u",
1591                                 s->dst.wscale & PF_WSCALE_MASK);
1592                         kprintf("]");
1593                 }
1594                 kprintf(" %u:%u", s->src.state, s->dst.state);
1595         }
1596 }
1597
1598 void
1599 pf_print_flags(u_int8_t f)
1600 {
1601         if (f)
1602                 kprintf(" ");
1603         if (f & TH_FIN)
1604                 kprintf("F");
1605         if (f & TH_SYN)
1606                 kprintf("S");
1607         if (f & TH_RST)
1608                 kprintf("R");
1609         if (f & TH_PUSH)
1610                 kprintf("P");
1611         if (f & TH_ACK)
1612                 kprintf("A");
1613         if (f & TH_URG)
1614                 kprintf("U");
1615         if (f & TH_ECE)
1616                 kprintf("E");
1617         if (f & TH_CWR)
1618                 kprintf("W");
1619 }
1620
1621 #define PF_SET_SKIP_STEPS(i)                                    \
1622         do {                                                    \
1623                 while (head[i] != cur) {                        \
1624                         head[i]->skip[i].ptr = cur;             \
1625                         head[i] = TAILQ_NEXT(head[i], entries); \
1626                 }                                               \
1627         } while (0)
1628
1629 void
1630 pf_calc_skip_steps(struct pf_rulequeue *rules)
1631 {
1632         struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1633         int i;
1634
1635         cur = TAILQ_FIRST(rules);
1636         prev = cur;
1637         for (i = 0; i < PF_SKIP_COUNT; ++i)
1638                 head[i] = cur;
1639         while (cur != NULL) {
1640
1641                 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1642                         PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1643                 if (cur->direction != prev->direction)
1644                         PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1645                 if (cur->af != prev->af)
1646                         PF_SET_SKIP_STEPS(PF_SKIP_AF);
1647                 if (cur->proto != prev->proto)
1648                         PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1649                 if (cur->src.neg != prev->src.neg ||
1650                     pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1651                         PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1652                 if (cur->src.port[0] != prev->src.port[0] ||
1653                     cur->src.port[1] != prev->src.port[1] ||
1654                     cur->src.port_op != prev->src.port_op)
1655                         PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1656                 if (cur->dst.neg != prev->dst.neg ||
1657                     pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1658                         PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1659                 if (cur->dst.port[0] != prev->dst.port[0] ||
1660                     cur->dst.port[1] != prev->dst.port[1] ||
1661                     cur->dst.port_op != prev->dst.port_op)
1662                         PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1663
1664                 prev = cur;
1665                 cur = TAILQ_NEXT(cur, entries);
1666         }
1667         for (i = 0; i < PF_SKIP_COUNT; ++i)
1668                 PF_SET_SKIP_STEPS(i);
1669 }
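
/*
 * Illustrative sketch (not part of pf): how the skip steps computed above
 * are consumed.  Consecutive rules that agree on a parameter form a run;
 * when that parameter does not match the packet, evaluation can jump over
 * the whole run via r->skip[] instead of testing each rule in turn.  This
 * is a simplified fragment of the kind of walk the rule evaluator performs,
 * with 'kif', 'af' and 'proto' standing in for the packet being tested.
 */
#if 0
        while (r != NULL) {
                if (pfi_kif_match(r->kif, kif) == r->ifnot)
                        r = r->skip[PF_SKIP_IFP].ptr;   /* skip whole run */
                else if (r->af && r->af != af)
                        r = r->skip[PF_SKIP_AF].ptr;
                else if (r->proto && r->proto != proto)
                        r = r->skip[PF_SKIP_PROTO].ptr;
                else
                        break;                          /* candidate rule */
        }
#endif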
1670
1671 int
1672 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1673 {
1674         if (aw1->type != aw2->type)
1675                 return (1);
1676         switch (aw1->type) {
1677         case PF_ADDR_ADDRMASK:
1678         case PF_ADDR_RANGE:
1679                 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1680                         return (1);
1681                 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1682                         return (1);
1683                 return (0);
1684         case PF_ADDR_DYNIFTL:
1685                 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1686         case PF_ADDR_NOROUTE:
1687         case PF_ADDR_URPFFAILED:
1688                 return (0);
1689         case PF_ADDR_TABLE:
1690                 return (aw1->p.tbl != aw2->p.tbl);
1691         case PF_ADDR_RTLABEL:
1692                 return (aw1->v.rtlabel != aw2->v.rtlabel);
1693         default:
1694                 kprintf("invalid address type: %d\n", aw1->type);
1695                 return (1);
1696         }
1697 }
1698
1699 u_int16_t
1700 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
1701 {
1702         u_int32_t       l;
1703
1704         if (udp && !cksum)
1705                 return (0x0000);
1706         l = cksum + old - new;
1707         l = (l >> 16) + (l & 65535);
1708         l = l & 65535;
1709         if (udp && !l)
1710                 return (0xFFFF);
1711         return (l);
1712 }
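
/*
 * Illustrative example (not part of pf): pf_cksum_fixup() updates a 16-bit
 * ones-complement checksum incrementally when a single 16-bit field changes,
 * instead of recomputing the sum over the whole packet.  Conceptually it
 * adds the old value back in, subtracts the new one, and folds the carry.
 * The numbers below are made up for demonstration.
 */
#if 0
static void
pf_cksum_fixup_example(void)
{
        u_int16_t sum;

        /*
         * Say a checksum is 0x1234 and a 16-bit field changes from 0x0400
         * to 0x1000 (values as they appear in the checksum arithmetic):
         *
         *   l = 0x1234 + 0x0400 - 0x1000 = 0x0634
         *   fold: (0x0634 >> 16) + (0x0634 & 0xffff) = 0x0634
         *
         * so the updated checksum is 0x0634.  The udp argument preserves
         * the special meaning of a zero UDP checksum ("no checksum").
         */
        sum = pf_cksum_fixup(0x1234, 0x0400, 0x1000, 0);
        kprintf("fixed up checksum: 0x%04x\n", sum);
}
#endif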
1713
1714 void
1715 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
1716     struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
1717 {
1718         struct pf_addr  ao;
1719         u_int16_t       po = *p;
1720
1721         PF_ACPY(&ao, a, af);
1722         PF_ACPY(a, an, af);
1723
1724         *p = pn;
1725
1726         switch (af) {
1727 #ifdef INET
1728         case AF_INET:
1729                 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1730                     ao.addr16[0], an->addr16[0], 0),
1731                     ao.addr16[1], an->addr16[1], 0);
1732                 *p = pn;
1733                 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1734                     ao.addr16[0], an->addr16[0], u),
1735                     ao.addr16[1], an->addr16[1], u),
1736                     po, pn, u);
1737                 break;
1738 #endif /* INET */
1739 #ifdef INET6
1740         case AF_INET6:
1741                 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1742                     pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1743                     pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1744                     ao.addr16[0], an->addr16[0], u),
1745                     ao.addr16[1], an->addr16[1], u),
1746                     ao.addr16[2], an->addr16[2], u),
1747                     ao.addr16[3], an->addr16[3], u),
1748                     ao.addr16[4], an->addr16[4], u),
1749                     ao.addr16[5], an->addr16[5], u),
1750                     ao.addr16[6], an->addr16[6], u),
1751                     ao.addr16[7], an->addr16[7], u),
1752                     po, pn, u);
1753                 break;
1754 #endif /* INET6 */
1755         }
1756 }
1757
1758
1759 /* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
1760 void
1761 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
1762 {
1763         u_int32_t       ao;
1764
1765         memcpy(&ao, a, sizeof(ao));
1766         memcpy(a, &an, sizeof(u_int32_t));
1767         *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
1768             ao % 65536, an % 65536, u);
1769 }
1770
1771 #ifdef INET6
1772 void
1773 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
1774 {
1775         struct pf_addr  ao;
1776
1777         PF_ACPY(&ao, a, AF_INET6);
1778         PF_ACPY(a, an, AF_INET6);
1779
1780         *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1781             pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1782             pf_cksum_fixup(pf_cksum_fixup(*c,
1783             ao.addr16[0], an->addr16[0], u),
1784             ao.addr16[1], an->addr16[1], u),
1785             ao.addr16[2], an->addr16[2], u),
1786             ao.addr16[3], an->addr16[3], u),
1787             ao.addr16[4], an->addr16[4], u),
1788             ao.addr16[5], an->addr16[5], u),
1789             ao.addr16[6], an->addr16[6], u),
1790             ao.addr16[7], an->addr16[7], u);
1791 }
1792 #endif /* INET6 */
1793
1794 void
1795 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
1796     struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
1797     u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
1798 {
1799         struct pf_addr  oia, ooa;
1800
1801         PF_ACPY(&oia, ia, af);
1802         if (oa)
1803                 PF_ACPY(&ooa, oa, af);
1804
1805         /* Change inner protocol port, fix inner protocol checksum. */
1806         if (ip != NULL) {
1807                 u_int16_t       oip = *ip;
1808                 u_int32_t       opc = 0;
1809
1810                 if (pc != NULL)
1811                         opc = *pc;
1812                 *ip = np;
1813                 if (pc != NULL)
1814                         *pc = pf_cksum_fixup(*pc, oip, *ip, u);
1815                 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
1816                 if (pc != NULL)
1817                         *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
1818         }
1819         /* Change inner ip address, fix inner ip and icmp checksums. */
1820         PF_ACPY(ia, na, af);
1821         switch (af) {
1822 #ifdef INET
1823         case AF_INET: {
1824                 u_int32_t        oh2c = *h2c;
1825
1826                 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
1827                     oia.addr16[0], ia->addr16[0], 0),
1828                     oia.addr16[1], ia->addr16[1], 0);
1829                 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1830                     oia.addr16[0], ia->addr16[0], 0),
1831                     oia.addr16[1], ia->addr16[1], 0);
1832                 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
1833                 break;
1834         }
1835 #endif /* INET */
1836 #ifdef INET6
1837         case AF_INET6:
1838                 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1839                     pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1840                     pf_cksum_fixup(pf_cksum_fixup(*ic,
1841                     oia.addr16[0], ia->addr16[0], u),
1842                     oia.addr16[1], ia->addr16[1], u),
1843                     oia.addr16[2], ia->addr16[2], u),
1844                     oia.addr16[3], ia->addr16[3], u),
1845                     oia.addr16[4], ia->addr16[4], u),
1846                     oia.addr16[5], ia->addr16[5], u),
1847                     oia.addr16[6], ia->addr16[6], u),
1848                     oia.addr16[7], ia->addr16[7], u);
1849                 break;
1850 #endif /* INET6 */
1851         }
1852         /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
1853         if (oa) {
1854                 PF_ACPY(oa, na, af);
1855                 switch (af) {
1856 #ifdef INET
1857                 case AF_INET:
1858                         *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
1859                             ooa.addr16[0], oa->addr16[0], 0),
1860                             ooa.addr16[1], oa->addr16[1], 0);
1861                         break;
1862 #endif /* INET */
1863 #ifdef INET6
1864                 case AF_INET6:
1865                         *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1866                             pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1867                             pf_cksum_fixup(pf_cksum_fixup(*ic,
1868                             ooa.addr16[0], oa->addr16[0], u),
1869                             ooa.addr16[1], oa->addr16[1], u),
1870                             ooa.addr16[2], oa->addr16[2], u),
1871                             ooa.addr16[3], oa->addr16[3], u),
1872                             ooa.addr16[4], oa->addr16[4], u),
1873                             ooa.addr16[5], oa->addr16[5], u),
1874                             ooa.addr16[6], oa->addr16[6], u),
1875                             ooa.addr16[7], oa->addr16[7], u);
1876                         break;
1877 #endif /* INET6 */
1878                 }
1879         }
1880 }
1881
1882
1883 /*
1884  * Need to modulate the sequence numbers in the TCP SACK option
1885  * (credits to Krzysztof Pfaff for report and patch)
1886  */
1887 int
1888 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
1889     struct tcphdr *th, struct pf_state_peer *dst)
1890 {
1891         int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
1892         u_int8_t opts[TCP_MAXOLEN], *opt = opts;
1893         int copyback = 0, i, olen;
1894         struct raw_sackblock sack;
1895
1896 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
1897         if (hlen < TCPOLEN_SACKLEN ||
1898             !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
1899                 return (0);
1900
1901         while (hlen >= TCPOLEN_SACKLEN) {
1902                 olen = opt[1];
1903                 switch (*opt) {
1904                 case TCPOPT_EOL:        /* FALLTHROUGH */
1905                 case TCPOPT_NOP:
1906                         opt++;
1907                         hlen--;
1908                         break;
1909                 case TCPOPT_SACK:
1910                         if (olen > hlen)
1911                                 olen = hlen;
1912                         if (olen >= TCPOLEN_SACKLEN) {
1913                                 for (i = 2; i + TCPOLEN_SACK <= olen;
1914                                     i += TCPOLEN_SACK) {
1915                                         memcpy(&sack, &opt[i], sizeof(sack));
1916                                         pf_change_a(&sack.rblk_start, &th->th_sum,
1917                                             htonl(ntohl(sack.rblk_start) -
1918                                             dst->seqdiff), 0);
1919                                         pf_change_a(&sack.rblk_end, &th->th_sum,
1920                                             htonl(ntohl(sack.rblk_end) -
1921                                             dst->seqdiff), 0);
1922                                         memcpy(&opt[i], &sack, sizeof(sack));
1923                                 }
1924                                 copyback = 1;
1925                         }
1926                         /* FALLTHROUGH */
1927                 default:
1928                         if (olen < 2)
1929                                 olen = 2;
1930                         hlen -= olen;
1931                         opt += olen;
1932                 }
1933         }
1934
1935         if (copyback)
1936                 m_copyback(m, off + sizeof(*th), thoptlen, opts);
1937         return (copyback);
1938 }
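
/*
 * Illustrative example (not part of pf): when a state modulates TCP
 * sequence numbers (seqdiff != 0), the edges carried in SACK options must
 * be shifted by the same offset, otherwise the peer would receive SACK
 * blocks that refer to the unmodulated sequence space.  With made-up
 * numbers: if dst->seqdiff is 1000 and a packet carries the SACK block
 * [100500, 101000), pf_modulate_sack() rewrites it to [99500, 100000) and
 * fixes th_sum incrementally via pf_change_a() for each 32-bit edge.
 */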
1939
1940 void
1941 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
1942     const struct pf_addr *saddr, const struct pf_addr *daddr,
1943     u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
1944     u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
1945     u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
1946 {
1947         struct mbuf     *m;
1948         int              len = 0, tlen;
1949 #ifdef INET
1950         struct ip       *h = NULL;
1951 #endif /* INET */
1952 #ifdef INET6
1953         struct ip6_hdr  *h6 = NULL;
1954 #endif /* INET6 */
1955         struct tcphdr   *th = NULL;
1956         char            *opt;
1957
1958         ASSERT_LWKT_TOKEN_HELD(&pf_token);
1959
1960         /* maximum segment size tcp option */
1961         tlen = sizeof(struct tcphdr);
1962         if (mss)
1963                 tlen += 4;
1964
1965         switch (af) {
1966 #ifdef INET
1967         case AF_INET:
1968                 len = sizeof(struct ip) + tlen;
1969                 break;
1970 #endif /* INET */
1971 #ifdef INET6
1972         case AF_INET6:
1973                 len = sizeof(struct ip6_hdr) + tlen;
1974                 break;
1975 #endif /* INET6 */
1976         }
1977
1978         /*
1979          * Create outgoing mbuf.
1980          *
1981          * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
1982          * so make sure pf.flags is clear.
1983          */
1984         m = m_gethdr(MB_DONTWAIT, MT_HEADER);
1985         if (m == NULL) {
1986                 return;
1987         }
1988         if (tag)
1989                 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
1990         m->m_pkthdr.pf.flags = 0;
1991         m->m_pkthdr.pf.tag = rtag;
1992         /* XXX Recheck when upgrading to > 4.4 */
1993         m->m_pkthdr.pf.statekey = NULL;
1994         if (r != NULL && r->rtableid >= 0)
1995                 m->m_pkthdr.pf.rtableid = r->rtableid;
1996
1997 #ifdef ALTQ
1998         if (r != NULL && r->qid) {
1999                 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
2000                 m->m_pkthdr.pf.qid = r->qid;
2001                 m->m_pkthdr.pf.ecn_af = af;
2002                 m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
2003         }
2004 #endif /* ALTQ */
2005         m->m_data += max_linkhdr;
2006         m->m_pkthdr.len = m->m_len = len;
2007         m->m_pkthdr.rcvif = NULL;
2008         bzero(m->m_data, len);
2009         switch (af) {
2010 #ifdef INET
2011         case AF_INET:
2012                 h = mtod(m, struct ip *);
2013
2014                 /* IP header fields included in the TCP checksum */
2015                 h->ip_p = IPPROTO_TCP;
2016                 h->ip_len = tlen;
2017                 h->ip_src.s_addr = saddr->v4.s_addr;
2018                 h->ip_dst.s_addr = daddr->v4.s_addr;
2019
2020                 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2021                 break;
2022 #endif /* INET */
2023 #ifdef INET6
2024         case AF_INET6:
2025                 h6 = mtod(m, struct ip6_hdr *);
2026
2027                 /* IP header fields included in the TCP checksum */
2028                 h6->ip6_nxt = IPPROTO_TCP;
2029                 h6->ip6_plen = htons(tlen);
2030                 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2031                 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2032
2033                 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2034                 break;
2035 #endif /* INET6 */
2036         }
2037
2038         /* TCP header */
2039         th->th_sport = sport;
2040         th->th_dport = dport;
2041         th->th_seq = htonl(seq);
2042         th->th_ack = htonl(ack);
2043         th->th_off = tlen >> 2;
2044         th->th_flags = flags;
2045         th->th_win = htons(win);
2046
2047         if (mss) {
2048                 opt = (char *)(th + 1);
2049                 opt[0] = TCPOPT_MAXSEG;
2050                 opt[1] = 4;
2051                 mss = htons(mss);
2052                 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2053         }
2054
2055         switch (af) {
2056 #ifdef INET
2057         case AF_INET:
2058                 /* TCP checksum */
2059                 th->th_sum = in_cksum(m, len);
2060
2061                 /* Finish the IP header */
2062                 h->ip_v = 4;
2063                 h->ip_hl = sizeof(*h) >> 2;
2064                 h->ip_tos = IPTOS_LOWDELAY;
2065                 h->ip_len = len;
2066                 h->ip_off = path_mtu_discovery ? IP_DF : 0;
2067                 h->ip_ttl = ttl ? ttl : ip_defttl;
2068                 h->ip_sum = 0;
2069                 if (eh == NULL) {
2070                         lwkt_reltoken(&pf_token);
2071                         ip_output(m, NULL, NULL, 0, NULL, NULL);
2072                         lwkt_gettoken(&pf_token);
2073                 } else {
2074                         struct route             ro;
2075                         struct rtentry           rt;
2076                         struct ether_header     *e = (void *)ro.ro_dst.sa_data;
2077
2078                         if (ifp == NULL) {
2079                                 m_freem(m);
2080                                 return;
2081                         }
2082                         rt.rt_ifp = ifp;
2083                         ro.ro_rt = &rt;
2084                         ro.ro_dst.sa_len = sizeof(ro.ro_dst);
2085                         ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
2086                         bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
2087                         bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
2088                         e->ether_type = eh->ether_type;
2089                         /* XXX_IMPORT: later */
2090                         lwkt_reltoken(&pf_token);
2091                         ip_output(m, NULL, &ro, 0, NULL, NULL);
2092                         lwkt_gettoken(&pf_token);
2093                 }
2094                 break;
2095 #endif /* INET */
2096 #ifdef INET6
2097         case AF_INET6:
2098                 /* TCP checksum */
2099                 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2100                     sizeof(struct ip6_hdr), tlen);
2101
2102                 h6->ip6_vfc |= IPV6_VERSION;
2103                 h6->ip6_hlim = IPV6_DEFHLIM;
2104
2105                 lwkt_reltoken(&pf_token);
2106                 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2107                 lwkt_gettoken(&pf_token);
2108                 break;
2109 #endif /* INET6 */
2110         }
2111 }
2112
2113 void
2114 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2115     struct pf_rule *r)
2116 {
2117         struct mbuf     *m0;
2118
2119         /*
2120          * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
2121          * so make sure pf.flags is clear.
2122          */
2123         if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
2124                 return;
2125
2126         m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
2127         m0->m_pkthdr.pf.flags = 0;
2128         /* XXX Recheck when upgrading to > 4.4 */
2129         m0->m_pkthdr.pf.statekey = NULL;
2130
2131         if (r->rtableid >= 0)
2132                 m0->m_pkthdr.pf.rtableid = r->rtableid;
2133
2134 #ifdef ALTQ
2135         if (r->qid) {
2136                 m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
2137                 m0->m_pkthdr.pf.qid = r->qid;
2138                 m0->m_pkthdr.pf.ecn_af = af;
2139                 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
2140         }
2141 #endif /* ALTQ */
2142
2143         switch (af) {
2144 #ifdef INET
2145         case AF_INET:
2146                 icmp_error(m0, type, code, 0, 0);
2147                 break;
2148 #endif /* INET */
2149 #ifdef INET6
2150         case AF_INET6:
2151                 icmp6_error(m0, type, code, 0);
2152                 break;
2153 #endif /* INET6 */
2154         }
2155 }
2156
2157 /*
2158  * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2159  * If n is 0, they match if they are equal. If n is != 0, they match if they
2160  * are different.
2161  */
2162 int
2163 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2164     struct pf_addr *b, sa_family_t af)
2165 {
2166         int     match = 0;
2167
2168         switch (af) {
2169 #ifdef INET
2170         case AF_INET:
2171                 if ((a->addr32[0] & m->addr32[0]) ==
2172                     (b->addr32[0] & m->addr32[0]))
2173                         match++;
2174                 break;
2175 #endif /* INET */
2176 #ifdef INET6
2177         case AF_INET6:
2178                 if (((a->addr32[0] & m->addr32[0]) ==
2179                      (b->addr32[0] & m->addr32[0])) &&
2180                     ((a->addr32[1] & m->addr32[1]) ==
2181                      (b->addr32[1] & m->addr32[1])) &&
2182                     ((a->addr32[2] & m->addr32[2]) ==
2183                      (b->addr32[2] & m->addr32[2])) &&
2184                     ((a->addr32[3] & m->addr32[3]) ==
2185                      (b->addr32[3] & m->addr32[3])))
2186                         match++;
2187                 break;
2188 #endif /* INET6 */
2189         }
2190         if (match) {
2191                 if (n)
2192                         return (0);
2193                 else
2194                         return (1);
2195         } else {
2196                 if (n)
2197                         return (1);
2198                 else
2199                         return (0);
2200         }
2201 }
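
/*
 * Illustrative example (not part of pf): pf_match_addr() compares the
 * packet address against the rule address under the given mask, and the
 * 'n' argument inverts the result (for negated "! address" rules).  The
 * sample addresses are assumptions for demonstration only.
 */
#if 0
static int
pf_match_addr_example(void)
{
        struct pf_addr a, m, b;

        a.v4.s_addr = htonl(0x0a010200);        /* rule: 10.1.2.0/24 */
        m.v4.s_addr = htonl(0xffffff00);
        b.v4.s_addr = htonl(0x0a010263);        /* packet: 10.1.2.99 */

        /* returns 1: 10.1.2.99 falls inside 10.1.2.0/24 */
        return (pf_match_addr(0, &a, &m, &b, AF_INET));
}
#endif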
2202
2203 /*
2204  * Return 1 if b <= a <= e, otherwise return 0.
2205  */
2206 int
2207 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2208     struct pf_addr *a, sa_family_t af)
2209 {
2210         switch (af) {
2211 #ifdef INET
2212         case AF_INET:
2213                 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2214                     (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2215                         return (0);
2216                 break;
2217 #endif /* INET */
2218 #ifdef INET6
2219         case AF_INET6: {
2220                 int     i;
2221
2222                 /* check a >= b */
2223                 for (i = 0; i < 4; ++i)
2224                         if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2225                                 break;
2226                         else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2227                                 return (0);
2228                 /* check a <= e */
2229                 for (i = 0; i < 4; ++i)
2230                         if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2231                                 break;
2232                         else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2233                                 return (0);
2234                 break;
2235         }
2236 #endif /* INET6 */
2237         }
2238         return (1);
2239 }
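
/*
 * Illustrative example (not part of pf): pf_match_addr_range() implements
 * "from 10.0.0.200 to 10.0.1.100" style ranges by checking b <= a <= e
 * numerically.  The sample values are assumptions for demonstration only.
 */
#if 0
static int
pf_match_addr_range_example(void)
{
        struct pf_addr b, e, a;

        b.v4.s_addr = htonl(0x0a0000c8);        /* 10.0.0.200 */
        e.v4.s_addr = htonl(0x0a000164);        /* 10.0.1.100 */
        a.v4.s_addr = htonl(0x0a000132);        /* 10.0.1.50  */

        /* returns 1: 10.0.0.200 <= 10.0.1.50 <= 10.0.1.100 */
        return (pf_match_addr_range(&b, &e, &a, AF_INET));
}
#endif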
2240
2241 int
2242 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2243 {
2244         switch (op) {
2245         case PF_OP_IRG:
2246                 return ((p > a1) && (p < a2));
2247         case PF_OP_XRG:
2248                 return ((p < a1) || (p > a2));
2249         case PF_OP_RRG:
2250                 return ((p >= a1) && (p <= a2));
2251         case PF_OP_EQ:
2252                 return (p == a1);
2253         case PF_OP_NE:
2254                 return (p != a1);
2255         case PF_OP_LT:
2256                 return (p < a1);
2257         case PF_OP_LE:
2258                 return (p <= a1);
2259         case PF_OP_GT:
2260                 return (p > a1);
2261         case PF_OP_GE:
2262                 return (p >= a1);
2263         }
2264         return (0); /* never reached */
2265 }
2266
2267 int
2268 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2269 {
2270         a1 = ntohs(a1);
2271         a2 = ntohs(a2);
2272         p = ntohs(p);
2273         return (pf_match(op, a1, a2, p));
2274 }
2275
2276 int
2277 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2278 {
2279         if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2280                 return (0);
2281         return (pf_match(op, a1, a2, u));
2282 }
2283
2284 int
2285 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2286 {
2287         if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2288                 return (0);
2289         return (pf_match(op, a1, a2, g));
2290 }
2291
2292 int
2293 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
2294 {
2295         if (*tag == -1)
2296                 *tag = m->m_pkthdr.pf.tag;
2297
2298         return ((!r->match_tag_not && r->match_tag == *tag) ||
2299             (r->match_tag_not && r->match_tag != *tag));
2300 }
2301
2302 int
2303 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
2304 {
2305         if (tag <= 0 && rtableid < 0)
2306                 return (0);
2307
2308         if (tag > 0)
2309                 m->m_pkthdr.pf.tag = tag;
2310         if (rtableid >= 0)
2311                 m->m_pkthdr.pf.rtableid = rtableid;
2312
2313         return (0);
2314 }
2315
2316 void
2317 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2318     struct pf_rule **r, struct pf_rule **a, int *match)
2319 {
2320         struct pf_anchor_stackframe     *f;
2321
2322         (*r)->anchor->match = 0;
2323         if (match)
2324                 *match = 0;
2325         if (*depth >= NELEM(pf_anchor_stack)) {
2326                 kprintf("pf_step_into_anchor: stack overflow\n");
2327                 *r = TAILQ_NEXT(*r, entries);
2328                 return;
2329         } else if (*depth == 0 && a != NULL)
2330                 *a = *r;
2331         f = pf_anchor_stack + (*depth)++;
2332         f->rs = *rs;
2333         f->r = *r;
2334         if ((*r)->anchor_wildcard) {
2335                 f->parent = &(*r)->anchor->children;
2336                 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2337                     NULL) {
2338                         *r = NULL;
2339                         return;
2340                 }
2341                 *rs = &f->child->ruleset;
2342         } else {
2343                 f->parent = NULL;
2344                 f->child = NULL;
2345                 *rs = &(*r)->anchor->ruleset;
2346         }
2347         *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2348 }
2349
2350 int
2351 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2352     struct pf_rule **r, struct pf_rule **a, int *match)
2353 {
2354         struct pf_anchor_stackframe     *f;
2355         int quick = 0;
2356
2357         do {
2358                 if (*depth <= 0)
2359                         break;
2360                 f = pf_anchor_stack + *depth - 1;
2361                 if (f->parent != NULL && f->child != NULL) {
2362                         if (f->child->match || (match != NULL && *match)) {
2363                                 f->r->anchor->match = 1;
2364                                 if (match != NULL)
2365                                         *match = 0;
2366                         }
2367                         f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2368                         if (f->child != NULL) {
2369                                 *rs = &f->child->ruleset;
2370                                 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2371                                 if (*r == NULL)
2372                                         continue;
2373                                 else
2374                                         break;
2375                         }
2376                 }
2377                 (*depth)--;
2378                 if (*depth == 0 && a != NULL)
2379                         *a = NULL;
2380                 *rs = f->rs;
2381                 if (f->r->anchor->match || (match != NULL && *match))
2382                         quick = f->r->quick;
2383                 *r = TAILQ_NEXT(f->r, entries);
2384         } while (*r == NULL);
2385
2386         return (quick);
2387 }
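
/*
 * Illustrative sketch (not part of pf): the typical shape of a ruleset walk
 * built on the two helpers above.  A plain rule is evaluated in place; a
 * rule with an anchor pushes a stack frame and descends into the anchor's
 * ruleset; when the current ruleset runs out, the walk pops back up and
 * resumes after the anchor.  This is a simplified fragment, not the actual
 * evaluator; 'asd', 'ruleset', 'a', 'rm' and 'match' are the caller's locals.
 */
#if 0
        while (r != NULL) {
                if (r->anchor == NULL) {
                        /* plain rule: remember the match, honor "quick" */
                        match = 1;
                        rm = r;
                        if (r->quick)
                                break;
                        r = TAILQ_NEXT(r, entries);
                } else {
                        pf_step_into_anchor(&asd, &ruleset, PF_RULESET_FILTER,
                            &r, &a, &match);
                }
                if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
                    PF_RULESET_FILTER, &r, &a, &match))
                        break;          /* a quick rule matched in an anchor */
        }
#endif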
2388
2389 #ifdef INET6
2390 void
2391 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2392     struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2393 {
2394         switch (af) {
2395 #ifdef INET
2396         case AF_INET:
2397                 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2398                 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2399                 break;
2400 #endif /* INET */
2401         case AF_INET6:
2402                 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2403                 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2404                 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2405                 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2406                 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2407                 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2408                 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2409                 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2410                 break;
2411         }
2412 }
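
/*
 * Illustrative example (not part of pf): the bitmask pool combines the
 * network part of the pool address with the host part of the original
 * address, i.e. naddr = (raddr & rmask) | (saddr & ~rmask).  With a pool of
 * 198.51.100.0/24 (values made up for demonstration):
 *
 *      saddr = 10.0.0.37, rmask = 255.255.255.0
 *      naddr = 198.51.100.0 | 0.0.0.37 = 198.51.100.37
 *
 * so each internal host maps to a stable external address that keeps its
 * host bits.
 */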
2413
2414 void
2415 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2416 {
2417         switch (af) {
2418 #ifdef INET
2419         case AF_INET:
2420                 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2421                 break;
2422 #endif /* INET */
2423         case AF_INET6:
2424                 if (addr->addr32[3] == 0xffffffff) {
2425                         addr->addr32[3] = 0;
2426                         if (addr->addr32[2] == 0xffffffff) {
2427                                 addr->addr32[2] = 0;
2428                                 if (addr->addr32[1] == 0xffffffff) {
2429                                         addr->addr32[1] = 0;
2430                                         addr->addr32[0] =
2431                                             htonl(ntohl(addr->addr32[0]) + 1);
2432                                 } else
2433                                         addr->addr32[1] =
2434                                             htonl(ntohl(addr->addr32[1]) + 1);
2435                         } else
2436                                 addr->addr32[2] =
2437                                     htonl(ntohl(addr->addr32[2]) + 1);
2438                 } else
2439                         addr->addr32[3] =
2440                             htonl(ntohl(addr->addr32[3]) + 1);
2441                 break;
2442         }
2443 }
2444 #endif /* INET6 */
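
/*
 * Illustrative example (not part of pf): pf_addr_inc() increments an
 * address kept in network byte order, rippling the carry across the four
 * 32-bit words for IPv6.  Sample values are assumptions for demonstration:
 *
 *      192.0.2.255         -> 192.0.3.0
 *      2001:db8::ffff:ffff -> 2001:db8::1:0:0
 *
 * The random and round-robin pools below use this (via PF_AINC) to step
 * through consecutive addresses.
 */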
2445
2446 #define mix(a,b,c) \
2447         do {                                    \
2448                 a -= b; a -= c; a ^= (c >> 13); \
2449                 b -= c; b -= a; b ^= (a << 8);  \
2450                 c -= a; c -= b; c ^= (b >> 13); \
2451                 a -= b; a -= c; a ^= (c >> 12); \
2452                 b -= c; b -= a; b ^= (a << 16); \
2453                 c -= a; c -= b; c ^= (b >> 5);  \
2454                 a -= b; a -= c; a ^= (c >> 3);  \
2455                 b -= c; b -= a; b ^= (a << 10); \
2456                 c -= a; c -= b; c ^= (b >> 15); \
2457         } while (0)
2458
2459 /*
2460  * hash function based on bridge_hash in if_bridge.c
2461  */
2462 void
2463 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
2464     struct pf_poolhashkey *key, sa_family_t af)
2465 {
2466         u_int32_t       a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
2467
2468         switch (af) {
2469 #ifdef INET
2470         case AF_INET:
2471                 a += inaddr->addr32[0];
2472                 b += key->key32[1];
2473                 mix(a, b, c);
2474                 hash->addr32[0] = c + key->key32[2];
2475                 break;
2476 #endif /* INET */
2477 #ifdef INET6
2478         case AF_INET6:
2479                 a += inaddr->addr32[0];
2480                 b += inaddr->addr32[2];
2481                 mix(a, b, c);
2482                 hash->addr32[0] = c;
2483                 a += inaddr->addr32[1];
2484                 b += inaddr->addr32[3];
2485                 c += key->key32[1];
2486                 mix(a, b, c);
2487                 hash->addr32[1] = c;
2488                 a += inaddr->addr32[2];
2489                 b += inaddr->addr32[1];
2490                 c += key->key32[2];
2491                 mix(a, b, c);
2492                 hash->addr32[2] = c;
2493                 a += inaddr->addr32[3];
2494                 b += inaddr->addr32[0];
2495                 c += key->key32[3];
2496                 mix(a, b, c);
2497                 hash->addr32[3] = c;
2498                 break;
2499 #endif /* INET6 */
2500         }
2501 }
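
/*
 * Illustrative fragment (not part of pf): the PF_POOL_SRCHASH case below
 * feeds the packet's source address through this hash with the pool's key,
 * so a given source always maps onto the same pool address while different
 * sources spread across the pool.  'saddr', 'naddr', 'raddr', 'rmask' and
 * 'rpool' are the locals of pf_map_addr().
 */
#if 0
        unsigned char hash[16];

        pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
        PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
#endif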
2502
2503 int
2504 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
2505     struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
2506 {
2507         unsigned char            hash[16];
2508         struct pf_pool          *rpool = &r->rpool;
2509         struct pf_pooladdr      *acur = rpool->cur;
2510         struct pf_pooladdr      *cur;
2511         struct pf_addr          *raddr;
2512         struct pf_addr          *rmask;
2513         struct pf_addr          counter;
2514         struct pf_src_node       k;
2515         int cpu = mycpu->gd_cpuid;
2516         int tblidx;
2517
2518         /*
2519          * NOTE! rpool->cur and rpool->tblidx can be iterators and thus
2520          *       may represent an SMP race due to the shared nature of the
2521          *       rpool structure.  We allow the race and ensure that updates
2522          *       do not create a fatal condition.
2523          */
2524         cpu_ccfence();
2525         cur = acur;
2526         raddr = &cur->addr.v.a.addr;
2527         rmask = &cur->addr.v.a.mask;
2528
2529         if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
2530             (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
2531                 k.af = af;
2532                 PF_ACPY(&k.addr, saddr, af);
2533                 if (r->rule_flag & PFRULE_RULESRCTRACK ||
2534                     r->rpool.opts & PF_POOL_STICKYADDR)
2535                         k.rule.ptr = r;
2536                 else
2537                         k.rule.ptr = NULL;
2538                 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
2539                 *sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
2540                 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
2541                         PF_ACPY(naddr, &(*sn)->raddr, af);
2542                         if (pf_status.debug >= PF_DEBUG_MISC) {
2543                                 kprintf("pf_map_addr: src tracking maps ");
2544                                 pf_print_host(&k.addr, 0, af);
2545                                 kprintf(" to ");
2546                                 pf_print_host(naddr, 0, af);
2547                                 kprintf("\n");
2548                         }
2549                         return (0);
2550                 }
2551         }
2552
2553         if (cur->addr.type == PF_ADDR_NOROUTE)
2554                 return (1);
2555         if (cur->addr.type == PF_ADDR_DYNIFTL) {
2556                 switch (af) {
2557 #ifdef INET
2558                 case AF_INET:
2559                         if (cur->addr.p.dyn->pfid_acnt4 < 1 &&
2560                             (rpool->opts & PF_POOL_TYPEMASK) !=
2561                             PF_POOL_ROUNDROBIN)
2562                                 return (1);
2563                         raddr = &cur->addr.p.dyn->pfid_addr4;
2564                         rmask = &cur->addr.p.dyn->pfid_mask4;
2565                         break;
2566 #endif /* INET */
2567 #ifdef INET6
2568                 case AF_INET6:
2569                         if (cur->addr.p.dyn->pfid_acnt6 < 1 &&
2570                             (rpool->opts & PF_POOL_TYPEMASK) !=
2571                             PF_POOL_ROUNDROBIN)
2572                                 return (1);
2573                         raddr = &cur->addr.p.dyn->pfid_addr6;
2574                         rmask = &cur->addr.p.dyn->pfid_mask6;
2575                         break;
2576 #endif /* INET6 */
2577                 }
2578         } else if (cur->addr.type == PF_ADDR_TABLE) {
2579                 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
2580                         return (1); /* unsupported */
2581         } else {
2582                 raddr = &cur->addr.v.a.addr;
2583                 rmask = &cur->addr.v.a.mask;
2584         }
2585
2586         switch (rpool->opts & PF_POOL_TYPEMASK) {
2587         case PF_POOL_NONE:
2588                 PF_ACPY(naddr, raddr, af);
2589                 break;
2590         case PF_POOL_BITMASK:
2591                 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
2592                 break;
2593         case PF_POOL_RANDOM:
2594                 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
2595                         switch (af) {
2596 #ifdef INET
2597                         case AF_INET:
2598                                 counter.addr32[0] = htonl(karc4random());
2599                                 break;
2600 #endif /* INET */
2601 #ifdef INET6
2602                         case AF_INET6:
2603                                 if (rmask->addr32[3] != 0xffffffff)
2604                                         counter.addr32[3] =
2605                                                 htonl(karc4random());
2606                                 else
2607                                         break;
2608                                 if (rmask->addr32[2] != 0xffffffff)
2609                                         counter.addr32[2] =
2610                                                 htonl(karc4random());
2611                                 else
2612                                         break;
2613                                 if (rmask->addr32[1] != 0xffffffff)
2614                                         counter.addr32[1] =
2615                                                 htonl(karc4random());
2616                                 else
2617                                         break;
2618                                 if (rmask->addr32[0] != 0xffffffff)
2619                                         counter.addr32[0] =
2620                                                 htonl(karc4random());
2621                                 break;
2622 #endif /* INET6 */
2623                         }
2624                         PF_POOLMASK(naddr, raddr, rmask, &counter, af);
2625                         PF_ACPY(init_addr, naddr, af);
2626
2627                 } else {
2628                         counter = rpool->counter;
2629                         cpu_ccfence();
2630                         PF_AINC(&counter, af);
2631                         PF_POOLMASK(naddr, raddr, rmask, &counter, af);
2632                         rpool->counter = counter;
2633                 }
2634                 break;
2635         case PF_POOL_SRCHASH:
2636                 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
2637                 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
2638                 break;
2639         case PF_POOL_ROUNDROBIN:
2640                 tblidx = rpool->tblidx;
2641                 counter = rpool->counter;
2642                 if (cur->addr.type == PF_ADDR_TABLE) {
2643                         if (!pfr_pool_get(cur->addr.p.tbl,
2644                             &tblidx, &counter,
2645                             &raddr, &rmask, af)) {
2646                                 goto get_addr;
2647                         }
2648                 } else if (cur->addr.type == PF_ADDR_DYNIFTL) {
2649                         if (!pfr_pool_get(cur->addr.p.dyn->pfid_kt,
2650                             &tblidx, &counter,
2651                             &raddr, &rmask, af)) {
2652                                 goto get_addr;
2653                         }
2654                 } else if (pf_match_addr(0, raddr, rmask,
2655                                          &counter, af)) {
2656                         goto get_addr;
2657                 }
2658
2659         try_next:
2660                 if ((cur = TAILQ_NEXT(cur, entries)) == NULL)
2661                         cur = TAILQ_FIRST(&rpool->list);
2662                 if (cur->addr.type == PF_ADDR_TABLE) {
2663                         tblidx = -1;
2664                         if (pfr_pool_get(cur->addr.p.tbl,
2665                             &tblidx, &counter,
2666                             &raddr, &rmask, af)) {
2667                                 /* table contains no address of type 'af' */
2668                                 if (cur != acur)
2669                                         goto try_next;
2670                                 return (1);
2671                         }
2672                 } else if (cur->addr.type == PF_ADDR_DYNIFTL) {
2673                         tblidx = -1;
2674                         if (pfr_pool_get(cur->addr.p.dyn->pfid_kt,
2675                             &tblidx, &counter,
2676                             &raddr, &rmask, af)) {
2677                                 /* table contains no address of type 'af' */
2678                                 if (cur != acur)
2679                                         goto try_next;
2680                                 return (1);
2681                         }
2682                 } else {
2683                         raddr = &cur->addr.v.a.addr;
2684                         rmask = &cur->addr.v.a.mask;
2685                         PF_ACPY(&counter, raddr, af);
2686                 }
2687
2688         get_addr:
2689                 rpool->cur = cur;
2690                 rpool->tblidx = tblidx;
2691                 PF_ACPY(naddr, &counter, af);
2692                 if (init_addr != NULL && PF_AZERO(init_addr, af))
2693                         PF_ACPY(init_addr, naddr, af);
2694                 PF_AINC(&counter, af);
2695                 rpool->counter = counter;
2696                 break;
2697         }
2698         if (*sn != NULL)
2699                 PF_ACPY(&(*sn)->raddr, naddr, af);
2700
2701         if (pf_status.debug >= PF_DEBUG_MISC &&
2702             (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
2703                 kprintf("pf_map_addr: selected address ");
2704                 pf_print_host(naddr, 0, af);
2705                 kprintf("\n");
2706         }
2707
2708         return (0);
2709 }
2710
2711 int
2712 pf_get_sport(struct pf_pdesc *pd, sa_family_t af,
2713              u_int8_t proto, struct pf_rule *r,
2714              struct pf_addr *saddr, struct pf_addr *daddr,
2715              u_int16_t sport, u_int16_t dport,
2716              struct pf_addr *naddr, u_int16_t *nport,
2717              u_int16_t low, u_int16_t high, struct pf_src_node **sn)
2718 {
2719         struct pf_state_key_cmp key;
2720         struct pf_addr          init_addr;
2721         u_int16_t               cut;
2722         u_int32_t               toeplitz_sport;
2723
2724         bzero(&init_addr, sizeof(init_addr));
2725         if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
2726                 return (1);
2727
2728         if (proto == IPPROTO_ICMP) {
2729                 low = 1;
2730                 high = 65535;
2731         }
2732
2733         bzero(&key, sizeof(key));
2734         key.af = af;
2735         key.proto = proto;
2736         key.port[0] = dport;
2737         PF_ACPY(&key.addr[0], daddr, key.af);
2738
2739         do {
2740                 PF_ACPY(&key.addr[1], naddr, key.af);
2741
2742                 /*
2743                  * We want to select a port that calculates to a toeplitz hash
2744                  * that masks to the same cpu; otherwise the response may
2745                  * not see the new state.
2746                  *
2747                  * We can still do this even if the kernel is disregarding
2748                  * the hash and vectoring the packets to a specific cpu,
2749                  * but it will reduce the number of ports we can use.
2750                  */
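                /*
                 * Illustrative note (an assumption, not taken from the
                 * original comments): the piecemeal helpers exist because
                 * the Toeplitz dispatch hash can be assembled as an XOR of
                 * independent per-field contributions, roughly
                 *
                 *      hash = piecemeal_addr(saddr) ^ piecemeal_addr(daddr) ^
                 *             piecemeal_port(sport) ^ piecemeal_port(dport)
                 *
                 * so the port-search loop below can evaluate a candidate
                 * port by XOR-ing in only that port's contribution instead
                 * of rehashing the whole 4-tuple.
                 */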
2751                 switch(af) {
2752                 case AF_INET:
2753                         toeplitz_sport =
2754                                 toeplitz_piecemeal_port(sport) ^
2755                                 toeplitz_piecemeal_addr(saddr->v4.s_addr) ^
2756                                 toeplitz_piecemeal_addr(naddr->v4.s_addr);
2757                         break;
2758                 case AF_INET6:
2759                         /* XXX TODO XXX */
2760                 default:
2761                         /* XXX TODO XXX */
2762                         toeplitz_sport = 0;
2763                         break;
2764                 }
2765
2766                 /*
2767                  * port search; start random, step;
2768                  * similar to the port loop in in_pcbbind
2769                  *
2770                  * WARNING! We try to match such that the kernel will
2771                  *          dispatch the translated host/port to the same
2772                  *          cpu, but this might not be possible.
2773                  *
2774                  *          In the case where the port is fixed, or for the
2775                  *          UDP case (whose toeplitz hash does not incorporate
2776                  *          the port), we set not_cpu_localized, which
2777                  *          ultimately flags the state PFSTATE_STACK_GLOBAL.
2778                  *
2779                  * XXX fixed ports present a problem for cpu localization.
2780                  */
2781                 if (!(proto == IPPROTO_TCP ||
2782                       proto == IPPROTO_UDP ||
2783                       proto == IPPROTO_ICMP)) {
2784                         /*
2785                          * non-specific protocol, leave port intact.
2786                          */
2787                         key.port[1] = sport;
2788                         if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
2789                                 *nport = sport;
2790                                 pd->not_cpu_localized = 1;
2791                                 return (0);
2792                         }
2793                 } else if (low == 0 && high == 0) {
2794                         /*
2795                          * static-port same as originator.
2796                          */
2797                         key.port[1] = sport;
2798                         if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
2799                                 *nport = sport;
2800                                 pd->not_cpu_localized = 1;
2801                                 return (0);
2802                         }
2803                 } else if (low == high) {
2804                         /*
2805                          * single fixed port, as specified by the rule.
2806                          */
2807                         key.port[1] = htons(low);
2808                         if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
2809                                 *nport = htons(low);
2810                                 pd->not_cpu_localized = 1;
2811                                 return (0);
2812                         }
2813                 } else {
2814                         /*
2815                          * normal dynamic port
2816                          */
2817                         u_int16_t tmp;
2818
2819                         if (low > high) {
2820                                 tmp = low;
2821                                 low = high;
2822                                 high = tmp;
2823                         }
2824                         /* low < high */
2825                         cut = htonl(karc4random()) % (1 + high - low) + low;
2826                         /* low <= cut <= high */
2827                         for (tmp = cut; tmp <= high; ++(tmp)) {
2828                                 key.port[1] = htons(tmp);
2829                                 if ((toeplitz_piecemeal_port(key.port[1]) ^
2830                                      toeplitz_sport) & ncpus2_mask) {
2831                                         continue;
2832                                 }
2833                                 if (pf_find_state_all(&key, PF_IN, NULL) ==
2834                                     NULL && !in_baddynamic(tmp, proto)) {
2835                                         if (proto == IPPROTO_UDP)
2836                                                 pd->not_cpu_localized = 1;
2837                                         *nport = htons(tmp);
2838                                         return (0);
2839                                 }
2840                         }
2841                         for (tmp = cut - 1; tmp >= low; --(tmp)) {
2842                                 key.port[1] = htons(tmp);
2843                                 if ((toeplitz_piecemeal_port(key.port[1]) ^
2844                                      toeplitz_sport) & ncpus2_mask) {
2845                                         continue;
2846                                 }
2847                                 if (pf_find_state_all(&key, PF_IN, NULL) ==
2848                                     NULL && !in_baddynamic(tmp, proto)) {
2849                                         if (proto == IPPROTO_UDP)
2850                                                 pd->not_cpu_localized = 1;
2851                                         *nport = htons(tmp);
2852                                         return (0);
2853                                 }
2854                         }
2855                 }
2856
2857                 /*
2858                  * Next address
2859                  */
2860                 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
2861                 case PF_POOL_RANDOM:
2862                 case PF_POOL_ROUNDROBIN:
2863                         if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
2864                                 return (1);
2865                         break;
2866                 case PF_POOL_NONE:
2867                 case PF_POOL_SRCHASH:
2868                 case PF_POOL_BITMASK:
2869                 default:
2870                         return (1);
2871                 }
2872         } while (!PF_AEQ(&init_addr, naddr, af));
2873         return (1);                                     /* none available */
2874 }
2875
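/*
 * Walk the active translation ruleset selected by rs_num (binat, nat or rdr)
 * and return the first rule that matches the packet, stepping into anchors
 * as needed.  Returns NULL when nothing matches or when the matching rule is
 * a "no nat"/"no binat"/"no rdr" rule.
 */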
2876 struct pf_rule *
2877 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
2878     int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
2879     struct pf_addr *daddr, u_int16_t dport, int rs_num)
2880 {
2881         struct pf_rule          *r, *rm = NULL;
2882         struct pf_ruleset       *ruleset = NULL;
2883         int                      tag = -1;
2884         int                      rtableid = -1;
2885         int                      asd = 0;
2886
2887         r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
2888         while (r && rm == NULL) {
2889                 struct pf_rule_addr     *src = NULL, *dst = NULL;
2890                 struct pf_addr_wrap     *xdst = NULL;
2891                 struct pf_pooladdr      *cur;
2892
2893                 if (r->action == PF_BINAT && direction == PF_IN) {
2894                         src = &r->dst;
2895                         cur = r->rpool.cur;     /* SMP race possible */
2896                         cpu_ccfence();
2897                         if (cur)
2898                                 xdst = &cur->addr;
2899                 } else {
2900                         src = &r->src;
2901                         dst = &r->dst;
2902                 }
2903
2904                 r->evaluations++;
2905                 if (pfi_kif_match(r->kif, kif) == r->ifnot)
2906                         r = r->skip[PF_SKIP_IFP].ptr;
2907                 else if (r->direction && r->direction != direction)
2908                         r = r->skip[PF_SKIP_DIR].ptr;
2909                 else if (r->af && r->af != pd->af)
2910                         r = r->skip[PF_SKIP_AF].ptr;
2911                 else if (r->proto && r->proto != pd->proto)
2912                         r = r->skip[PF_SKIP_PROTO].ptr;
2913                 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
2914                     src->neg, kif))
2915                         r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
2916                             PF_SKIP_DST_ADDR].ptr;
2917                 else if (src->port_op && !pf_match_port(src->port_op,
2918                     src->port[0], src->port[1], sport))
2919                         r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
2920                             PF_SKIP_DST_PORT].ptr;
2921                 else if (dst != NULL &&
2922                     PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
2923                         r = r->skip[PF_SKIP_DST_ADDR].ptr;
2924                 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
2925                     0, NULL))
2926                         r = TAILQ_NEXT(r, entries);
2927                 else if (dst != NULL && dst->port_op &&
2928                     !pf_match_port(dst->port_op, dst->port[0],
2929                     dst->port[1], dport))
2930                         r = r->skip[PF_SKIP_DST_PORT].ptr;
2931                 else if (r->match_tag && !pf_match_tag(m, r, &tag))
2932                         r = TAILQ_NEXT(r, entries);
2933                 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
2934                     IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
2935                     off, pd->hdr.tcp), r->os_fingerprint)))
2936                         r = TAILQ_NEXT(r, entries);
2937                 else {
2938                         if (r->tag)
2939                                 tag = r->tag;
2940                         if (r->rtableid >= 0)
2941                                 rtableid = r->rtableid;
2942                         if (r->anchor == NULL) {
2943                                 rm = r;
2944                         } else
2945                                 pf_step_into_anchor(&asd, &ruleset, rs_num,
2946                                     &r, NULL, NULL);
2947                 }
2948                 if (r == NULL)
2949                         pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
2950                             NULL, NULL);
2951         }
2952         if (pf_tag_packet(m, tag, rtableid))
2953                 return (NULL);
2954         if (rm != NULL && (rm->action == PF_NONAT ||
2955             rm->action == PF_NORDR || rm->action == PF_NOBINAT))
2956                 return (NULL);
2957         return (rm);
2958 }
2959
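/*
 * Find the translation rule, if any, that applies to the packet: binat then
 * nat for outgoing packets, rdr then binat for incoming ones.  On a match
 * the state keys are set up and the translated address/port is written into
 * *nkp according to the rule type.  Returns the matching rule, or NULL if no
 * translation applies or the address/port mapping failed.
 */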
2960 struct pf_rule *
2961 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
2962     struct pfi_kif *kif, struct pf_src_node **sn,
2963     struct pf_state_key **skw, struct pf_state_key **sks,
2964     struct pf_state_key **skp, struct pf_state_key **nkp,
2965     struct pf_addr *saddr, struct pf_addr *daddr,
2966     u_int16_t sport, u_int16_t dport)
2967 {
2968         struct pf_rule  *r = NULL;
2969
2970         if (direction == PF_OUT) {
2971                 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2972                     sport, daddr, dport, PF_RULESET_BINAT);
2973                 if (r == NULL)
2974                         r = pf_match_translation(pd, m, off, direction, kif,
2975                             saddr, sport, daddr, dport, PF_RULESET_NAT);
2976         } else {
2977                 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2978                     sport, daddr, dport, PF_RULESET_RDR);
2979                 if (r == NULL)
2980                         r = pf_match_translation(pd, m, off, direction, kif,
2981                             saddr, sport, daddr, dport, PF_RULESET_BINAT);
2982         }
2983
2984         if (r != NULL) {
2985                 struct pf_addr  *naddr;
2986                 u_int16_t       *nport;
2987
2988                 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp,
2989                     saddr, daddr, sport, dport))
2990                         return r;
2991
2992                 /* XXX We only modify one side for now. */
2993                 naddr = &(*nkp)->addr[1];
2994                 nport = &(*nkp)->port[1];
2995
2996                 /*
2997                  * NOTE: Currently all translations will clear
2998                  *       BRIDGE_MBUF_TAGGED, telling the bridge to
2999                  *       ignore the original input encapsulation.
3000                  */
3001                 switch (r->action) {
3002                 case PF_NONAT:
3003                 case PF_NOBINAT:
3004                 case PF_NORDR:
3005                         return (NULL);
3006                 case PF_NAT:
3007                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
3008                         if (pf_get_sport(pd, pd->af, pd->proto, r,
3009                             saddr, daddr, sport, dport,
3010                             naddr, nport, r->rpool.proxy_port[0],
3011                             r->rpool.proxy_port[1], sn)) {
3012                                 DPFPRINTF(PF_DEBUG_MISC,
3013                                     ("pf: NAT proxy port allocation "
3014                                     "(%u-%u) failed\n",
3015                                     r->rpool.proxy_port[0],
3016                                     r->rpool.proxy_port[1]));
3017                                 return (NULL);
3018                         }
3019                         break;
3020                 case PF_BINAT:
3021                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
3022                         switch (direction) {
3023                         case PF_OUT:
3024                                 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){
3025                                         switch (pd->af) {
3026 #ifdef INET
3027                                         case AF_INET:
3028                                                 if (r->rpool.cur->addr.p.dyn->
3029                                                     pfid_acnt4 < 1)
3030                                                         return (NULL);
3031                                                 PF_POOLMASK(naddr,
3032                                                     &r->rpool.cur->addr.p.dyn->
3033                                                     pfid_addr4,
3034                                                     &r->rpool.cur->addr.p.dyn->
3035                                                     pfid_mask4,
3036                                                     saddr, AF_INET);
3037                                                 break;
3038 #endif /* INET */
3039 #ifdef INET6
3040                                         case AF_INET6:
3041                                                 if (r->rpool.cur->addr.p.dyn->
3042                                                     pfid_acnt6 < 1)
3043                                                         return (NULL);
3044                                                 PF_POOLMASK(naddr,
3045                                                     &r->rpool.cur->addr.p.dyn->
3046                                                     pfid_addr6,
3047                                                     &r->rpool.cur->addr.p.dyn->
3048                                                     pfid_mask6,
3049                                                     saddr, AF_INET6);
3050                                                 break;
3051 #endif /* INET6 */
3052                                         }
3053                                 } else
3054                                         PF_POOLMASK(naddr,
3055                                             &r->rpool.cur->addr.v.a.addr,
3056                                             &r->rpool.cur->addr.v.a.mask,
3057                                             saddr, pd->af);
3058                                 break;
3059                         case PF_IN:
3060                                 if (r->src.addr.type == PF_ADDR_DYNIFTL) {
3061                                         switch (pd->af) {
3062 #ifdef INET
3063                                         case AF_INET:
3064                                                 if (r->src.addr.p.dyn->
3065                                                     pfid_acnt4 < 1)
3066                                                         return (NULL);
3067                                                 PF_POOLMASK(naddr,
3068                                                     &r->src.addr.p.dyn->
3069                                                     pfid_addr4,
3070                                                     &r->src.addr.p.dyn->
3071                                                     pfid_mask4,
3072                                                     daddr, AF_INET);
3073                                                 break;
3074 #endif /* INET */
3075 #ifdef INET6
3076                                         case AF_INET6:
3077                                                 if (r->src.addr.p.dyn->
3078                                                     pfid_acnt6 < 1)
3079                                                         return (NULL);
3080                                                 PF_POOLMASK(naddr,
3081                                                     &r->src.addr.p.dyn->
3082                                                     pfid_addr6,
3083                                                     &r->src.addr.p.dyn->
3084                                                     pfid_mask6,
3085                                                     daddr, AF_INET6);
3086                                                 break;
3087 #endif /* INET6 */
3088                                         }
3089                                 } else
3090                                         PF_POOLMASK(naddr,
3091                                             &r->src.addr.v.a.addr,
3092                                             &r->src.addr.v.a.mask, daddr,
3093                                             pd->af);
3094                                 break;
3095                         }
3096                         break;
3097                 case PF_RDR: {
3098                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
3099                         if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
3100                                 return (NULL);
3101                         if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3102                             PF_POOL_BITMASK)
3103                                 PF_POOLMASK(naddr, naddr,
3104                                     &r->rpool.cur->addr.v.a.mask, daddr,
3105                                     pd->af);
3106
3107                         if (r->rpool.proxy_port[1]) {
3108                                 u_int32_t       tmp_nport;
3109
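                                /*
                                 * Map the destination port into the proxy
                                 * port range.  e.g. with dst ports 1000:1010
                                 * redirected to 5000:5010, a packet to port
                                 * 1003 maps to (1003 - 1000) % 11 + 5000 = 5003.
                                 */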
3110                                 tmp_nport = ((ntohs(dport) -
3111                                     ntohs(r->dst.port[0])) %
3112                                     (r->rpool.proxy_port[1] -
3113                                     r->rpool.proxy_port[0] + 1)) +
3114                                     r->rpool.proxy_port[0];
3115
3116                                 /* wrap around if necessary */
3117                                 if (tmp_nport > 65535)
3118                                         tmp_nport -= 65535;
3119                                 *nport = htons((u_int16_t)tmp_nport);
3120                         } else if (r->rpool.proxy_port[0]) {
3121                                 *nport = htons(r->rpool.proxy_port[0]);
3122                         }
3123                         pd->not_cpu_localized = 1;
3124                         break;
3125                 }
3126                 default:
3127                         return (NULL);
3128                 }
3129         }
3130
3131         return (r);
3132 }
3133
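/*
 * Message used to ask the netisr cpu owning a pcbinfo to perform the pcb
 * hash lookup on our behalf (see pf_socket_lookup()).
 */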
3134 struct netmsg_hashlookup {
3135         struct netmsg_base      base;
3136         struct inpcb            **nm_pinp;
3137         struct inpcbinfo        *nm_pcbinfo;
3138         struct pf_addr          *nm_saddr;
3139         struct pf_addr          *nm_daddr;
3140         uint16_t                nm_sport;
3141         uint16_t                nm_dport;
3142         sa_family_t             nm_af;
3143 };
3144
3145 #ifdef PF_SOCKET_LOOKUP_DOMSG
3146 static void
3147 in_pcblookup_hash_handler(netmsg_t msg)
3148 {
3149         struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg;
3150
3151         if (rmsg->nm_af == AF_INET)
3152                 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo,
3153                     rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4,
3154                     rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
3155 #ifdef INET6
3156         else
3157                 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo,
3158                     &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6,
3159                     rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
3160 #endif /* INET6 */
3161         lwkt_replymsg(&rmsg->base.lmsg, 0);
3162 }
3163 #endif  /* PF_SOCKET_LOOKUP_DOMSG */
3164
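/*
 * Find the local socket (pcb) the packet belongs to so that rules can match
 * on the owning uid/gid.  Fills in pd->lookup and returns 1 on success, or
 * -1 when no pcb is found or the protocol is not TCP/UDP.
 */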
3165 int
3166 pf_socket_lookup(int direction, struct pf_pdesc *pd)
3167 {
3168         struct pf_addr          *saddr, *daddr;
3169         u_int16_t                sport, dport;
3170         struct inpcbinfo        *pi;
3171         struct inpcb            *inp;
3172         struct netmsg_hashlookup *msg = NULL;
3173 #ifdef PF_SOCKET_LOOKUP_DOMSG
3174         struct netmsg_hashlookup msg0;
3175 #endif
3176         int                      pi_cpu = 0;
3177
3178         if (pd == NULL)
3179                 return (-1);
3180         pd->lookup.uid = UID_MAX;
3181         pd->lookup.gid = GID_MAX;
3182         pd->lookup.pid = NO_PID;
3183         if (direction == PF_IN) {
3184                 saddr = pd->src;
3185                 daddr = pd->dst;
3186         } else {
3187                 saddr = pd->dst;
3188                 daddr = pd->src;
3189         }
3190         switch (pd->proto) {
3191         case IPPROTO_TCP:
3192                 if (pd->hdr.tcp == NULL)
3193                         return (-1);
3194                 sport = pd->hdr.tcp->th_sport;
3195                 dport = pd->hdr.tcp->th_dport;
3196
3197                 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport);
3198                 pi = &tcbinfo[pi_cpu];
3199                 /*
3200                  * Our netstack runs lockless on MP systems
3201                  * (only for TCP connections at the moment).
3202                  * 
3203                  * As we are not allowed to read another CPU's tcbinfo,
3204                  * we have to ask that CPU, via a remote call, to search
3205                  * the table for us.
3206                  *
3207                  * Prepare a msg iff the data belongs to another CPU.
3208                  */
3209                 if (pi_cpu != mycpu->gd_cpuid) {
3210 #ifdef PF_SOCKET_LOOKUP_DOMSG
3211                         /*
3212                          * NOTE:
3213                          *
3214                          * The following lwkt_domsg() is dangerous and could
3215                          * lock up the network system, e.g.
3216                          *
3217                          * On a 2-CPU system:
3218                          * netisr0 domsg to netisr1 (due to lookup)
3219                          * netisr1 domsg to netisr0 (due to lookup)
3220                          *
3221                          * We simply return -1 here, since we are probably
3222                          * called before NAT, so the TCP packet should
3223                          * already be on the correct CPU.
3224                          */
3225                         msg = &msg0;
3226                         netmsg_init(&msg->base, NULL, &curthread->td_msgport,
3227                                     0, in_pcblookup_hash_handler);
3228                         msg->nm_pinp = &inp;
3229                         msg->nm_pcbinfo = pi;
3230                         msg->nm_saddr = saddr;
3231                         msg->nm_sport = sport;
3232                         msg->nm_daddr = daddr;
3233                         msg->nm_dport = dport;
3234                         msg->nm_af = pd->af;
3235 #else   /* !PF_SOCKET_LOOKUP_DOMSG */
3236                         kprintf("pf_socket_lookup: tcp packet not on the "
3237                                 "correct cpu %d, cur cpu %d\n",
3238                                 pi_cpu, mycpuid);
3239                         print_backtrace(-1);
3240                         return -1;
3241 #endif  /* PF_SOCKET_LOOKUP_DOMSG */
3242                 }
3243                 break;
3244         case IPPROTO_UDP:
3245                 if (pd->hdr.udp == NULL)
3246                         return (-1);
3247                 sport = pd->hdr.udp->uh_sport;
3248                 dport = pd->hdr.udp->uh_dport;
3249                 pi = &udbinfo[mycpuid];
3250                 break;
3251         default:
3252                 return (-1);
3253         }
3254         if (direction != PF_IN) {
3255                 u_int16_t       p;
3256
3257                 p = sport;
3258                 sport = dport;
3259                 dport = p;
3260         }
3261         switch (pd->af) {
3262 #ifdef INET6
3263         case AF_INET6:
3264                 /*
3265                  * Query the other CPU, second part.
3266                  *
3267                  * msg only gets initialized when:
3268                  * 1) the packet is TCP
3269                  * 2) the info belongs to another CPU
3270                  *
3271                  * Use some switch/case magic to avoid code duplication.
3272                  */
3273                 if (msg == NULL) {
3274                         inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
3275                             &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
3276
3277                         if (inp == NULL)
3278                                 return (-1);
3279                         break;
3280                 }
3281                 /* FALLTHROUGH if SMP and on other CPU */
3282 #endif /* INET6 */
3283         case AF_INET:
3284                 if (msg != NULL) {
3285                         lwkt_domsg(netisr_cpuport(pi_cpu),
3286                                      &msg->base.lmsg, 0);
3287                 } else
3288                 {
3289                         inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4,
3290                             dport, INPLOOKUP_WILDCARD, NULL);
3291                 }
3292                 if (inp == NULL)
3293                         return (-1);
3294                 break;
3295
3296         default:
3297                 return (-1);
3298         }
3299         pd->lookup.uid = inp->inp_socket->so_cred->cr_uid;
3300         pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0];
3301         return (1);
3302 }
3303
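/*
 * Parse the TCP options and return the advertised window scale shift count
 * with PF_WSCALE_FLAG or'd in; returns 0 when the option is absent or the
 * header cannot be pulled up.
 */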
3304 u_int8_t
3305 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3306 {
3307         int              hlen;
3308         u_int8_t         hdr[60];
3309         u_int8_t        *opt, optlen;
3310         u_int8_t         wscale = 0;
3311
3312         hlen = th_off << 2;             /* hlen <= sizeof(hdr) */
3313         if (hlen <= sizeof(struct tcphdr))
3314                 return (0);
3315         if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3316                 return (0);
3317         opt = hdr + sizeof(struct tcphdr);
3318         hlen -= sizeof(struct tcphdr);
3319         while (hlen >= 3) {
3320                 switch (*opt) {
3321                 case TCPOPT_EOL:
3322                 case TCPOPT_NOP:
3323                         ++opt;
3324                         --hlen;
3325                         break;
3326                 case TCPOPT_WINDOW:
3327                         wscale = opt[2];
3328                         if (wscale > TCP_MAX_WINSHIFT)
3329                                 wscale = TCP_MAX_WINSHIFT;
3330                         wscale |= PF_WSCALE_FLAG;
3331                         /* FALLTHROUGH */
3332                 default:
3333                         optlen = opt[1];
3334                         if (optlen < 2)
3335                                 optlen = 2;
3336                         hlen -= optlen;
3337                         opt += optlen;
3338                         break;
3339                 }
3340         }
3341         return (wscale);
3342 }
3343
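/*
 * Parse the TCP options for a maximum segment size option.  Returns the
 * advertised MSS, tcp_mssdflt when the option is absent, or 0 when the
 * header cannot be pulled up.
 */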
3344 u_int16_t
3345 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3346 {
3347         int              hlen;
3348         u_int8_t         hdr[60];
3349         u_int8_t        *opt, optlen;
3350         u_int16_t        mss = tcp_mssdflt;
3351
3352         hlen = th_off << 2;     /* hlen <= sizeof(hdr) */
3353         if (hlen <= sizeof(struct tcphdr))
3354                 return (0);
3355         if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3356                 return (0);
3357         opt = hdr + sizeof(struct tcphdr);
3358         hlen -= sizeof(struct tcphdr);
3359         while (hlen >= TCPOLEN_MAXSEG) {
3360                 switch (*opt) {
3361                 case TCPOPT_EOL:
3362                 case TCPOPT_NOP:
3363                         ++opt;
3364                         --hlen;
3365                         break;
3366                 case TCPOPT_MAXSEG:
3367                         bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3368                         /* FALLTHROUGH */
3369                 default:
3370                         optlen = opt[1];
3371                         if (optlen < 2)
3372                                 optlen = 2;
3373                         hlen -= optlen;
3374                         opt += optlen;
3375                         break;
3376                 }
3377         }
3378         return (mss);
3379 }
3380
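/*
 * Clamp the offered MSS against the MTU of the route towards 'addr',
 * never returning less than 64 bytes.
 */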
3381 u_int16_t
3382 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
3383 {
3384 #ifdef INET
3385         struct sockaddr_in      *dst;
3386         struct route             ro;
3387 #endif /* INET */
3388 #ifdef INET6
3389         struct sockaddr_in6     *dst6;
3390         struct route_in6         ro6;
3391 #endif /* INET6 */
3392         struct rtentry          *rt = NULL;
3393         int                      hlen = 0;
3394         u_int16_t                mss = tcp_mssdflt;
3395
3396         switch (af) {
3397 #ifdef INET
3398         case AF_INET:
3399                 hlen = sizeof(struct ip);
3400                 bzero(&ro, sizeof(ro));
3401                 dst = (struct sockaddr_in *)&ro.ro_dst;
3402                 dst->sin_family = AF_INET;
3403                 dst->sin_len = sizeof(*dst);
3404                 dst->sin_addr = addr->v4;
3405                 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
3406                 rt = ro.ro_rt;
3407                 break;
3408 #endif /* INET */
3409 #ifdef INET6
3410         case AF_INET6:
3411                 hlen = sizeof(struct ip6_hdr);
3412                 bzero(&ro6, sizeof(ro6));
3413                 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3414                 dst6->sin6_family = AF_INET6;
3415                 dst6->sin6_len = sizeof(*dst6);
3416                 dst6->sin6_addr = addr->v6;
3417                 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING));
3418                 rt = ro6.ro_rt;
3419                 break;
3420 #endif /* INET6 */
3421         }
3422
3423         if (rt && rt->rt_ifp) {
3424                 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3425                 mss = max(tcp_mssdflt, mss);
3426                 RTFREE(rt);
3427         }
3428         mss = min(mss, offer);
3429         mss = max(mss, 64);             /* sanity - at least max opt space */
3430         return (mss);
3431 }
3432
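/*
 * For rules with a routing option other than fastroute, pick the route
 * address from the rule's pool and remember the outgoing interface in the
 * new state.
 */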
3433 void
3434 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3435 {
3436         struct pf_rule *r = s->rule.ptr;
3437
3438         s->rt_kif = NULL;
3439         if (!r->rt || r->rt == PF_FASTROUTE)
3440                 return;
3441         switch (s->key[PF_SK_WIRE]->af) {
3442 #ifdef INET
3443         case AF_INET:
3444                 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
3445                     &s->nat_src_node);
3446                 s->rt_kif = r->rpool.cur->kif;
3447                 break;
3448 #endif /* INET */
3449 #ifdef INET6
3450         case AF_INET6:
3451                 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
3452                     &s->nat_src_node);
3453                 s->rt_kif = r->rpool.cur->kif;
3454                 break;
3455 #endif /* INET6 */
3456         }
3457 }
3458
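/*
 * Generate an unpredictable initial sequence number (used for sequence
 * number modulation) by hashing a lazily initialized random secret and the
 * connection tuple with MD5, plus a stepping offset.
 */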
3459 u_int32_t
3460 pf_tcp_iss(struct pf_pdesc *pd)
3461 {
3462         MD5_CTX ctx;
3463         u_int32_t digest[4];
3464
3465         if (pf_tcp_secret_init == 0) {
3466                 lwkt_gettoken(&pf_gtoken);
3467                 if (pf_tcp_secret_init == 0) {
3468                         karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret));
3469                         MD5Init(&pf_tcp_secret_ctx);
3470                         MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
3471                             sizeof(pf_tcp_secret));
3472                         pf_tcp_secret_init = 1;
3473                 }
3474                 lwkt_reltoken(&pf_gtoken);
3475         }
3476         ctx = pf_tcp_secret_ctx;
3477
3478         MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3479         MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3480         if (pd->af == AF_INET6) {
3481                 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3482                 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3483         } else {
3484                 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3485                 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3486         }
3487         MD5Final((u_char *)digest, &ctx);
3488         pf_tcp_iss_off += 4096;
3489
3490         return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off);
3491 }
3492
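/*
 * Rule evaluation for the first packet of a connection: apply any matching
 * binat/nat/rdr translation, walk the active filter rules for the matching
 * rule, generate TCP RST / ICMP errors for blocked packets that request
 * them, and create state when the rule keeps state.
 */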
3493 int
3494 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3495     struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3496     struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
3497     struct ifqueue *ifq, struct inpcb *inp)
3498 {
3499         struct pf_rule          *nr = NULL;
3500         struct pf_addr          *saddr = pd->src, *daddr = pd->dst;
3501         sa_family_t              af = pd->af;
3502         struct pf_rule          *r, *a = NULL;
3503         struct pf_ruleset       *ruleset = NULL;
3504         struct pf_src_node      *nsn = NULL;
3505         struct tcphdr           *th = pd->hdr.tcp;
3506         struct pf_state_key     *skw = NULL, *sks = NULL;
3507         struct pf_state_key     *sk = NULL, *nk = NULL;
3508         u_short                  reason;
3509         int                      rewrite = 0, hdrlen = 0;
3510         int                      tag = -1, rtableid = -1;
3511         int                      asd = 0;
3512         int                      match = 0;
3513         int                      state_icmp = 0;
3514         u_int16_t                sport = 0, dport = 0;
3515         u_int16_t                bproto_sum = 0, bip_sum = 0;
3516         u_int8_t                 icmptype = 0, icmpcode = 0;
3517
3518
3519         if (direction == PF_IN && pf_check_congestion(ifq)) {
3520                 REASON_SET(&reason, PFRES_CONGEST);
3521                 return (PF_DROP);
3522         }
3523
3524         if (inp != NULL)
3525                 pd->lookup.done = pf_socket_lookup(direction, pd);
3526         else if (debug_pfugidhack) { 
3527                 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
3528                 pd->lookup.done = pf_socket_lookup(direction, pd);
3529         }
3530
3531         switch (pd->proto) {
3532         case IPPROTO_TCP:
3533                 sport = th->th_sport;
3534                 dport = th->th_dport;
3535                 hdrlen = sizeof(*th);
3536                 break;
3537         case IPPROTO_UDP:
3538                 sport = pd->hdr.udp->uh_sport;
3539                 dport = pd->hdr.udp->uh_dport;
3540                 hdrlen = sizeof(*pd->hdr.udp);
3541                 break;
3542 #ifdef INET
3543         case IPPROTO_ICMP:
3544                 if (pd->af != AF_INET)
3545                         break;
3546                 sport = dport = pd->hdr.icmp->icmp_id;
3547                 hdrlen = sizeof(*pd->hdr.icmp);
3548                 icmptype = pd->hdr.icmp->icmp_type;
3549                 icmpcode = pd->hdr.icmp->icmp_code;
3550
3551                 if (icmptype == ICMP_UNREACH ||
3552                     icmptype == ICMP_SOURCEQUENCH ||
3553                     icmptype == ICMP_REDIRECT ||
3554                     icmptype == ICMP_TIMXCEED ||
3555                     icmptype == ICMP_PARAMPROB)
3556                         state_icmp++;
3557                 break;
3558 #endif /* INET */
3559 #ifdef INET6
3560         case IPPROTO_ICMPV6:
3561                 if (af != AF_INET6)
3562                         break;
3563                 sport = dport = pd->hdr.icmp6->icmp6_id;
3564                 hdrlen = sizeof(*pd->hdr.icmp6);
3565                 icmptype = pd->hdr.icmp6->icmp6_type;
3566                 icmpcode = pd->hdr.icmp6->icmp6_code;
3567
3568                 if (icmptype == ICMP6_DST_UNREACH ||
3569                     icmptype == ICMP6_PACKET_TOO_BIG ||
3570                     icmptype == ICMP6_TIME_EXCEEDED ||
3571                     icmptype == ICMP6_PARAM_PROB)
3572                         state_icmp++;
3573                 break;
3574 #endif /* INET6 */
3575         default:
3576                 sport = dport = hdrlen = 0;
3577                 break;
3578         }
3579
3580         r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3581
3582         /* check packet for BINAT/NAT/RDR */
3583         if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
3584             &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
3585                 if (nk == NULL || sk == NULL) {
3586                         REASON_SET(&reason, PFRES_MEMORY);
3587                         goto cleanup;
3588                 }
3589
3590                 if (pd->ip_sum)
3591                         bip_sum = *pd->ip_sum;
3592
3593                 m->m_flags &= ~M_HASH;
3594                 switch (pd->proto) {
3595                 case IPPROTO_TCP:
3596                         bproto_sum = th->th_sum;
3597                         pd->proto_sum = &th->th_sum;
3598
3599                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3600                             nk->port[pd->sidx] != sport) {
3601                                 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3602                                     &th->th_sum, &nk->addr[pd->sidx],
3603                                     nk->port[pd->sidx], 0, af);
3604                                 pd->sport = &th->th_sport;
3605                                 sport = th->th_sport;
3606                         }
3607
3608                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3609                             nk->port[pd->didx] != dport) {
3610                                 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3611                                     &th->th_sum, &nk->addr[pd->didx],
3612                                     nk->port[pd->didx], 0, af);
3613                                 dport = th->th_dport;
3614                                 pd->dport = &th->th_dport;
3615                         }
3616                         rewrite++;
3617                         break;
3618                 case IPPROTO_UDP:
3619                         bproto_sum = pd->hdr.udp->uh_sum;
3620                         pd->proto_sum = &pd->hdr.udp->uh_sum;
3621
3622                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3623                             nk->port[pd->sidx] != sport) {
3624                                 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3625                                     pd->ip_sum, &pd->hdr.udp->uh_sum,
3626                                     &nk->addr[pd->sidx],
3627                                     nk->port[pd->sidx], 1, af);
3628                                 sport = pd->hdr.udp->uh_sport;
3629                                 pd->sport = &pd->hdr.udp->uh_sport;
3630                         }
3631
3632                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3633                             nk->port[pd->didx] != dport) {
3634                                 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3635                                     pd->ip_sum, &pd->hdr.udp->uh_sum,
3636                                     &nk->addr[pd->didx],
3637                                     nk->port[pd->didx], 1, af);
3638                                 dport = pd->hdr.udp->uh_dport;
3639                                 pd->dport = &pd->hdr.udp->uh_dport;
3640                         }
3641                         rewrite++;
3642                         break;
3643 #ifdef INET
3644                 case IPPROTO_ICMP:
3645                         nk->port[0] = nk->port[1];
3646                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3647                                 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3648                                     nk->addr[pd->sidx].v4.s_addr, 0);
3649
3650                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3651                                 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3652                                     nk->addr[pd->didx].v4.s_addr, 0);
3653
3654                         if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3655                                 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3656                                     pd->hdr.icmp->icmp_cksum, sport,
3657                                     nk->port[1], 0);
3658                                 pd->hdr.icmp->icmp_id = nk->port[1];
3659                                 pd->sport = &pd->hdr.icmp->icmp_id;
3660                         }
3661                         m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3662                         break;
3663 #endif /* INET */
3664 #ifdef INET6
3665                 case IPPROTO_ICMPV6:
3666                         nk->port[0] = nk->port[1];
3667                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3668                                 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3669                                     &nk->addr[pd->sidx], 0);
3670
3671                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3672                                 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3673                                     &nk->addr[pd->didx], 0);
3674                         rewrite++;
3675                         break;
3676 #endif /* INET6 */
3677                 default:
3678                         switch (af) {
3679 #ifdef INET
3680                         case AF_INET:
3681                                 if (PF_ANEQ(saddr,
3682                                     &nk->addr[pd->sidx], AF_INET))
3683                                         pf_change_a(&saddr->v4.s_addr,
3684                                             pd->ip_sum,
3685                                             nk->addr[pd->sidx].v4.s_addr, 0);
3686
3687                                 if (PF_ANEQ(daddr,
3688                                     &nk->addr[pd->didx], AF_INET))
3689                                         pf_change_a(&daddr->v4.s_addr,
3690                                             pd->ip_sum,
3691                                             nk->addr[pd->didx].v4.s_addr, 0);
3692                                 break;
3693 #endif /* INET */
3694 #ifdef INET6
3695                         case AF_INET6:
3696                                 if (PF_ANEQ(saddr,
3697                                     &nk->addr[pd->sidx], AF_INET6))
3698                                         PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3699
3700                                 if (PF_ANEQ(daddr,
3701                                     &nk->addr[pd->didx], AF_INET6))
3702                                         PF_ACPY(daddr, &nk->addr[pd->didx], af);
3703                                 break;
3704 #endif /* INET6 */
3705                         }
3706                         break;
3707                 }
3708                 if (nr->natpass)
3709                         r = NULL;
3710                 pd->nat_rule = nr;
3711         }
3712
3713         while (r != NULL) {
3714                 r->evaluations++;
3715                 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3716                         r = r->skip[PF_SKIP_IFP].ptr;
3717                 else if (r->direction && r->direction != direction)
3718                         r = r->skip[PF_SKIP_DIR].ptr;
3719                 else if (r->af && r->af != af)
3720                         r = r->skip[PF_SKIP_AF].ptr;
3721                 else if (r->proto && r->proto != pd->proto)
3722                         r = r->skip[PF_SKIP_PROTO].ptr;
3723                 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3724                     r->src.neg, kif))
3725                         r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3726                 /* tcp/udp only. port_op always 0 in other cases */
3727                 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3728                     r->src.port[0], r->src.port[1], sport))
3729                         r = r->skip[PF_SKIP_SRC_PORT].ptr;
3730                 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3731                     r->dst.neg, NULL))
3732                         r = r->skip[PF_SKIP_DST_ADDR].ptr;
3733                 /* tcp/udp only. port_op always 0 in other cases */
3734                 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3735                     r->dst.port[0], r->dst.port[1], dport))
3736                         r = r->skip[PF_SKIP_DST_PORT].ptr;
3737                 /* icmp only. type always 0 in other cases */
3738                 else if (r->type && r->type != icmptype + 1)
3739                         r = TAILQ_NEXT(r, entries);
3740                 /* icmp only. type always 0 in other cases */
3741                 else if (r->code && r->code != icmpcode + 1)
3742                         r = TAILQ_NEXT(r, entries);
3743                 else if (r->tos && !(r->tos == pd->tos))
3744                         r = TAILQ_NEXT(r, entries);
3745                 else if (r->rule_flag & PFRULE_FRAGMENT)
3746                         r = TAILQ_NEXT(r, entries);
3747                 else if (pd->proto == IPPROTO_TCP &&
3748                     (r->flagset & th->th_flags) != r->flags)
3749                         r = TAILQ_NEXT(r, entries);
3750                 /* tcp/udp only. uid.op always 0 in other cases */
3751                 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3752                     pf_socket_lookup(direction, pd), 1)) &&
3753                     !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3754                     pd->lookup.uid))
3755                         r = TAILQ_NEXT(r, entries);
3756                 /* tcp/udp only. gid.op always 0 in other cases */
3757                 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3758                     pf_socket_lookup(direction, pd), 1)) &&
3759                     !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3760                     pd->lookup.gid))
3761                         r = TAILQ_NEXT(r, entries);
3762                 else if (r->prob &&
3763                   r->prob <= karc4random())
3764                         r = TAILQ_NEXT(r, entries);
3765                 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3766                         r = TAILQ_NEXT(r, entries);
3767                 else if (r->os_fingerprint != PF_OSFP_ANY &&
3768                     (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3769                     pf_osfp_fingerprint(pd, m, off, th),
3770                     r->os_fingerprint)))
3771                         r = TAILQ_NEXT(r, entries);
3772                 else {
3773                         if (r->tag)
3774                                 tag = r->tag;
3775                         if (r->rtableid >= 0)
3776                                 rtableid = r->rtableid;
3777                         if (r->anchor == NULL) {
3778                                 match = 1;
3779                                 *rm = r;
3780                                 *am = a;
3781                                 *rsm = ruleset;
3782                                 if ((*rm)->quick)
3783                                         break;
3784                                 r = TAILQ_NEXT(r, entries);
3785                         } else
3786                                 pf_step_into_anchor(&asd, &ruleset,
3787                                     PF_RULESET_FILTER, &r, &a, &match);
3788                 }
3789                 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3790                     PF_RULESET_FILTER, &r, &a, &match))
3791                         break;
3792         }
3793         r = *rm;
3794         a = *am;
3795         ruleset = *rsm;
3796
3797         REASON_SET(&reason, PFRES_MATCH);
3798
3799         if (r->log || (nr != NULL && nr->log)) {
3800                 if (rewrite)
3801                         m_copyback(m, off, hdrlen, pd->hdr.any);
3802                 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
3803                     a, ruleset, pd);
3804         }
3805
3806         if ((r->action == PF_DROP) &&
3807             ((r->rule_flag & PFRULE_RETURNRST) ||
3808             (r->rule_flag & PFRULE_RETURNICMP) ||
3809             (r->rule_flag & PFRULE_RETURN))) {
3810                 /* undo NAT changes, if they have taken place */
3811                 if (nr != NULL) {
3812                         PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3813                         PF_ACPY(daddr, &sk->addr[pd->didx], af);
3814                         if (pd->sport)
3815                                 *pd->sport = sk->port[pd->sidx];
3816                         if (pd->dport)
3817                                 *pd->dport = sk->port[pd->didx];
3818                         if (pd->proto_sum)
3819                                 *pd->proto_sum = bproto_sum;
3820                         if (pd->ip_sum)
3821                                 *pd->ip_sum = bip_sum;
3822                         m_copyback(m, off, hdrlen, pd->hdr.any);
3823                 }
3824                 if (pd->proto == IPPROTO_TCP &&
3825                     ((r->rule_flag & PFRULE_RETURNRST) ||
3826                     (r->rule_flag & PFRULE_RETURN)) &&
3827                     !(th->th_flags & TH_RST)) {
3828                         u_int32_t        ack = ntohl(th->th_seq) + pd->p_len;
3829                         int              len = 0;
3830                         struct ip       *h4;
3831 #ifdef INET6
3832                         struct ip6_hdr  *h6;
3833 #endif
3834                         switch (af) {
3835                         case AF_INET:
3836                                 h4 = mtod(m, struct ip *);
3837                                 len = h4->ip_len - off;
3838                                 break;
3839 #ifdef INET6
3840                         case AF_INET6:
3841                                 h6 = mtod(m, struct ip6_hdr *);
3842                                 len = h6->ip6_plen - (off - sizeof(*h6));
3843                                 break;
3844 #endif
3845                         }
3846
3847                         if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3848                                 REASON_SET(&reason, PFRES_PROTCKSUM);
3849                         else {
3850                                 if (th->th_flags & TH_SYN)
3851                                         ack++;
3852                                 if (th->th_flags & TH_FIN)
3853                                         ack++;
3854                                 pf_send_tcp(r, af, pd->dst,
3855                                     pd->src, th->th_dport, th->th_sport,
3856                                     ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3857                                     r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
3858                         }
3859                 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3860                     r->return_icmp)
3861                         pf_send_icmp(m, r->return_icmp >> 8,
3862                             r->return_icmp & 255, af, r);
3863                 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3864                     r->return_icmp6)
3865                         pf_send_icmp(m, r->return_icmp6 >> 8,
3866                             r->return_icmp6 & 255, af, r);
3867         }
3868
3869         if (r->action == PF_DROP)
3870                 goto cleanup;
3871
3872         if (pf_tag_packet(m, tag, rtableid)) {
3873                 REASON_SET(&reason, PFRES_MEMORY);
3874                 goto cleanup;
3875         }
3876
3877         if (!state_icmp && (r->keep_state || nr != NULL ||
3878             (pd->flags & PFDESC_TCP_NORM))) {
3879                 int action;
3880                 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
3881                     off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
3882                     bip_sum, hdrlen);
3883                 if (action != PF_PASS)
3884                         return (action);
3885         }
3886
3887         /* copy back packet headers if we performed NAT operations */
3888         if (rewrite)
3889                 m_copyback(m, off, hdrlen, pd->hdr.any);
3890
3891         return (PF_PASS);
3892
3893 cleanup:
3894         if (sk != NULL)
3895                 kfree(sk, M_PFSTATEKEYPL);
3896         if (nk != NULL)
3897                 kfree(nk, M_PFSTATEKEYPL);
3898         return (PF_DROP);
3899 }
3900
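/*
 * Allocate and initialize the state entry for a rule that keeps state:
 * enforce state/source-node limits, record the matching rules, and set up
 * the per-protocol sequence/window tracking and initial timeouts.
 */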
3901 static __inline int
3902 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3903     struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
3904     struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
3905     struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
3906     struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
3907     u_int16_t bip_sum, int hdrlen)
3908 {
3909         struct pf_state         *s = NULL;
3910         struct pf_src_node      *sn = NULL;
3911         struct tcphdr           *th = pd->hdr.tcp;
3912         u_int16_t                mss = tcp_mssdflt;
3913         u_short                  reason;
3914         int cpu = mycpu->gd_cpuid;
3915
3916         /* check maximums */
3917         if (r->max_states && (r->states_cur >= r->max_states)) {
3918                 pf_status.lcounters[LCNT_STATES]++;
3919                 REASON_SET(&reason, PFRES_MAXSTATES);
3920                 return (PF_DROP);
3921         }
3922         /* src node for filter rule */
3923         if ((r->rule_flag & PFRULE_SRCTRACK ||
3924             r->rpool.opts & PF_POOL_STICKYADDR) &&
3925             pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3926                 REASON_SET(&reason, PFRES_SRCLIMIT);
3927                 goto csfailed;
3928         }
3929         /* src node for translation rule */
3930         if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3931             pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3932                 REASON_SET(&reason, PFRES_SRCLIMIT);
3933                 goto csfailed;
3934         }
3935         s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO);
3936         if (s == NULL) {
3937                 REASON_SET(&reason, PFRES_MEMORY);
3938                 goto csfailed;
3939         }
3940         lockinit(&s->lk, "pfstlk", 0, 0);
3941         s->id = 0; /* XXX Do we really need that? not in OpenBSD */
3942         s->creatorid = 0;
3943         s->rule.ptr = r;
3944         s->nat_rule.ptr = nr;
3945         s->anchor.ptr = a;
3946         s->state_flags = PFSTATE_CREATEINPROG;
3947         STATE_INC_COUNTERS(s);
3948         if (r->allow_opts)
3949                 s->state_flags |= PFSTATE_ALLOWOPTS;
3950         if (r->rule_flag & PFRULE_STATESLOPPY)
3951                 s->state_flags |= PFSTATE_SLOPPY;
3952         if (pd->not_cpu_localized)
3953                 s->state_flags |= PFSTATE_STACK_GLOBAL;
3954
3955         s->log = r->log & PF_LOG_ALL;
3956         if (nr != NULL)
3957                 s->log |= nr->log & PF_LOG_ALL;
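             /*
              * Initialize the per-protocol peer state and the initial
              * timeout.  For TCP this also seeds the sequence tracking
              * windows and, for "modulate state" rules, applies a random
              * sequence number offset (seqdiff) to the initial SYN.
              */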
3958         switch (pd->proto) {
3959         case IPPROTO_TCP:
3960                 s->src.seqlo = ntohl(th->th_seq);
3961                 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3962                 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3963                     r->keep_state == PF_STATE_MODULATE) {
3964                         /* Generate sequence number modulator */
3965                         if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3966                             0)
3967                                 s->src.seqdiff = 1;
3968                         pf_change_a(&th->th_seq, &th->th_sum,
3969                             htonl(s->src.seqlo + s->src.seqdiff), 0);
3970                         *rewrite = 1;
3971                 } else
3972                         s->src.seqdiff = 0;
3973                 if (th->th_flags & TH_SYN) {
3974                         s->src.seqhi++;
3975                         s->src.wscale = pf_get_wscale(m, off,
3976                             th->th_off, pd->af);
3977                 }
3978                 s->src.max_win = MAX(ntohs(th->th_win), 1);
3979                 if (s->src.wscale & PF_WSCALE_MASK) {
3980                         /* Remove scale factor from initial window */
3981                         int win = s->src.max_win;
3982                         win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3983                         s->src.max_win = (win - 1) >>
3984                             (s->src.wscale & PF_WSCALE_MASK);
3985                 }
3986                 if (th->th_flags & TH_FIN)
3987                         s->src.seqhi++;
3988                 s->dst.seqhi = 1;
3989                 s->dst.max_win = 1;
3990                 s->src.state = TCPS_SYN_SENT;
3991                 s->dst.state = TCPS_CLOSED;
3992                 s->timeout = PFTM_TCP_FIRST_PACKET;
3993                 break;
3994         case IPPROTO_UDP:
3995                 s->src.state = PFUDPS_SINGLE;
3996                 s->dst.state = PFUDPS_NO_TRAFFIC;
3997                 s->timeout = PFTM_UDP_FIRST_PACKET;
3998                 break;
3999         case IPPROTO_ICMP:
4000 #ifdef INET6
4001         case IPPROTO_ICMPV6:
4002 #endif
4003                 s->timeout = PFTM_ICMP_FIRST_PACKET;
4004                 break;
4005         default:
4006                 s->src.state = PFOTHERS_SINGLE;
4007                 s->dst.state = PFOTHERS_NO_TRAFFIC;
4008                 s->timeout = PFTM_OTHER_FIRST_PACKET;
4009         }
4010
4011         s->creation = time_second;
4012         s->expire = time_second;
4013
4014         if (sn != NULL) {
4015                 s->src_node = sn;
4016                 s->src_node->states++;
4017         }
4018         if (nsn != NULL) {
4019                 /* XXX We only modify one side for now. */
4020                 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
4021                 s->nat_src_node = nsn;
4022                 s->nat_src_node->states++;
4023         }
4024         if (pd->proto == IPPROTO_TCP) {
4025                 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
4026                     off, pd, th, &s->src, &s->dst)) {
4027                         REASON_SET(&reason, PFRES_MEMORY);
4028                         pf_src_tree_remove_state(s);
4029                         STATE_DEC_COUNTERS(s);
4030                         kfree(s, M_PFSTATEPL);
4031                         return (PF_DROP);
4032                 }
4033                 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
4034                     pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
4035                     &s->src, &s->dst, rewrite)) {
4036                         /* This really shouldn't happen!!! */
4037                         DPFPRINTF(PF_DEBUG_URGENT,
4038                             ("pf_normalize_tcp_stateful failed on first pkt"));
4039                         pf_normalize_tcp_cleanup(s);
4040                         pf_src_tree_remove_state(s);
4041                         STATE_DEC_COUNTERS(s);
4042                         kfree(s, M_PFSTATEPL);
4043                         return (PF_DROP);
4044                 }
4045         }
4046         s->direction = pd->dir;
4047
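             /*
              * Set up the state keys if the caller did not provide them and
              * link the new state into the state table.  pf_state_insert()
              * can fail (e.g. on a duplicate key), in which case everything
              * allocated so far is torn down and the packet is dropped.
              */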
4048         if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
4049                                              pd->src, pd->dst, sport, dport)) {
4050                 REASON_SET(&reason, PFRES_MEMORY);
4051                 goto csfailed;
4052         }
4053
4054         if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
4055                 if (pd->proto == IPPROTO_TCP)
4056                         pf_normalize_tcp_cleanup(s);
4057                 REASON_SET(&reason, PFRES_STATEINS);
4058                 pf_src_tree_remove_state(s);
4059                 STATE_DEC_COUNTERS(s);
4060                 kfree(s, M_PFSTATEPL);
4061                 return (PF_DROP);
4062         } else
4063                 *sm = s;
4064
4065         pf_set_rt_ifp(s, pd->src);      /* needs s->state_key set */
4066         if (tag > 0) {
4067                 pf_tag_ref(tag);
4068                 s->tag = tag;
4069         }
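             /*
              * SYN proxy: answer the client's initial SYN ourselves with a
              * SYN|ACK (random ISN, MSS read from the SYN and clamped via
              * pf_calc_mss() for both endpoints) and return
              * PF_SYNPROXY_DROP so the SYN itself is never forwarded.  Any
              * NAT rewrites done above are undone first so the reply is
              * built from the original addresses and ports.
              */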
4070         if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4071             TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
4072                 s->src.state = PF_TCPS_PROXY_SRC;
4073                 /* undo NAT changes, if they have taken place */
4074                 if (nr != NULL) {
4075                         struct pf_state_key *skt = s->key[PF_SK_WIRE];
4076                         if (pd->dir == PF_OUT)
4077                                 skt = s->key[PF_SK_STACK];
4078                         PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
4079                         PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
4080                         if (pd->sport)
4081                                 *pd->sport = skt->port[pd->sidx];
4082                         if (pd->dport)
4083                                 *pd->dport = skt->port[pd->didx];
4084                         if (pd->proto_sum)
4085                                 *pd->proto_sum = bproto_sum;
4086                         if (pd->ip_sum)
4087                                 *pd->ip_sum = bip_sum;
4088                         m_copyback(m, off, hdrlen, pd->hdr.any);
4089                 }
4090                 s->src.seqhi = htonl(karc4random());
4091                 /* Find mss option */
4092                 mss = pf_get_mss(m, off, th->th_off, pd->af);
4093                 mss = pf_calc_mss(pd->src, pd->af, mss);
4094                 mss = pf_calc_mss(pd->dst, pd->af, mss);
4095                 s->src.mss = mss;
4096                 s->state_flags &= ~PFSTATE_CREATEINPROG;
4097                 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4098                             th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4099                             TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
4100                 REASON_SET(&reason, PFRES_SYNPROXY);
4101                 return (PF_SYNPROXY_DROP);
4102         }
4103
4104         s->state_flags &= ~PFSTATE_CREATEINPROG;
4105         return (PF_PASS);
4106
4107 csfailed:
4108         if (sk != NULL)
4109                 kfree(sk, M_PFSTATEKEYPL);
4110         if (nk != NULL)
4111                 kfree(nk, M_PFSTATEKEYPL);
4112
4113         if (sn != NULL && sn->states == 0 && sn->expire == 0) {
4114                 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], sn);
4115                 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4116                 atomic_add_int(&pf_status.src_nodes, -1);
4117                 kfree(sn, M_PFSRCTREEPL);
4118         }
4119         if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
4120                 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], nsn);
4121                 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4122                 atomic_add_int(&pf_status.src_nodes, -1);
4123                 kfree(nsn, M_PFSRCTREEPL);
4124         }
4125         if (s) {
4126                 pf_src_tree_remove_state(s);
4127                 STATE_DEC_COUNTERS(s);
4128                 kfree(s, M_PFSTATEPL);
4129         }
4130
4131         return (PF_DROP);
4132 }
4133
4134 int
4135 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
4136     struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
4137     struct pf_ruleset **rsm)
4138 {
4139         struct pf_rule          *r, *a = NULL;
4140         struct pf_ruleset       *ruleset = NULL;
4141         sa_family_t              af = pd->af;
4142         u_short                  reason;
4143         int                      tag = -1;
4144         int                      asd = 0;
4145         int                      match = 0;
4146
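             /*
              * Walk the active filter ruleset.  Only the first fragment
              * carries the transport header, so rules that need ports, TCP
              * flags, ICMP type/code or an OS fingerprint cannot be
              * evaluated here and are skipped.
              */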
4147         r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4148         while (r != NULL) {
4149                 r->evaluations++;
4150                 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4151                         r = r->skip[PF_SKIP_IFP].ptr;
4152                 else if (r->direction && r->direction != direction)
4153                         r = r->skip[PF_SKIP_DIR].ptr;
4154                 else if (r->af && r->af != af)
4155                         r = r->skip[PF_SKIP_AF].ptr;
4156                 else if (r->proto && r->proto != pd->proto)
4157                         r = r->skip[PF_SKIP_PROTO].ptr;
4158                 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4159                     r->src.neg, kif))
4160                         r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4161                 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4162                     r->dst.neg, NULL))
4163                         r = r->skip[PF_SKIP_DST_ADDR].ptr;
4164                 else if (r->tos && !(r->tos == pd->tos))
4165                         r = TAILQ_NEXT(r, entries);
4166                 else if (r->os_fingerprint != PF_OSFP_ANY)
4167                         r = TAILQ_NEXT(r, entries);
4168                 else if (pd->proto == IPPROTO_UDP &&
4169                     (r->src.port_op || r->dst.port_op))
4170                         r = TAILQ_NEXT(r, entries);
4171                 else if (pd->proto == IPPROTO_TCP &&
4172                     (r->src.port_op || r->dst.port_op || r->flagset))
4173                         r = TAILQ_NEXT(r, entries);
4174                 else if ((pd->proto == IPPROTO_ICMP ||
4175                     pd->proto == IPPROTO_ICMPV6) &&
4176                     (r->type || r->code))
4177                         r = TAILQ_NEXT(r, entries);
4178                 else if (r->prob && r->prob <= karc4random())
4179                         r = TAILQ_NEXT(r, entries);
4180                 else if (r->match_tag && !pf_match_tag(m, r, &tag))
4181                         r = TAILQ_NEXT(r, entries);
4182                 else {
4183                         if (r->anchor == NULL) {
4184                                 match = 1;
4185                                 *rm = r;
4186                                 *am = a;
4187                                 *rsm = ruleset;
4188                                 if ((*rm)->quick)
4189                                         break;
4190                                 r = TAILQ_NEXT(r, entries);
4191                         } else
4192                                 pf_step_into_anchor(&asd, &ruleset,
4193                                     PF_RULESET_FILTER, &r, &a, &match);
4194                 }
4195                 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4196                     PF_RULESET_FILTER, &r, &a, &match))
4197                         break;
4198         }
4199         r = *rm;
4200         a = *am;
4201         ruleset = *rsm;
4202
4203         REASON_SET(&reason, PFRES_MATCH);
4204
4205         if (r->log)
4206                 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
4207                     pd);
4208
4209         if (r->action != PF_PASS)
4210                 return (PF_DROP);
4211
4212         if (pf_tag_packet(m, tag, -1)) {
4213                 REASON_SET(&reason, PFRES_MEMORY);
4214                 return (PF_DROP);
4215         }
4216
4217         return (PF_PASS);
4218 }
4219
4220 /*
4221  * Called with state locked
4222  */
4223 int
4224 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
4225         struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
4226         struct pf_pdesc *pd, u_short *reason, int *copyback)
4227 {
4228         struct tcphdr           *th = pd->hdr.tcp;
4229         u_int16_t                win = ntohs(th->th_win);
4230         u_int32_t                ack, end, seq, orig_seq;
4231         u_int8_t                 sws, dws;
4232         int                      ackskew;
4233
4234         if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4235                 sws = src->wscale & PF_WSCALE_MASK;
4236                 dws = dst->wscale & PF_WSCALE_MASK;
4237         } else
4238                 sws = dws = 0;
4239
4240         /*
4241          * Sequence tracking algorithm from Guido van Rooij's paper:
4242          *   http://www.madison-gurkha.com/publications/tcp_filtering/
4243          *      tcp_filtering.ps
4244          */
4245
4246         orig_seq = seq = ntohl(th->th_seq);
4247         if (src->seqlo == 0) {
4248                 /* First packet from this end. Set its state */
4249
4250                 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4251                     src->scrub == NULL) {
4252                         if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4253                                 REASON_SET(reason, PFRES_MEMORY);
4254                                 return (PF_DROP);
4255                         }
4256                 }
4257
4258                 /* Deferred generation of sequence number modulator */
4259                 if (dst->seqdiff && !src->seqdiff) {
4260                         /* use random iss for the TCP server */
4261                         while ((src->seqdiff = karc4random() - seq) == 0)
4262                                 ;
4263                         ack = ntohl(th->th_ack) - dst->seqdiff;
4264                         pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4265                             src->seqdiff), 0);
4266                         pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4267                         *copyback = 1;
4268                 } else {
4269                         ack = ntohl(th->th_ack);
4270                 }
4271
4272                 end = seq + pd->p_len;
4273                 if (th->th_flags & TH_SYN) {
4274                         end++;
4275                         (*state)->sync_flags |= PFSTATE_GOT_SYN2;
4276                         if (dst->wscale & PF_WSCALE_FLAG) {
4277                                 src->wscale = pf_get_wscale(m, off, th->th_off,
4278                                     pd->af);
4279                                 if (src->wscale & PF_WSCALE_FLAG) {
4280                                         /* Remove scale factor from initial
4281                                          * window */
4282                                         sws = src->wscale & PF_WSCALE_MASK;
4283                                         win = ((u_int32_t)win + (1 << sws) - 1)
4284                                             >> sws;
4285                                         dws = dst->wscale & PF_WSCALE_MASK;
4286                                 } else {
4287                                         /* fixup other window */
4288                                         dst->max_win <<= dst->wscale &
4289                                             PF_WSCALE_MASK;
4290                                         /* in case of a retrans SYN|ACK */
4291                                         dst->wscale = 0;
4292                                 }
4293                         }
4294                 }
4295                 if (th->th_flags & TH_FIN)
4296                         end++;
4297
4298                 src->seqlo = seq;
4299                 if (src->state < TCPS_SYN_SENT)
4300                         src->state = TCPS_SYN_SENT;
4301
4302                 /*
4303                  * May need to slide the window (seqhi may have been set by
4304                  * the crappy stack check or if we picked up the connection
4305                  * after establishment)
4306                  */
4307                 if (src->seqhi == 1 ||
4308                     SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4309                         src->seqhi = end + MAX(1, dst->max_win << dws);
4310                 if (win > src->max_win)
4311                         src->max_win = win;
4312
4313         } else {
4314                 ack = ntohl(th->th_ack) - dst->seqdiff;
4315                 if (src->seqdiff) {
4316                         /* Modulate sequence numbers */
4317                         pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4318                             src->seqdiff), 0);
4319                         pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4320                         *copyback = 1;
4321                 }
4322                 end = seq + pd->p_len;
4323                 if (th->th_flags & TH_SYN)
4324                         end++;
4325                 if (th->th_flags & TH_FIN)
4326                         end++;
4327         }
4328
4329         if ((th->th_flags & TH_ACK) == 0) {
4330                 /* Let it pass through the ack skew check */
4331                 ack = dst->seqlo;
4332         } else if ((ack == 0 &&
4333             (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4334             /* broken tcp stacks do not set ack */
4335             (dst->state < TCPS_SYN_SENT)) {
4336                 /*
4337                  * Many stacks (ours included) will set the ACK number in an
4338                  * FIN|ACK if the SYN times out -- no sequence to ACK.
4339                  */
4340                 ack = dst->seqlo;
4341         }
4342
4343         if (seq == end) {
4344                 /* Ease sequencing restrictions on no data packets */
4345                 seq = src->seqlo;
4346                 end = seq;
4347         }
4348
4349         ackskew = dst->seqlo - ack;
4350
4351
4352         /*
4353          * Need to demodulate the sequence numbers in any TCP SACK options
4354          * (Selective ACK). We could optionally validate the SACK values
4355          * against the current ACK window, either forwards or backwards, but
4356          * I'm not confident that SACK has been implemented properly
4357          * everywhere. It wouldn't surprise me if several stacks accidentally
4358          * SACK too far backwards of previously ACKed data. There really aren't
4359          * any security implications of bad SACKing unless the target stack
4360          * doesn't validate the option length correctly. Someone trying to
4361          * spoof into a TCP connection won't bother blindly sending SACK
4362          * options anyway.
4363          */
4364         if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4365                 if (pf_modulate_sack(m, off, pd, th, dst))
4366                         *copyback = 1;
4367         }
4368
4369
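             /*
              * The checks below sort the packet into one of four outcomes:
              * a strict sequence/window match (full state update), a loose
              * match for awkward establishment or post-close traffic
              * (limited update), a pickup-mode bypass where sequence checks
              * are waived, or a failure that is logged and dropped with
              * PFRES_BADSTATE.
              */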
4370 #define MAXACKWINDOW (0xffff + 1500)    /* 1500 is an arbitrary fudge factor */
4371         if (SEQ_GEQ(src->seqhi, end) &&
4372             /* Last octet inside other's window space */
4373             SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4374             /* Retrans: not more than one window back */
4375             (ackskew >= -MAXACKWINDOW) &&
4376             /* Acking not more than one reassembled fragment backwards */
4377             (ackskew <= (MAXACKWINDOW << sws)) &&
4378             /* Acking not more than one window forward */
4379             ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4380             (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4381             (pd->flags & PFDESC_IP_REAS) == 0)) {
4382             /* Require an exact/+1 sequence match on resets when possible */
4383
4384                 if (dst->scrub || src->scrub) {
4385                         if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4386                             *state, src, dst, copyback))
4387                                 return (PF_DROP);
4388                 }
4389
4390                 /* update max window */
4391                 if (src->max_win < win)
4392                         src->max_win = win;
4393                 /* synchronize sequencing */
4394                 if (SEQ_GT(end, src->seqlo))
4395                         src->seqlo = end;
4396                 /* slide the window of what the other end can send */
4397                 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4398                         dst->seqhi = ack + MAX((win << sws), 1);
4399
4400
4401                 /* update states */
4402                 if (th->th_flags & TH_SYN)
4403                         if (src->state < TCPS_SYN_SENT)
4404                                 src->state = TCPS_SYN_SENT;
4405                 if (th->th_flags & TH_FIN)
4406                         if (src->state < TCPS_CLOSING)
4407                                 src->state = TCPS_CLOSING;
4408                 if (th->th_flags & TH_ACK) {
4409                         if (dst->state == TCPS_SYN_SENT) {
4410                                 dst->state = TCPS_ESTABLISHED;
4411                                 if (src->state == TCPS_ESTABLISHED &&
4412                                     (*state)->src_node != NULL &&
4413                                     pf_src_connlimit(*state)) {
4414                                         REASON_SET(reason, PFRES_SRCLIMIT);
4415                                         return (PF_DROP);
4416                                 }
4417                         } else if (dst->state == TCPS_CLOSING)
4418                                 dst->state = TCPS_FIN_WAIT_2;
4419                 }
4420                 if (th->th_flags & TH_RST)
4421                         src->state = dst->state = TCPS_TIME_WAIT;
4422
4423                 /* update expire time */
4424                 (*state)->expire = time_second;
4425                 if (src->state >= TCPS_FIN_WAIT_2 &&
4426                     dst->state >= TCPS_FIN_WAIT_2)
4427                         (*state)->timeout = PFTM_TCP_CLOSED;
4428                 else if (src->state >= TCPS_CLOSING &&
4429                     dst->state >= TCPS_CLOSING)
4430                         (*state)->timeout = PFTM_TCP_FIN_WAIT;
4431                 else if (src->state < TCPS_ESTABLISHED ||
4432                     dst->state < TCPS_ESTABLISHED)
4433                         (*state)->timeout = PFTM_TCP_OPENING;
4434                 else if (src->state >= TCPS_CLOSING ||
4435                     dst->state >= TCPS_CLOSING)
4436                         (*state)->timeout = PFTM_TCP_CLOSING;
4437                 else
4438                         (*state)->timeout = PFTM_TCP_ESTABLISHED;
4439
4440                 /* Fall through to PASS packet */
4441
4442         } else if ((dst->state < TCPS_SYN_SENT ||
4443                 dst->state >= TCPS_FIN_WAIT_2 ||
4444                 src->state >= TCPS_FIN_WAIT_2) &&
4445             SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4446             /* Within a window forward of the originating packet */
4447             SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4448             /* Within a window backward of the originating packet */
4449
4450                 /*
4451                  * This currently handles three situations:
4452                  *  1) Stupid stacks will shotgun SYNs before their peer
4453                  *     replies.
4454                  *  2) When PF catches an already established stream (the
4455                  *     firewall rebooted, the state table was flushed, routes
4456                  *     changed...)
4457                  *  3) Packets get funky immediately after the connection
4458                  *     closes (this should catch Solaris spurious ACK|FINs
4459                  *     that web servers like to spew after a close)
4460                  *
4461                  * This must be a little more careful than the above code
4462                  * since packet floods will also be caught here. We don't
4463                  * update the TTL here to mitigate the damage of a packet
4464                  * flood and so the same code can handle awkward establishment
4465                  * and a loosened connection close.
4466                  * In the establishment case, a correct peer response will
4467                  * validate the connection, go through the normal state code
4468                  * and keep updating the state TTL.
4469                  */
4470
4471                 if (pf_status.debug >= PF_DEBUG_MISC) {
4472                         kprintf("pf: loose state match: ");
4473                         pf_print_state(*state);
4474                         pf_print_flags(th->th_flags);
4475                         kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4476                             "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len,
4477                             ackskew, (unsigned long long)(*state)->packets[0],
4478                             (unsigned long long)(*state)->packets[1],
4479                             pd->dir == PF_IN ? "in" : "out",
4480                             pd->dir == (*state)->direction ? "fwd" : "rev");
4481                 }
4482
4483                 if (dst->scrub || src->scrub) {
4484                         if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4485                             *state, src, dst, copyback))
4486                                 return (PF_DROP);
4487                 }
4488
4489                 /* update max window */
4490                 if (src->max_win < win)
4491                         src->max_win = win;
4492                 /* synchronize sequencing */
4493                 if (SEQ_GT(end, src->seqlo))
4494                         src->seqlo = end;
4495                 /* slide the window of what the other end can send */
4496                 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4497                         dst->seqhi = ack + MAX((win << sws), 1);
4498
4499                 /*
4500                  * Cannot set dst->seqhi here since this could be a shotgunned
4501                  * SYN and not an already established connection.
4502                  */
4503
4504                 if (th->th_flags & TH_FIN)
4505                         if (src->state < TCPS_CLOSING)
4506                                 src->state = TCPS_CLOSING;
4507                 if (th->th_flags & TH_RST)
4508                         src->state = dst->state = TCPS_TIME_WAIT;
4509
4510                 /* Fall through to PASS packet */
4511
4512         } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY ||
4513                     ((*state)->pickup_mode == PF_PICKUPS_ENABLED &&
4514                      ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) !=
4515                       PFSTATE_GOT_SYN_MASK)) {
4516                 /*
4517                  * If pickup mode is hash only, do not fail on sequence checks.
4518                  *
4519                  * If pickup mode is enabled and we did not see the SYN in
4520                  * both directions, do not fail on sequence checks because
4521                  * we do not have complete information on window scale.
4522                  *
4523                  * Adjust expiration and fall through to PASS packet.
4524                  * XXX Add a FIN check to reduce timeout?
4525                  */
4526                 (*state)->expire = time_second;
4527         } else  {
4528                 /*
4529                  * Failure processing
4530                  */
4531                 if ((*state)->dst.state == TCPS_SYN_SENT &&
4532                     (*state)->src.state == TCPS_SYN_SENT) {
4533                         /* Send RST for state mismatches during handshake */
4534                         if (!(th->th_flags & TH_RST))
4535                                 pf_send_tcp((*state)->rule.ptr, pd->af,
4536                                     pd->dst, pd->src, th->th_dport,
4537                                     th->th_sport, ntohl(th->th_ack), 0,
4538                                     TH_RST, 0, 0,
4539                                     (*state)->rule.ptr->return_ttl, 1, 0,
4540                                     pd->eh, kif->pfik_ifp);
4541                         src->seqlo = 0;
4542                         src->seqhi = 1;
4543                         src->max_win = 1;
4544                 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4545                         kprintf("pf: BAD state: ");
4546                         pf_print_state(*state);
4547                         pf_print_flags(th->th_flags);
4548                         kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4549                             "pkts=%llu:%llu dir=%s,%s\n",
4550                             seq, orig_seq, ack, pd->p_len, ackskew,
4551                             (unsigned long long)(*state)->packets[0],
4552                             (unsigned long long)(*state)->packets[1],
4553                             pd->dir == PF_IN ? "in" : "out",
4554                             pd->dir == (*state)->direction ? "fwd" : "rev");
4555                         kprintf("pf: State failure on: %c %c %c %c | %c %c\n",
4556                             SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4557                             SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4558                             ' ': '2',
4559                             (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4560                             (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4561                             SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4562                             SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4563                 }
4564                 REASON_SET(reason, PFRES_BADSTATE);
4565                 return (PF_DROP);
4566         }
4567
4568         return (PF_PASS);
4569 }
4570
4571 /*
4572  * Called with state locked
4573  */
4574 int
4575 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4576         struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4577 {
4578         struct tcphdr           *th = pd->hdr.tcp;
4579
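             /*
              * Sloppy tracking: no sequence number or window validation,
              * only flag-driven state transitions and timeout updates.
              * This is used for "sloppy" rules, typically when only one
              * side of the connection is visible (asymmetric paths).
              */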
4580         if (th->th_flags & TH_SYN)
4581                 if (src->state < TCPS_SYN_SENT)
4582                         src->state = TCPS_SYN_SENT;
4583         if (th->th_flags & TH_FIN)
4584                 if (src->state < TCPS_CLOSING)
4585                         src->state = TCPS_CLOSING;
4586         if (th->th_flags & TH_ACK) {
4587                 if (dst->state == TCPS_SYN_SENT) {
4588                         dst->state = TCPS_ESTABLISHED;
4589                         if (src->state == TCPS_ESTABLISHED &&
4590                             (*state)->src_node != NULL &&
4591                             pf_src_connlimit(*state)) {
4592                                 REASON_SET(reason, PFRES_SRCLIMIT);
4593                                 return (PF_DROP);
4594                         }
4595                 } else if (dst->state == TCPS_CLOSING) {
4596                         dst->state = TCPS_FIN_WAIT_2;
4597                 } else if (src->state == TCPS_SYN_SENT &&
4598                     dst->state < TCPS_SYN_SENT) {
4599                         /*
4600                          * Handle a special sloppy case where we only see one
4601                          * half of the connection. If there is an ACK after
4602                          * the initial SYN without ever seeing a packet from
4603                          * the destination, set the connection to established.
4604                          */
4605                         dst->state = src->state = TCPS_ESTABLISHED;
4606                         if ((*state)->src_node != NULL &&
4607                             pf_src_connlimit(*state)) {
4608                                 REASON_SET(reason, PFRES_SRCLIMIT);
4609                                 return (PF_DROP);
4610                         }
4611                 } else if (src->state == TCPS_CLOSING &&
4612                     dst->state == TCPS_ESTABLISHED &&
4613                     dst->seqlo == 0) {
4614                         /*
4615                          * Handle the closing of half connections where we
4616                          * don't see the full bidirectional FIN/ACK+ACK
4617                          * handshake.
4618                          */
4619                         dst->state = TCPS_CLOSING;
4620                 }
4621         }
4622         if (th->th_flags & TH_RST)
4623                 src->state = dst->state = TCPS_TIME_WAIT;
4624
4625         /* update expire time */
4626         (*state)->expire = time_second;
4627         if (src->state >= TCPS_FIN_WAIT_2 &&
4628             dst->state >= TCPS_FIN_WAIT_2)
4629                 (*state)->timeout = PFTM_TCP_CLOSED;
4630         else if (src->state >= TCPS_CLOSING &&
4631             dst->state >= TCPS_CLOSING)
4632                 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4633         else if (src->state < TCPS_ESTABLISHED ||
4634             dst->state < TCPS_ESTABLISHED)
4635                 (*state)->timeout = PFTM_TCP_OPENING;
4636         else if (src->state >= TCPS_CLOSING ||
4637             dst->state >= TCPS_CLOSING)
4638                 (*state)->timeout = PFTM_TCP_CLOSING;
4639         else
4640                 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4641
4642         return (PF_PASS);
4643 }
4644
4645 /*
4646  * Test TCP connection state.  Caller must hold the state locked.
4647  */
4648 int
4649 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4650                   struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4651                   u_short *reason)
4652 {
4653         struct pf_state_key_cmp  key;
4654         struct tcphdr           *th = pd->hdr.tcp;
4655         int                      copyback = 0;
4656         int                      error;
4657         struct pf_state_peer    *src, *dst;
4658         struct pf_state_key     *sk;
4659
4660         key.af = pd->af;
4661         key.proto = IPPROTO_TCP;
4662         if (direction == PF_IN) {       /* wire side, straight */
4663                 PF_ACPY(&key.addr[0], pd->src, key.af);
4664                 PF_ACPY(&key.addr[1], pd->dst, key.af);
4665                 key.port[0] = th->th_sport;
4666                 key.port[1] = th->th_dport;
4667         } else {                        /* stack side, reverse */
4668                 PF_ACPY(&key.addr[1], pd->src, key.af);
4669                 PF_ACPY(&key.addr[0], pd->dst, key.af);
4670                 key.port[1] = th->th_sport;
4671                 key.port[0] = th->th_dport;
4672         }
4673
4674         STATE_LOOKUP(kif, &key, direction, *state, m);
4675         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
4676
4677         if (direction == (*state)->direction) {
4678                 src = &(*state)->src;
4679                 dst = &(*state)->dst;
4680         } else {
4681                 src = &(*state)->dst;
4682                 dst = &(*state)->src;
4683         }
4684
4685         sk = (*state)->key[pd->didx];
4686
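             /*
              * SYN proxy, client side: while in PF_TCPS_PROXY_SRC we keep
              * completing the handshake with the client ourselves.  A
              * retransmitted SYN gets another SYN|ACK; the final ACK of the
              * handshake advances the state to PF_TCPS_PROXY_DST so the
              * server side can be opened below.
              */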
4687         if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4688                 if (direction != (*state)->direction) {
4689                         REASON_SET(reason, PFRES_SYNPROXY);
4690                         FAIL (PF_SYNPROXY_DROP);
4691                 }
4692                 if (th->th_flags & TH_SYN) {
4693                         if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4694                                 REASON_SET(reason, PFRES_SYNPROXY);
4695                                 FAIL (PF_DROP);
4696                         }
4697                         pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4698                             pd->src, th->th_dport, th->th_sport,
4699                             (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4700                             TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
4701                             0, NULL, NULL);
4702                         REASON_SET(reason, PFRES_SYNPROXY);
4703                         FAIL (PF_SYNPROXY_DROP);
4704                 } else if (!(th->th_flags & TH_ACK) ||
4705                     (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4706                     (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4707                         REASON_SET(reason, PFRES_SYNPROXY);
4708                         FAIL (PF_DROP);
4709                 } else if ((*state)->src_node != NULL &&
4710                     pf_src_connlimit(*state)) {
4711                         REASON_SET(reason, PFRES_SRCLIMIT);
4712                         FAIL (PF_DROP);
4713                 } else
4714                         (*state)->src.state = PF_TCPS_PROXY_DST;
4715         }
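             /*
              * SYN proxy, server side: once the client handshake has
              * completed we send our own SYN to the real destination.  When
              * its SYN|ACK arrives, both sides are ACKed, seqdiff offsets
              * are set up to splice the two half-connections together and
              * the state goes straight to ESTABLISHED.
              */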
4716         if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4717                 if (direction == (*state)->direction) {
4718                         if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4719                             (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4720                             (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4721                                 REASON_SET(reason, PFRES_SYNPROXY);
4722                                 FAIL (PF_DROP);
4723                         }
4724                         (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4725                         if ((*state)->dst.seqhi == 1)
4726                                 (*state)->dst.seqhi = htonl(karc4random());
4727                         pf_send_tcp((*state)->rule.ptr, pd->af,
4728                             &sk->addr[pd->sidx], &sk->addr[pd->didx],
4729                             sk->port[pd->sidx], sk->port[pd->didx],
4730                             (*state)->dst.seqhi, 0, TH_SYN, 0,
4731                             (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4732                         REASON_SET(reason, PFRES_SYNPROXY);
4733                         FAIL (PF_SYNPROXY_DROP);
4734                 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4735                     (TH_SYN|TH_ACK)) ||
4736                     (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4737                         REASON_SET(reason, PFRES_SYNPROXY);
4738                         FAIL (PF_DROP);
4739                 } else {
4740                         (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4741                         (*state)->dst.seqlo = ntohl(th->th_seq);
4742                         pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4743                             pd->src, th->th_dport, th->th_sport,
4744                             ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4745                             TH_ACK, (*state)->src.max_win, 0, 0, 0,
4746                             (*state)->tag, NULL, NULL);
4747                         pf_send_tcp((*state)->rule.ptr, pd->af,
4748                             &sk->addr[pd->sidx], &sk->addr[pd->didx],
4749                             sk->port[pd->sidx], sk->port[pd->didx],
4750                             (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4751                             TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4752                             0, NULL, NULL);
4753                         (*state)->src.seqdiff = (*state)->dst.seqhi -
4754                             (*state)->src.seqlo;
4755                         (*state)->dst.seqdiff = (*state)->src.seqhi -
4756                             (*state)->dst.seqlo;
4757                         (*state)->src.seqhi = (*state)->src.seqlo +
4758                             (*state)->dst.max_win;
4759                         (*state)->dst.seqhi = (*state)->dst.seqlo +
4760                             (*state)->src.max_win;
4761                         (*state)->src.wscale = (*state)->dst.wscale = 0;
4762                         (*state)->src.state = (*state)->dst.state =
4763                             TCPS_ESTABLISHED;
4764                         REASON_SET(reason, PFRES_SYNPROXY);
4765                         FAIL (PF_SYNPROXY_DROP);
4766                 }
4767         }
4768
4769         /*
4770          * Check for connection (addr+port pair) reuse.  We can't actually
4771          * unlink the state if we don't own it.
4772          */
4773         if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4774             dst->state >= TCPS_FIN_WAIT_2 &&
4775             src->state >= TCPS_FIN_WAIT_2) {
4776                 if (pf_status.debug >= PF_DEBUG_MISC) {
4777                         kprintf("pf: state reuse ");
4778                         pf_print_state(*state);
4779                         pf_print_flags(th->th_flags);
4780                         kprintf("\n");
4781                 }
4782                 /* XXX make sure it's the same direction ?? */
4783                 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4784                 if ((*state)->cpuid == mycpu->gd_cpuid) {
4785                         pf_unlink_state(*state);
4786                         *state = NULL;
4787                 } else {
4788                         (*state)->timeout = PFTM_PURGE;
4789                 }
4790                 FAIL (PF_DROP);
4791         }
4792
4793         if ((*state)->state_flags & PFSTATE_SLOPPY) {
4794                 if (pf_tcp_track_sloppy(src, dst, state, pd,
4795                                         reason) == PF_DROP) {
4796                         FAIL (PF_DROP);
4797                 }
4798         } else {
4799                 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd,
4800                                       reason, &copyback) == PF_DROP) {
4801                         FAIL (PF_DROP);
4802                 }
4803         }
4804
4805         /* translate source/destination address, if necessary */
4806         if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4807                 struct pf_state_key *nk = (*state)->key[pd->didx];
4808
4809                 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4810                     nk->port[pd->sidx] != th->th_sport)  {
4811                         /*
4812                          * The translated source address may be completely
4813                          * unrelated to the saved link header; make sure
4814                          * a bridge doesn't try to use it.
4815                          */
4816                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
4817                         m->m_flags &= ~M_HASH;
4818                         pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4819                             &th->th_sum, &nk->addr[pd->sidx],
4820                             nk->port[pd->sidx], 0, pd->af);
4821                 }
4822
4823                 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4824                     nk->port[pd->didx] != th->th_dport) {
4825                         /*
4826                          * If we don't redispatch, the packet will go into
4827                          * the protocol stack on the wrong cpu for the
4828                          * post-translated address.
4829                          */
4830                         m->m_flags &= ~M_HASH;
4831                         pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4832                             &th->th_sum, &nk->addr[pd->didx],
4833                             nk->port[pd->didx], 0, pd->af);
4834                 }
4835                 copyback = 1;
4836         }
4837
4838         /* Copyback sequence modulation or stateful scrub changes if needed */
4839         if (copyback)
4840                 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4841
4842         pfsync_update_state(*state);
4843         error = PF_PASS;
4844 done:
4845         if (*state)
4846                 lockmgr(&(*state)->lk, LK_RELEASE);
4847         return (error);
4848 }
4849
4850 /*
4851  * Test UDP connection state.  Caller must hold the state locked.
4852  */
4853 int
4854 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4855                   struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4856 {
4857         struct pf_state_peer    *src, *dst;
4858         struct pf_state_key_cmp  key;
4859         struct udphdr           *uh = pd->hdr.udp;
4860
4861         key.af = pd->af;
4862         key.proto = IPPROTO_UDP;
4863         if (direction == PF_IN) {       /* wire side, straight */
4864                 PF_ACPY(&key.addr[0], pd->src, key.af);
4865                 PF_ACPY(&key.addr[1], pd->dst, key.af);
4866                 key.port[0] = uh->uh_sport;
4867                 key.port[1] = uh->uh_dport;
4868         } else {                        /* stack side, reverse */
4869                 PF_ACPY(&key.addr[1], pd->src, key.af);
4870                 PF_ACPY(&key.addr[0], pd->dst, key.af);
4871                 key.port[1] = uh->uh_sport;
4872                 key.port[0] = uh->uh_dport;
4873         }
4874
4875         STATE_LOOKUP(kif, &key, direction, *state, m);
4876         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
4877
4878         if (direction == (*state)->direction) {
4879                 src = &(*state)->src;
4880                 dst = &(*state)->dst;
4881         } else {
4882                 src = &(*state)->dst;
4883                 dst = &(*state)->src;
4884         }
4885
4886         /* update states */
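             /*
              * PFUDPS_SINGLE means traffic has been seen from one peer
              * only; once the other peer answers, the state moves to
              * PFUDPS_MULTIPLE, which selects the longer PFTM_UDP_MULTIPLE
              * timeout below.
              */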
4887         if (src->state < PFUDPS_SINGLE)
4888                 src->state = PFUDPS_SINGLE;
4889         if (dst->state == PFUDPS_SINGLE)
4890                 dst->state = PFUDPS_MULTIPLE;
4891
4892         /* update expire time */
4893         (*state)->expire = time_second;
4894         if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4895                 (*state)->timeout = PFTM_UDP_MULTIPLE;
4896         else
4897                 (*state)->timeout = PFTM_UDP_SINGLE;
4898
4899         /* translate source/destination address, if necessary */
4900         if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4901                 struct pf_state_key *nk = (*state)->key[pd->didx];
4902
4903                 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4904                     nk->port[pd->sidx] != uh->uh_sport) {
4905                         /*
4906                          * The translated source address may be completely
4907                          * unrelated to the saved link header, make sure
4908                          * unrelated to the saved link header; make sure
4909                          */
4910                         m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
4911                         m->m_flags &= ~M_HASH;
4912                         pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4913                             &uh->uh_sum, &nk->addr[pd->sidx],
4914                             nk->port[pd->sidx], 1, pd->af);
4915                 }
4916
4917                 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4918                     nk->port[pd->didx] != uh->uh_dport) {
4919                         /*
4920                          * If we don't redispatch, the packet will go into
4921                          * the protocol stack on the wrong cpu for the
4922                          * post-translated address.
4923                          */
4924                         m->m_flags &= ~M_HASH;
4925                         pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4926                             &uh->uh_sum, &nk->addr[pd->didx],
4927                             nk->port[pd->didx], 1, pd->af);
4928                 }
4929                 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4930         }
4931
4932         pfsync_update_state(*state);
4933         lockmgr(&(*state)->lk, LK_RELEASE);
4934         return (PF_PASS);
4935 }
4936
4937 /*
4938  * Test ICMP connection state.  Caller must hold the state locked.
4939  */
4940 int
4941 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4942                    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4943                    u_short *reason)
4944 {
4945         struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
4946         u_int16_t        icmpid = 0, *icmpsum;
4947         u_int8_t         icmptype;
4948         int              state_icmp = 0;
4949         int              error;
4950         struct pf_state_key_cmp key;
4951
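             /*
              * Classify the ICMP message: query/reply messages carry their
              * own state keyed on the ICMP id, while error messages
              * (unreachable, time exceeded, ...) refer to an embedded
              * packet and set state_icmp so the lookup below is done on the
              * inner header instead.
              */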
4952         switch (pd->proto) {
4953 #ifdef INET
4954         case IPPROTO_ICMP:
4955                 icmptype = pd->hdr.icmp->icmp_type;
4956                 icmpid = pd->hdr.icmp->icmp_id;
4957                 icmpsum = &pd->hdr.icmp->icmp_cksum;
4958
4959                 if (icmptype == ICMP_UNREACH ||
4960                     icmptype == ICMP_SOURCEQUENCH ||
4961                     icmptype == ICMP_REDIRECT ||
4962                     icmptype == ICMP_TIMXCEED ||
4963                     icmptype == ICMP_PARAMPROB)
4964                         state_icmp++;
4965                 break;
4966 #endif /* INET */
4967 #ifdef INET6
4968         case IPPROTO_ICMPV6:
4969                 icmptype = pd->hdr.icmp6->icmp6_type;
4970                 icmpid = pd->hdr.icmp6->icmp6_id;
4971                 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4972
4973                 if (icmptype == ICMP6_DST_UNREACH ||
4974                     icmptype == ICMP6_PACKET_TOO_BIG ||
4975                     icmptype == ICMP6_TIME_EXCEEDED ||
4976                     icmptype == ICMP6_PARAM_PROB)
4977                         state_icmp++;
4978                 break;
4979 #endif /* INET6 */
4980         }
4981
4982         if (!state_icmp) {
4983
4984                 /*
4985                  * ICMP query/reply message not related to a TCP/UDP packet.
4986                  * Search for an ICMP state.
4987                  */
4988                 key.af = pd->af;
4989                 key.proto = pd->proto;
4990                 key.port[0] = key.port[1] = icmpid;
4991                 if (direction == PF_IN) {       /* wire side, straight */
4992                         PF_ACPY(&key.addr[0], pd->src, key.af);
4993                         PF_ACPY(&key.addr[1], pd->dst, key.af);
4994                 } else {                        /* stack side, reverse */
4995                         PF_ACPY(&key.addr[1], pd->src, key.af);
4996                         PF_ACPY(&key.addr[0], pd->dst, key.af);
4997                 }
4998
4999                 STATE_LOOKUP(kif, &key, direction, *state, m);
5000                 lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5001
5002                 (*state)->expire = time_second;
5003                 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
5004
5005                 /* translate source/destination address, if necessary */
5006                 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5007                         struct pf_state_key *nk = (*state)->key[pd->didx];
5008
5009                         switch (pd->af) {
5010 #ifdef INET
5011                         case AF_INET:
5012                                 if (PF_ANEQ(pd->src,
5013                                     &nk->addr[pd->sidx], AF_INET))
5014                                         pf_change_a(&saddr->v4.s_addr,
5015                                             pd->ip_sum,
5016                                             nk->addr[pd->sidx].v4.s_addr, 0);
5017
5018                                 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
5019                                     AF_INET))
5020                                         pf_change_a(&daddr->v4.s_addr,
5021                                             pd->ip_sum,
5022                                             nk->addr[pd->didx].v4.s_addr, 0);
5023
5024                                 if (nk->port[0] !=
5025                                     pd->hdr.icmp->icmp_id) {
5026                                         pd->hdr.icmp->icmp_cksum =
5027                                             pf_cksum_fixup(
5028                                             pd->hdr.icmp->icmp_cksum, icmpid,
5029                                             nk->port[pd->sidx], 0);
5030                                         pd->hdr.icmp->icmp_id =
5031                                             nk->port[pd->sidx];
5032                                 }
5033
5034                                 m_copyback(m, off, ICMP_MINLEN,
5035                                     (caddr_t)pd->hdr.icmp);
5036                                 break;
5037 #endif /* INET */
5038 #ifdef INET6
5039                         case AF_INET6:
5040                                 if (PF_ANEQ(pd->src,
5041                                     &nk->addr[pd->sidx], AF_INET6))
5042                                         pf_change_a6(saddr,
5043                                             &pd->hdr.icmp6->icmp6_cksum,
5044                                             &nk->addr[pd->sidx], 0);
5045
5046                                 if (PF_ANEQ(pd->dst,
5047                                     &nk->addr[pd->didx], AF_INET6))
5048                                         pf_change_a6(daddr,
5049                                             &pd->hdr.icmp6->icmp6_cksum,
5050                                             &nk->addr[pd->didx], 0);
5051
5052                                 m_copyback(m, off,
5053                                         sizeof(struct icmp6_hdr),
5054                                         (caddr_t)pd->hdr.icmp6);
5055                                 break;
5056 #endif /* INET6 */
5057                         }
5058                 }
5059         } else {
5060                 /*
5061                  * ICMP error message in response to a TCP/UDP packet.
5062                  * Extract the inner TCP/UDP header and search for that state.
5063                  */
5064
5065                 struct pf_pdesc pd2;
5066 #ifdef INET
5067                 struct ip       h2;
5068 #endif /* INET */
5069 #ifdef INET6
5070                 struct ip6_hdr  h2_6;
5071                 int             terminal = 0;
5072 #endif /* INET6 */
5073                 int             ipoff2;
5074                 int             off2;
5075
5076                 pd2.not_cpu_localized = 1;
5077                 pd2.af = pd->af;
5078                 /* Payload packet is from the opposite direction. */
5079                 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5080                 pd2.didx = (direction == PF_IN) ? 0 : 1;
5081                 switch (pd->af) {
5082 #ifdef INET
5083                 case AF_INET:
5084                         /* offset of h2 in mbuf chain */
5085                         ipoff2 = off + ICMP_MINLEN;
5086
5087                         if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5088                             NULL, reason, pd2.af)) {
5089                                 DPFPRINTF(PF_DEBUG_MISC,
5090                                     ("pf: ICMP error message too short "
5091                                     "(ip)\n"));
5092                                 FAIL (PF_DROP);
5093                         }
5094                         /*
5095                          * ICMP error messages don't refer to non-first
5096                          * fragments
5097                          */
5098                         if (h2.ip_off & htons(IP_OFFMASK)) {
5099                                 REASON_SET(reason, PFRES_FRAG);
5100                                 FAIL (PF_DROP);
5101                         }
5102
5103                         /* offset of protocol header that follows h2 */
5104                         off2 = ipoff2 + (h2.ip_hl << 2);
5105
5106                         pd2.proto = h2.ip_p;
5107                         pd2.src = (struct pf_addr *)&h2.ip_src;
5108                         pd2.dst = (struct pf_addr *)&h2.ip_dst;
5109                         pd2.ip_sum = &h2.ip_sum;
5110                         break;
5111 #endif /* INET */
5112 #ifdef INET6
5113                 case AF_INET6:
5114                         ipoff2 = off + sizeof(struct icmp6_hdr);
5115
5116                         if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5117                             NULL, reason, pd2.af)) {
5118                                 DPFPRINTF(PF_DEBUG_MISC,
5119                                     ("pf: ICMP error message too short "
5120                                     "(ip6)\n"));
5121                                 FAIL (PF_DROP);
5122                         }
5123                         pd2.proto = h2_6.ip6_nxt;
5124                         pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5125                         pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5126                         pd2.ip_sum = NULL;
5127                         off2 = ipoff2 + sizeof(h2_6);
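                        /*
                         * Walk the IPv6 extension header chain until a
                         * terminal (upper layer) protocol is reached,
                         * advancing off2 and pd2.proto as we go.
                         */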
5128                         do {
5129                                 switch (pd2.proto) {
5130                                 case IPPROTO_FRAGMENT:
5131                                         /*
5132                                          * ICMPv6 error messages for
5133                                          * non-first fragments
5134                                          */
5135                                         REASON_SET(reason, PFRES_FRAG);
5136                                         FAIL (PF_DROP);
5137                                 case IPPROTO_AH:
5138                                 case IPPROTO_HOPOPTS:
5139                                 case IPPROTO_ROUTING:
5140                                 case IPPROTO_DSTOPTS: {
5141                                         /* get next header and header length */
5142                                         struct ip6_ext opt6;
5143
5144                                         if (!pf_pull_hdr(m, off2, &opt6,
5145                                             sizeof(opt6), NULL, reason,
5146                                             pd2.af)) {
5147                                                 DPFPRINTF(PF_DEBUG_MISC,
5148                                                     ("pf: ICMPv6 short opt\n"));
5149                                                 FAIL (PF_DROP);
5150                                         }
5151                                         if (pd2.proto == IPPROTO_AH)
5152                                                 off2 += (opt6.ip6e_len + 2) * 4;
5153                                         else
5154                                                 off2 += (opt6.ip6e_len + 1) * 8;
5155                                         pd2.proto = opt6.ip6e_nxt;
5156                                         /* go to the next header */
5157                                         break;
5158                                 }
5159                                 default:
5160                                         terminal++;
5161                                         break;
5162                                 }
5163                         } while (!terminal);
5164                         break;
5165 #endif /* INET6 */
5166                 default:
5167                         DPFPRINTF(PF_DEBUG_MISC,
5168                             ("pf: ICMP AF %d unknown\n", pd->af));
5169                         FAIL (PF_DROP);
5170                         break;
5171                 }
5172
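                /*
                 * Find the state that the quoted (offending) packet belongs
                 * to, keyed on the inner protocol's addresses and ports.
                 */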
5173                 switch (pd2.proto) {
5174                 case IPPROTO_TCP: {
5175                         struct tcphdr            th;
5176                         u_int32_t                seq;
5177                         struct pf_state_peer    *src, *dst;
5178                         u_int8_t                 dws;
5179                         int                      copyback = 0;
5180
5181                         /*
5182                          * Only the first 8 bytes of the TCP header are
5183                          * guaranteed to be present.  Don't access any TCP
5184                          * header fields after th_seq; an ackskew test is
5184                          * not possible.
5185                          */
5186                         if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5187                             pd2.af)) {
5188                                 DPFPRINTF(PF_DEBUG_MISC,
5189                                     ("pf: ICMP error message too short "
5190                                     "(tcp)\n"));
5191                                 FAIL (PF_DROP);
5192                         }
5193
5194                         key.af = pd2.af;
5195                         key.proto = IPPROTO_TCP;
5196                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5197                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5198                         key.port[pd2.sidx] = th.th_sport;
5199                         key.port[pd2.didx] = th.th_dport;
5200
5201                         STATE_LOOKUP(kif, &key, direction, *state, m);
5202                         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5203
5204                         if (direction == (*state)->direction) {
5205                                 src = &(*state)->dst;
5206                                 dst = &(*state)->src;
5207                         } else {
5208                                 src = &(*state)->src;
5209                                 dst = &(*state)->dst;
5210                         }
5211
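                        /*
                         * Apply the destination's window scale factor only if
                         * both peers advertised window scaling.
                         */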
5212                         if (src->wscale && dst->wscale)
5213                                 dws = dst->wscale & PF_WSCALE_MASK;
5214                         else
5215                                 dws = 0;
5216
5217                         /* Demodulate sequence number */
5218                         seq = ntohl(th.th_seq) - src->seqdiff;
5219                         if (src->seqdiff) {
5220                                 pf_change_a(&th.th_seq, icmpsum,
5221                                     htonl(seq), 0);
5222                                 copyback = 1;
5223                         }
5224
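                        /*
                         * The sequence number quoted in the ICMP error must
                         * fall within the window of the connection, otherwise
                         * the error is most likely forged (unless the state
                         * is marked sloppy).
                         */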
5225                         if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5226                             (!SEQ_GEQ(src->seqhi, seq) ||
5227                             !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5228                                 if (pf_status.debug >= PF_DEBUG_MISC) {
5229                                         kprintf("pf: BAD ICMP %d:%d ",
5230                                             icmptype, pd->hdr.icmp->icmp_code);
5231                                         pf_print_host(pd->src, 0, pd->af);
5232                                         kprintf(" -> ");
5233                                         pf_print_host(pd->dst, 0, pd->af);
5234                                         kprintf(" state: ");
5235                                         pf_print_state(*state);
5236                                         kprintf(" seq=%u\n", seq);
5237                                 }
5238                                 REASON_SET(reason, PFRES_BADSTATE);
5239                                 FAIL (PF_DROP);
5240                         } else {
5241                                 if (pf_status.debug >= PF_DEBUG_MISC) {
5242                                         kprintf("pf: OK ICMP %d:%d ",
5243                                             icmptype, pd->hdr.icmp->icmp_code);
5244                                         pf_print_host(pd->src, 0, pd->af);
5245                                         kprintf(" -> ");
5246                                         pf_print_host(pd->dst, 0, pd->af);
5247                                         kprintf(" state: ");
5248                                         pf_print_state(*state);
5249                                         kprintf(" seq=%u\n", seq);
5250                                 }
5251                         }
5252
5253                         /* translate source/destination address, if necessary */
5254                         if ((*state)->key[PF_SK_WIRE] !=
5255                             (*state)->key[PF_SK_STACK]) {
5256                                 struct pf_state_key *nk =
5257                                     (*state)->key[pd->didx];
5258
5259                                 if (PF_ANEQ(pd2.src,
5260                                     &nk->addr[pd2.sidx], pd2.af) ||
5261                                     nk->port[pd2.sidx] != th.th_sport)
5262                                         pf_change_icmp(pd2.src, &th.th_sport,
5263                                             daddr, &nk->addr[pd2.sidx],
5264                                             nk->port[pd2.sidx], NULL,
5265                                             pd2.ip_sum, icmpsum,
5266                                             pd->ip_sum, 0, pd2.af);
5267
5268                                 if (PF_ANEQ(pd2.dst,
5269                                     &nk->addr[pd2.didx], pd2.af) ||
5270                                     nk->port[pd2.didx] != th.th_dport)
5271                                         pf_change_icmp(pd2.dst, &th.th_dport,
5272                                             NULL, /* XXX Inbound NAT? */
5273                                             &nk->addr[pd2.didx],
5274                                             nk->port[pd2.didx], NULL,
5275                                             pd2.ip_sum, icmpsum,
5276                                             pd->ip_sum, 0, pd2.af);
5277                                 copyback = 1;
5278                         }
5279
5280                         if (copyback) {
5281                                 switch (pd2.af) {
5282 #ifdef INET
5283                                 case AF_INET:
5284                                         m_copyback(m, off, ICMP_MINLEN,
5285                                             (caddr_t)pd->hdr.icmp);
5286                                         m_copyback(m, ipoff2, sizeof(h2),
5287                                             (caddr_t)&h2);
5288                                         break;
5289 #endif /* INET */
5290 #ifdef INET6
5291                                 case AF_INET6:
5292                                         m_copyback(m, off,
5293                                             sizeof(struct icmp6_hdr),
5294                                             (caddr_t)pd->hdr.icmp6);
5295                                         m_copyback(m, ipoff2, sizeof(h2_6),
5296                                             (caddr_t)&h2_6);
5297                                         break;
5298 #endif /* INET6 */
5299                                 }
5300                                 m_copyback(m, off2, 8, (caddr_t)&th);
5301                         }
5302                         break;
5303                 }
5304                 case IPPROTO_UDP: {
5305                         struct udphdr           uh;
5306
5307                         if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5308                             NULL, reason, pd2.af)) {
5309                                 DPFPRINTF(PF_DEBUG_MISC,
5310                                     ("pf: ICMP error message too short "
5311                                     "(udp)\n"));
5312                                 FAIL (PF_DROP);
5313                         }
5314
5315                         key.af = pd2.af;
5316                         key.proto = IPPROTO_UDP;
5317                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5318                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5319                         key.port[pd2.sidx] = uh.uh_sport;
5320                         key.port[pd2.didx] = uh.uh_dport;
5321
5322                         STATE_LOOKUP(kif, &key, direction, *state, m);
5323                         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5324
5325                         /* translate source/destination address, if necessary */
5326                         if ((*state)->key[PF_SK_WIRE] !=
5327                             (*state)->key[PF_SK_STACK]) {
5328                                 struct pf_state_key *nk =
5329                                     (*state)->key[pd->didx];
5330
5331                                 if (PF_ANEQ(pd2.src,
5332                                     &nk->addr[pd2.sidx], pd2.af) ||
5333                                     nk->port[pd2.sidx] != uh.uh_sport)
5334                                         pf_change_icmp(pd2.src, &uh.uh_sport,
5335                                             daddr, &nk->addr[pd2.sidx],
5336                                             nk->port[pd2.sidx], &uh.uh_sum,
5337                                             pd2.ip_sum, icmpsum,
5338                                             pd->ip_sum, 1, pd2.af);
5339
5340                                 if (PF_ANEQ(pd2.dst,
5341                                     &nk->addr[pd2.didx], pd2.af) ||
5342                                     nk->port[pd2.didx] != uh.uh_dport)
5343                                         pf_change_icmp(pd2.dst, &uh.uh_dport,
5344                                             NULL, /* XXX Inbound NAT? */
5345                                             &nk->addr[pd2.didx],
5346                                             nk->port[pd2.didx], &uh.uh_sum,
5347                                             pd2.ip_sum, icmpsum,
5348                                             pd->ip_sum, 1, pd2.af);
5349
5350                                 switch (pd2.af) {
5351 #ifdef INET
5352                                 case AF_INET:
5353                                         m_copyback(m, off, ICMP_MINLEN,
5354                                             (caddr_t)pd->hdr.icmp);
5355                                         m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5356                                         break;
5357 #endif /* INET */
5358 #ifdef INET6
5359                                 case AF_INET6:
5360                                         m_copyback(m, off,
5361                                             sizeof(struct icmp6_hdr),
5362                                             (caddr_t)pd->hdr.icmp6);
5363                                         m_copyback(m, ipoff2, sizeof(h2_6),
5364                                             (caddr_t)&h2_6);
5365                                         break;
5366 #endif /* INET6 */
5367                                 }
5368                                 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5369                         }
5370                         break;
5371                 }
5372 #ifdef INET
5373                 case IPPROTO_ICMP: {
5374                         struct icmp             iih;
5375
5376                         if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5377                             NULL, reason, pd2.af)) {
5378                                 DPFPRINTF(PF_DEBUG_MISC,
5379                                     ("pf: ICMP error message too short "
5380                                     "(icmp)\n"));
5381                                 FAIL (PF_DROP);
5382                         }
5383
5384                         key.af = pd2.af;
5385                         key.proto = IPPROTO_ICMP;
5386                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5387                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5388                         key.port[0] = key.port[1] = iih.icmp_id;
5389
5390                         STATE_LOOKUP(kif, &key, direction, *state, m);
5391                         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5392
5393                         /* translate source/destination address, if necessary */
5394                         if ((*state)->key[PF_SK_WIRE] !=
5395                             (*state)->key[PF_SK_STACK]) {
5396                                 struct pf_state_key *nk =
5397                                     (*state)->key[pd->didx];
5398
5399                                 if (PF_ANEQ(pd2.src,
5400                                     &nk->addr[pd2.sidx], pd2.af) ||
5401                                     nk->port[pd2.sidx] != iih.icmp_id)
5402                                         pf_change_icmp(pd2.src, &iih.icmp_id,
5403                                             daddr, &nk->addr[pd2.sidx],
5404                                             nk->port[pd2.sidx], NULL,
5405                                             pd2.ip_sum, icmpsum,
5406                                             pd->ip_sum, 0, AF_INET);
5407
5408                                 if (PF_ANEQ(pd2.dst,
5409                                     &nk->addr[pd2.didx], pd2.af) ||
5410                                     nk->port[pd2.didx] != iih.icmp_id)
5411                                         pf_change_icmp(pd2.dst, &iih.icmp_id,
5412                                             NULL, /* XXX Inbound NAT? */
5413                                             &nk->addr[pd2.didx],
5414                                             nk->port[pd2.didx], NULL,
5415                                             pd2.ip_sum, icmpsum,
5416                                             pd->ip_sum, 0, AF_INET);
5417
5418                                 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5419                                 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5420                                 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5421                         }
5422                         break;
5423                 }
5424 #endif /* INET */
5425 #ifdef INET6
5426                 case IPPROTO_ICMPV6: {
5427                         struct icmp6_hdr        iih;
5428
5429                         if (!pf_pull_hdr(m, off2, &iih,
5430                             sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5431                                 DPFPRINTF(PF_DEBUG_MISC,
5432                                     ("pf: ICMP error message too short "
5433                                     "(icmp6)\n"));
5434                                 FAIL (PF_DROP);
5435                         }
5436
5437                         key.af = pd2.af;
5438                         key.proto = IPPROTO_ICMPV6;
5439                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5440                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5441                         key.port[0] = key.port[1] = iih.icmp6_id;
5442
5443                         STATE_LOOKUP(kif, &key, direction, *state, m);
5444                         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5445
5446                         /* translate source/destination address, if necessary */
5447                         if ((*state)->key[PF_SK_WIRE] !=
5448                             (*state)->key[PF_SK_STACK]) {
5449                                 struct pf_state_key *nk =
5450                                     (*state)->key[pd->didx];
5451
5452                                 if (PF_ANEQ(pd2.src,
5453                                     &nk->addr[pd2.sidx], pd2.af) ||
5454                                     nk->port[pd2.sidx] != iih.icmp6_id)
5455                                         pf_change_icmp(pd2.src, &iih.icmp6_id,
5456                                             daddr, &nk->addr[pd2.sidx],
5457                                             nk->port[pd2.sidx], NULL,
5458                                             pd2.ip_sum, icmpsum,
5459                                             pd->ip_sum, 0, AF_INET6);
5460
5461                                 if (PF_ANEQ(pd2.dst,
5462                                     &nk->addr[pd2.didx], pd2.af) ||
5463                                     nk->port[pd2.didx] != iih.icmp6_id)
5464                                         pf_change_icmp(pd2.dst, &iih.icmp6_id,
5465                                             NULL, /* XXX Inbound NAT? */
5466                                             &nk->addr[pd2.didx],
5467                                             nk->port[pd2.didx], NULL,
5468                                             pd2.ip_sum, icmpsum,
5469                                             pd->ip_sum, 0, AF_INET6);
5470
5471                                 m_copyback(m, off, sizeof(struct icmp6_hdr),
5472                                     (caddr_t)pd->hdr.icmp6);
5473                                 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5474                                 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5475                                     (caddr_t)&iih);
5476                         }
5477                         break;
5478                 }
5479 #endif /* INET6 */
5480                 default: {
5481                         key.af = pd2.af;
5482                         key.proto = pd2.proto;
5483                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5484                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5485                         key.port[0] = key.port[1] = 0;
5486
5487                         STATE_LOOKUP(kif, &key, direction, *state, m);
5488                         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5489
5490                         /* translate source/destination address, if necessary */
5491                         if ((*state)->key[PF_SK_WIRE] !=
5492                             (*state)->key[PF_SK_STACK]) {
5493                                 struct pf_state_key *nk =
5494                                     (*state)->key[pd->didx];
5495
5496                                 if (PF_ANEQ(pd2.src,
5497                                     &nk->addr[pd2.sidx], pd2.af))
5498                                         pf_change_icmp(pd2.src, NULL, daddr,
5499                                             &nk->addr[pd2.sidx], 0, NULL,
5500                                             pd2.ip_sum, icmpsum,
5501                                             pd->ip_sum, 0, pd2.af);
5502
5503                                 if (PF_ANEQ(pd2.dst,
5504                                     &nk->addr[pd2.didx], pd2.af))
5505                                         pf_change_icmp(pd2.dst, NULL,
5506                                             NULL, /* XXX Inbound NAT? */
5507                                             &nk->addr[pd2.didx], 0, NULL,
5508                                             pd2.ip_sum, icmpsum,
5509                                             pd->ip_sum, 0, pd2.af);
5510
5511                                 switch (pd2.af) {
5512 #ifdef INET
5513                                 case AF_INET:
5514                                         m_copyback(m, off, ICMP_MINLEN,
5515                                             (caddr_t)pd->hdr.icmp);
5516                                         m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5517                                         break;
5518 #endif /* INET */
5519 #ifdef INET6
5520                                 case AF_INET6:
5521                                         m_copyback(m, off,
5522                                             sizeof(struct icmp6_hdr),
5523                                             (caddr_t)pd->hdr.icmp6);
5524                                         m_copyback(m, ipoff2, sizeof(h2_6),
5525                                             (caddr_t)&h2_6);
5526                                         break;
5527 #endif /* INET6 */
5528                                 }
5529                         }
5530                         break;
5531                 }
5532                 }
5533         }
5534
5535         pfsync_update_state(*state);
5536         error = PF_PASS;
5537 done:
5538         if (*state)
5539                 lockmgr(&(*state)->lk, LK_RELEASE);
5540         return (error);
5541 }
5542
5543 /*
5544  * Test other connection state.  Caller must hold the state locked.
5545  */
5546 int
5547 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5548                     struct mbuf *m, struct pf_pdesc *pd)
5549 {
5550         struct pf_state_peer    *src, *dst;
5551         struct pf_state_key_cmp  key;
5552
5553         key.af = pd->af;
5554         key.proto = pd->proto;
5555         if (direction == PF_IN) {
5556                 PF_ACPY(&key.addr[0], pd->src, key.af);
5557                 PF_ACPY(&key.addr[1], pd->dst, key.af);
5558                 key.port[0] = key.port[1] = 0;
5559         } else {
5560                 PF_ACPY(&key.addr[1], pd->src, key.af);
5561                 PF_ACPY(&key.addr[0], pd->dst, key.af);
5562                 key.port[1] = key.port[0] = 0;
5563         }
5564
5565         STATE_LOOKUP(kif, &key, direction, *state, m);
5566         lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5567
5568         if (direction == (*state)->direction) {
5569                 src = &(*state)->src;
5570                 dst = &(*state)->dst;
5571         } else {
5572                 src = &(*state)->dst;
5573                 dst = &(*state)->src;
5574         }
5575
5576         /* update states */
5577         if (src->state < PFOTHERS_SINGLE)
5578                 src->state = PFOTHERS_SINGLE;
5579         if (dst->state == PFOTHERS_SINGLE)
5580                 dst->state = PFOTHERS_MULTIPLE;
5581
5582         /* update expire time */
5583         (*state)->expire = time_second;
5584         if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5585                 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5586         else
5587                 (*state)->timeout = PFTM_OTHER_SINGLE;
5588
5589         /* translate source/destination address, if necessary */
5590         if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5591                 struct pf_state_key *nk = (*state)->key[pd->didx];
5592
5593                 KKASSERT(nk);
5594                 KKASSERT(pd);
5595                 KKASSERT(pd->src);
5596                 KKASSERT(pd->dst);
5597                 switch (pd->af) {
5598 #ifdef INET
5599                 case AF_INET:
5600                         if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5601                                 pf_change_a(&pd->src->v4.s_addr,
5602                                     pd->ip_sum,
5603                                     nk->addr[pd->sidx].v4.s_addr,
5604                                     0);
5605
5606
5607                         if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5608                                 pf_change_a(&pd->dst->v4.s_addr,
5609                                     pd->ip_sum,
5610                                     nk->addr[pd->didx].v4.s_addr,
5611                                     0);
5612
5613                         break;
5614 #endif /* INET */
5615 #ifdef INET6
5616                 case AF_INET6:
5617                         if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5618                                 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5619
5620                         if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5621                                 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5622 #endif /* INET6 */
5623                 }
5624         }
5625
5626         pfsync_update_state(*state);
5627         lockmgr(&(*state)->lk, LK_RELEASE);
5628         return (PF_PASS);
5629 }
5630
5631 /*
5632  * ipoff and off are measured from the start of the mbuf chain.
5633  * h must be at "ipoff" on the mbuf chain.
5634  */
5635 void *
5636 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5637     u_short *actionp, u_short *reasonp, sa_family_t af)
5638 {
5639         switch (af) {
5640 #ifdef INET
5641         case AF_INET: {
5642                 struct ip       *h = mtod(m, struct ip *);
5643                 u_int16_t        fragoff = (h->ip_off & IP_OFFMASK) << 3;
5644
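                /*
                 * A non-first fragment cannot contain the requested header.
                 * Pass it through if the fragment lies entirely beyond the
                 * header, otherwise drop it.
                 */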
5645                 if (fragoff) {
5646                         if (fragoff >= len)
5647                                 ACTION_SET(actionp, PF_PASS);
5648                         else {
5649                                 ACTION_SET(actionp, PF_DROP);
5650                                 REASON_SET(reasonp, PFRES_FRAG);
5651                         }
5652                         return (NULL);
5653                 }
5654                 if (m->m_pkthdr.len < off + len ||
5655                     h->ip_len < off + len) {
5656                         ACTION_SET(actionp, PF_DROP);
5657                         REASON_SET(reasonp, PFRES_SHORT);
5658                         return (NULL);
5659                 }
5660                 break;
5661         }
5662 #endif /* INET */
5663 #ifdef INET6
5664         case AF_INET6: {
5665                 struct ip6_hdr  *h = mtod(m, struct ip6_hdr *);
5666
5667                 if (m->m_pkthdr.len < off + len ||
5668                     (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5669                     (unsigned)(off + len)) {
5670                         ACTION_SET(actionp, PF_DROP);
5671                         REASON_SET(reasonp, PFRES_SHORT);
5672                         return (NULL);
5673                 }
5674                 break;
5675         }
5676 #endif /* INET6 */
5677         }
5678         m_copydata(m, off, len, p);
5679         return (p);
5680 }
5681
5682 int
5683 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
5684 {
5685         struct sockaddr_in      *dst;
5686         int                      ret = 1;
5687         int                      check_mpath;
5688 #ifdef INET6
5689         struct sockaddr_in6     *dst6;
5690         struct route_in6         ro;
5691 #else
5692         struct route             ro;
5693 #endif
5694         struct radix_node       *rn;
5695         struct rtentry          *rt;
5696         struct ifnet            *ifp;
5697
5698         check_mpath = 0;
5699         bzero(&ro, sizeof(ro));
5700         switch (af) {
5701         case AF_INET:
5702                 dst = satosin(&ro.ro_dst);
5703                 dst->sin_family = AF_INET;
5704                 dst->sin_len = sizeof(*dst);
5705                 dst->sin_addr = addr->v4;
5706                 break;
5707 #ifdef INET6
5708         case AF_INET6:
5709                 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5710                 dst6->sin6_family = AF_INET6;
5711                 dst6->sin6_len = sizeof(*dst6);
5712                 dst6->sin6_addr = addr->v6;
5713                 break;
5714 #endif /* INET6 */
5715         default:
5716                 return (0);
5717         }
5718
5719         /* Skip checks for ipsec interfaces */
5720         if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5721                 goto out;
5722
5723         rtalloc_ign((struct route *)&ro, 0);
5724
5725         if (ro.ro_rt != NULL) {
5726                 /* No interface given, this is a no-route check */
5727                 if (kif == NULL)
5728                         goto out;
5729
5730                 if (kif->pfik_ifp == NULL) {
5731                         ret = 0;
5732                         goto out;
5733                 }
5734
5735                 /* Perform uRPF check if passed input interface */
5736                 ret = 0;
5737                 rn = (struct radix_node *)ro.ro_rt;
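                /*
                 * check_mpath is never set and rn is cleared in the loop
                 * body, so only the first route entry is examined here.
                 */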
5738                 do {
5739                         rt = (struct rtentry *)rn;
5740                         ifp = rt->rt_ifp;
5741
5742                         if (kif->pfik_ifp == ifp)
5743                                 ret = 1;
5744                         rn = NULL;
5745                 } while (check_mpath == 1 && rn != NULL && ret == 0);
5746         } else
5747                 ret = 0;
5748 out:
5749         if (ro.ro_rt != NULL)
5750                 RTFREE(ro.ro_rt);
5751         return (ret);
5752 }
5753
5754 int
5755 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
5756 {
5757         struct sockaddr_in      *dst;
5758 #ifdef INET6
5759         struct sockaddr_in6     *dst6;
5760         struct route_in6         ro;
5761 #else
5762         struct route             ro;
5763 #endif
5764         int                      ret = 0;
5765
5766         ASSERT_LWKT_TOKEN_HELD(&pf_token);
5767
5768         bzero(&ro, sizeof(ro));
5769         switch (af) {
5770         case AF_INET:
5771                 dst = satosin(&ro.ro_dst);
5772                 dst->sin_family = AF_INET;
5773                 dst->sin_len = sizeof(*dst);
5774                 dst->sin_addr = addr->v4;
5775                 break;
5776 #ifdef INET6
5777         case AF_INET6:
5778                 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5779                 dst6->sin6_family = AF_INET6;
5780                 dst6->sin6_len = sizeof(*dst6);
5781                 dst6->sin6_addr = addr->v6;
5782                 break;
5783 #endif /* INET6 */
5784         default:
5785                 return (0);
5786         }
5787
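        /*
         * Route-label matching is effectively a no-op here: ret is never
         * set after the lookup, so this always reports "no match".
         */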
5788         rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING));
5789
5790         if (ro.ro_rt != NULL) {
5791                 RTFREE(ro.ro_rt);
5792         }
5793
5794         return (ret);
5795 }
5796
5797 #ifdef INET
5798 void
5799 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5800     struct pf_state *s, struct pf_pdesc *pd)
5801 {
5802         struct mbuf             *m0, *m1;
5803         struct route             iproute;
5804         struct route            *ro = NULL;
5805         struct sockaddr_in      *dst;
5806         struct ip               *ip;
5807         struct ifnet            *ifp = NULL;
5808         struct pf_addr           naddr;
5809         struct pf_src_node      *sn = NULL;
5810         int                      error = 0;
5811         int sw_csum;
5812 #ifdef IPSEC
5813         struct m_tag            *mtag;
5814 #endif /* IPSEC */
5815
5816         ASSERT_LWKT_TOKEN_HELD(&pf_token);
5817
5818         if (m == NULL || *m == NULL || r == NULL ||
5819             (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5820                 panic("pf_route: invalid parameters");
5821
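        /*
         * Guard against routing loops: tag the mbuf as routed and drop it
         * once it has been re-routed more than three times.
         */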
5822         if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5823                 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5824                 (*m)->m_pkthdr.pf.routed = 1;
5825         } else {
5826                 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5827                         m0 = *m;
5828                         *m = NULL;
5829                         goto bad;
5830                 }
5831         }
5832
5833         if (r->rt == PF_DUPTO) {
5834                 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) {
5835                         return;
5836                 }
5837         } else {
5838                 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5839                         return;
5840                 }
5841                 m0 = *m;
5842         }
5843
5844         if (m0->m_len < sizeof(struct ip)) {
5845                 DPFPRINTF(PF_DEBUG_URGENT,
5846                     ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5847                 goto bad;
5848         }
5849
5850         ip = mtod(m0, struct ip *);
5851
5852         ro = &iproute;
5853         bzero((caddr_t)ro, sizeof(*ro));
5854         dst = satosin(&ro->ro_dst);
5855         dst->sin_family = AF_INET;
5856         dst->sin_len = sizeof(*dst);
5857         dst->sin_addr = ip->ip_dst;
5858
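        /*
         * PF_FASTROUTE consults the regular routing table; otherwise the
         * next hop comes from the rule's address pool (route-to/reply-to),
         * or from the state's cached rt_addr when a state exists.
         */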
5859         if (r->rt == PF_FASTROUTE) {
5860                 rtalloc(ro);
5861                 if (ro->ro_rt == NULL) {
5862                         ipstat.ips_noroute++;
5863                         goto bad;
5864                 }
5865
5866                 ifp = ro->ro_rt->rt_ifp;
5867                 ro->ro_rt->rt_use++;
5868
5869                 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
5870                         dst = satosin(ro->ro_rt->rt_gateway);
5871         } else {
5872                 if (TAILQ_EMPTY(&r->rpool.list)) {
5873                         DPFPRINTF(PF_DEBUG_URGENT,
5874                             ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
5875                         goto bad;
5876                 }
5877                 if (s == NULL) {
5878                         pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5879                             &naddr, NULL, &sn);
5880                         if (!PF_AZERO(&naddr, AF_INET))
5881                                 dst->sin_addr.s_addr = naddr.v4.s_addr;
5882                         ifp = r->rpool.cur->kif ?
5883                             r->rpool.cur->kif->pfik_ifp : NULL;
5884                 } else {
5885                         if (!PF_AZERO(&s->rt_addr, AF_INET))
5886                                 dst->sin_addr.s_addr =
5887                                     s->rt_addr.v4.s_addr;
5888                         ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5889                 }
5890         }
5891         if (ifp == NULL)
5892                 goto bad;
5893
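        /*
         * If the selected interface differs from the original one, re-run
         * the filter for the outbound path on the new interface.
         */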
5894         if (oifp != ifp) {
5895                 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
5896                         goto bad;
5897                 } else if (m0 == NULL) {
5898                         goto done;
5899                 }
5900                 if (m0->m_len < sizeof(struct ip)) {
5901                         DPFPRINTF(PF_DEBUG_URGENT,
5902                             ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5903                         goto bad;
5904                 }
5905                 ip = mtod(m0, struct ip *);
5906         }
5907
5908         /* Copied from FreeBSD 5.1-CURRENT ip_output. */
5909         m0->m_pkthdr.csum_flags |= CSUM_IP;
5910         sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
5911         if (sw_csum & CSUM_DELAY_DATA) {
5912                 in_delayed_cksum(m0);
5913                 sw_csum &= ~CSUM_DELAY_DATA;
5914         }
5915         m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
5916         m0->m_pkthdr.csum_iphlen = (ip->ip_hl << 2);
5917
5918         /*
5919          * WARNING!  We cannot fragment if the packet was modified from an
5920          *           original which expected to be using TSO.  In this
5921          *           situation we pray that the target interface is
5922          *           compatible with the originating interface.
5923          */
5924         if (ip->ip_len <= ifp->if_mtu ||
5925             (m0->m_pkthdr.csum_flags & CSUM_TSO) ||
5926             ((ifp->if_hwassist & CSUM_FRAGMENT) &&
5927                 (ip->ip_off & IP_DF) == 0)) {
5928                 ip->ip_len = htons(ip->ip_len);
5929                 ip->ip_off = htons(ip->ip_off);
5930                 ip->ip_sum = 0;
5931                 if (sw_csum & CSUM_DELAY_IP) {
5932                         /* From KAME */
5933                         if (ip->ip_v == IPVERSION &&
5934                             (ip->ip_hl << 2) == sizeof(*ip)) {
5935                                 ip->ip_sum = in_cksum_hdr(ip);
5936                         } else {
5937                                 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5938                         }
5939                 }
5940                 lwkt_reltoken(&pf_token);
5941                 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt);
5942                 lwkt_gettoken(&pf_token);
5943                 goto done;
5944         }
5945
5946         /*
5947          * Too large for interface; fragment if possible.
5948          * Must be able to put at least 8 bytes per fragment.
5949          */
5950         if (ip->ip_off & IP_DF) {
5951                 ipstat.ips_cantfrag++;
5952                 if (r->rt != PF_DUPTO) {
5953                         icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5954                                    ifp->if_mtu);
5955                         goto done;
5956                 } else
5957                         goto bad;
5958         }
5959
5960         m1 = m0;
5961         error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
5962         if (error) {
5963                 goto bad;
5964         }
5965
5966         for (m0 = m1; m0; m0 = m1) {
5967                 m1 = m0->m_nextpkt;
5968                 m0->m_nextpkt = 0;
5969                 if (error == 0) {
5970                         lwkt_reltoken(&pf_token);
5971                         error = (*ifp->if_output)(ifp, m0, sintosa(dst),
5972                                                   NULL);
5973                         lwkt_gettoken(&pf_token);
5974                 } else
5975                         m_freem(m0);
5976         }
5977
5978         if (error == 0)
5979                 ipstat.ips_fragmented++;
5980
5981 done:
5982         if (r->rt != PF_DUPTO)
5983                 *m = NULL;
5984         if (ro == &iproute && ro->ro_rt)
5985                 RTFREE(ro->ro_rt);
5986         return;
5987
5988 bad:
5989         m_freem(m0);
5990         goto done;
5991 }
5992 #endif /* INET */
5993
5994 #ifdef INET6
5995 void
5996 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5997     struct pf_state *s, struct pf_pdesc *pd)
5998 {
5999         struct mbuf             *m0;
6000         struct route_in6         ip6route;
6001         struct route_in6        *ro;
6002         struct sockaddr_in6     *dst;
6003         struct ip6_hdr          *ip6;
6004         struct ifnet            *ifp = NULL;
6005         struct pf_addr           naddr;
6006         struct pf_src_node      *sn = NULL;
6007
6008         if (m == NULL || *m == NULL || r == NULL ||
6009             (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
6010                 panic("pf_route6: invalid parameters");
6011
6012         if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
6013                 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
6014                 (*m)->m_pkthdr.pf.routed = 1;
6015         } else {
6016                 if ((*m)->m_pkthdr.pf.routed++ > 3) {
6017                         m0 = *m;
6018                         *m = NULL;
6019                         goto bad;
6020                 }
6021         }
6022
6023         if (r->rt == PF_DUPTO) {
6024                 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
6025                         return;
6026         } else {
6027                 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
6028                         return;
6029                 m0 = *m;
6030         }
6031
6032         if (m0->m_len < sizeof(struct ip6_hdr)) {
6033                 DPFPRINTF(PF_DEBUG_URGENT,
6034                     ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6035                 goto bad;
6036         }
6037         ip6 = mtod(m0, struct ip6_hdr *);
6038
6039         ro = &ip6route;
6040         bzero((caddr_t)ro, sizeof(*ro));
6041         dst = (struct sockaddr_in6 *)&ro->ro_dst;
6042         dst->sin6_family = AF_INET6;
6043         dst->sin6_len = sizeof(*dst);
6044         dst->sin6_addr = ip6->ip6_dst;
6045
6046         /*
6047          * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6048          * so make sure pf.flags is clear.
6049          *
6050          * Cheat. XXX why only in the v6 case???
6051          */
6052         if (r->rt == PF_FASTROUTE) {
6053                 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
6054                 m0->m_pkthdr.pf.flags = 0;
6055                 /* XXX Re-Check when Upgrading to > 4.4 */
6056                 m0->m_pkthdr.pf.statekey = NULL;
6057                 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6058                 return;
6059         }
6060
6061         if (TAILQ_EMPTY(&r->rpool.list)) {
6062                 DPFPRINTF(PF_DEBUG_URGENT,
6063                     ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
6064                 goto bad;
6065         }
6066         if (s == NULL) {
6067                 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6068                     &naddr, NULL, &sn);
6069                 if (!PF_AZERO(&naddr, AF_INET6))
6070                         PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6071                             &naddr, AF_INET6);
6072                 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6073         } else {
6074                 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6075                         PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6076                             &s->rt_addr, AF_INET6);
6077                 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6078         }
6079         if (ifp == NULL)
6080                 goto bad;
6081
6082         if (oifp != ifp) {
6083                 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6084                         goto bad;
6085                 } else if (m0 == NULL) {
6086                         goto done;
6087                 }
6088                 if (m0->m_len < sizeof(struct ip6_hdr)) {
6089                         DPFPRINTF(PF_DEBUG_URGENT,
6090                             ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6091                         goto bad;
6092                 }
6093                 ip6 = mtod(m0, struct ip6_hdr *);
6094         }
6095
6096         /*
6097          * If the packet is too large for the outgoing interface,
6098          * send back an icmp6 error.
6099          */
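        /*
         * For link-local destinations, embed the outgoing interface index
         * in the scoped address (KAME-style) before output.
         */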
6100         if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
6101                 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6102         if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6103                 nd6_output(ifp, ifp, m0, dst, NULL);
6104         } else {
6105                 in6_ifstat_inc(ifp, ifs6_in_toobig);
6106                 if (r->rt != PF_DUPTO)
6107                         icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6108                 else
6109                         goto bad;
6110         }
6111
6112 done:
6113         if (r->rt != PF_DUPTO)
6114                 *m = NULL;
6115         return;
6116
6117 bad:
6118         m_freem(m0);
6119         goto done;
6120 }
6121 #endif /* INET6 */
6122
6123
6124 /*
6125  * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
6126  *   off is the offset where the protocol header starts
6127  *   len is the total length of protocol header plus payload
6128  * returns 0 when the checksum is valid, otherwise returns 1.
6129  */
6130 /*
6131  * XXX
6132  * FreeBSD supports cksum offload for the following drivers:
6133  * em(4), gx(4), lge(4), nge(4), ti(4), xl(4)
6134  * If we could make full use of it we would outperform ipfw/ipfilter in
6135  * very heavy traffic.
6136  * I have not tested this because I don't have NICs that support cksum offload.
6137  * (There might be problems. Typical phenomena would be
6138  *   1. No route message for UDP packet.
6139  *   2. No connection acceptance from external hosts regardless of rule set.)
6140  */
6141 int
6142 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
6143     sa_family_t af)
6144 {
6145         u_int16_t sum = 0;
6146         int hw_assist = 0;
6147         struct ip *ip;
6148
6149         if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6150                 return (1);
6151         if (m->m_pkthdr.len < off + len)
6152                 return (1);
6153
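        /*
         * If the NIC already verified the TCP/UDP checksum (CSUM_DATA_VALID),
         * fold in the pseudo-header where needed and use the hardware result;
         * otherwise fall back to a software checksum further below.
         */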
6154         switch (p) {
6155         case IPPROTO_TCP:
6156         case IPPROTO_UDP:
6157                 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6158                         if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6159                                 sum = m->m_pkthdr.csum_data;
6160                         } else {
6161                                 ip = mtod(m, struct ip *);      
6162                                 sum = in_pseudo(ip->ip_src.s_addr,
6163                                         ip->ip_dst.s_addr, htonl((u_short)len +
6164                                         m->m_pkthdr.csum_data + p));
6165                         }
6166                         sum ^= 0xffff;
6167                         ++hw_assist;
6168                 }
6169                 break;
6170         case IPPROTO_ICMP:
6171 #ifdef INET6
6172         case IPPROTO_ICMPV6:
6173 #endif /* INET6 */
6174                 break;
6175         default:
6176                 return (1);
6177         }
6178
6179         if (!hw_assist) {
6180                 switch (af) {
6181                 case AF_INET:
6182                         if (p == IPPROTO_ICMP) {
6183                                 if (m->m_len < off)
6184                                         return (1);
6185                                 m->m_data += off;
6186                                 m->m_len -= off;
6187                                 sum = in_cksum(m, len);
6188                                 m->m_data -= off;
6189                                 m->m_len += off;
6190                         } else {
6191                                 if (m->m_len < sizeof(struct ip))
6192                                         return (1);
6193                                 sum = in_cksum_range(m, p, off, len);
6194                                 if (sum == 0) {
6195                                         m->m_pkthdr.csum_flags |=
6196                                             (CSUM_DATA_VALID |
6197                                              CSUM_PSEUDO_HDR);
6198                                         m->m_pkthdr.csum_data = 0xffff;
6199                                 }
6200                         }
6201                         break;
6202 #ifdef INET6
6203                 case AF_INET6:
6204                         if (m->m_len < sizeof(struct ip6_hdr))
6205                                 return (1);
6206                         sum = in6_cksum(m, p, off, len);
6207                         /*
6208                          * XXX
6209                          * IPv6 H/W cksum off-load not supported yet!
6210                          *
6211                          * if (sum == 0) {
6212                          *      m->m_pkthdr.csum_flags |=
6213                          *          (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
6214                          *      m->m_pkthdr.csum_data = 0xffff;
6215                          *}
6216                          */
6217                         break;
6218 #endif /* INET6 */
6219                 default:
6220                         return (1);
6221                 }
6222         }
6223         if (sum) {
6224                 switch (p) {
6225                 case IPPROTO_TCP:
6226                         tcpstat.tcps_rcvbadsum++;
6227                         break;
6228                 case IPPROTO_UDP:
6229                         udp_stat.udps_badsum++;
6230                         break;
6231                 case IPPROTO_ICMP:
6232                         icmpstat.icps_checksum++;
6233                         break;
6234 #ifdef INET6
6235                 case IPPROTO_ICMPV6:
6236                         icmp6stat.icp6s_checksum++;
6237                         break;
6238 #endif /* INET6 */
6239                 }
6240                 return (1);
6241         }
6242         return (0);
6243 }
6244
6245 struct pf_divert *
6246 pf_find_divert(struct mbuf *m)
6247 {
6248         struct m_tag    *mtag;
6249
6250         if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
6251                 return (NULL);
6252
6253         return ((struct pf_divert *)(mtag + 1));
6254 }
6255
6256 struct pf_divert *
6257 pf_get_divert(struct mbuf *m)
6258 {
6259         struct m_tag    *mtag;
6260
6261         if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
6262                 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
6263                     M_NOWAIT);
6264                 if (mtag == NULL)
6265                         return (NULL);
6266                 bzero(mtag + 1, sizeof(struct pf_divert));
6267                 m_tag_prepend(m, mtag);
6268         }
6269
6270         return ((struct pf_divert *)(mtag + 1));
6271 }
6272
6273 #ifdef INET
6274
6275 /*
6276  * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE
6277  */
6278 int
6279 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
6280     struct ether_header *eh, struct inpcb *inp)
6281 {
6282         struct pfi_kif          *kif;
6283         u_short                  action, reason = 0, log = 0;
6284         struct mbuf             *m = *m0;
6285         struct ip               *h = NULL;
6286         struct pf_rule          *a = NULL, *r = &pf_default_rule, *tr, *nr;
6287         struct pf_state         *s = NULL;
6288         struct pf_ruleset       *ruleset = NULL;
6289         struct pf_pdesc          pd;
6290         int                      off, dirndx;
6291 #ifdef ALTQ
6292         int                      pqid = 0;
6293 #endif
6294
6295         if (!pf_status.running)
6296                 return (PF_PASS);
6297
6298         memset(&pd, 0, sizeof(pd));
6299 #ifdef foo
6300         if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6301                 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6302         else
6303 #endif
6304                 kif = (struct pfi_kif *)ifp->if_pf_kif;
6305
6306         if (kif == NULL) {
6307                 DPFPRINTF(PF_DEBUG_URGENT,
6308                     ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6309                 return (PF_DROP);
6310         }
6311         if (kif->pfik_flags & PFI_IFLAG_SKIP)
6312                 return (PF_PASS);
6313
6314 #ifdef DIAGNOSTIC
6315         if ((m->m_flags & M_PKTHDR) == 0)
6316                 panic("non-M_PKTHDR is passed to pf_test");
6317 #endif /* DIAGNOSTIC */
6318
6319         if (m->m_pkthdr.len < (int)sizeof(*h)) {
6320                 action = PF_DROP;
6321                 REASON_SET(&reason, PFRES_SHORT);
6322                 log = 1;
6323                 goto done;
6324         }
6325
6326         /*
6327          * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6328          * so make sure pf.flags is clear.
6329          */
6330         if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
6331                 return (PF_PASS);
6332         m->m_pkthdr.pf.flags = 0;
6333         /* Re-Check when updating to > 4.4 */
6334         m->m_pkthdr.pf.statekey = NULL;
6335
6336         /* We do IP header normalization and packet reassembly here */
6337         if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6338                 action = PF_DROP;
6339                 goto done;
6340         }
6341         m = *m0;        /* pf_normalize messes with m0 */
6342         h = mtod(m, struct ip *);
6343
6344         off = h->ip_hl << 2;
6345         if (off < (int)sizeof(*h)) {
6346                 action = PF_DROP;
6347                 REASON_SET(&reason, PFRES_SHORT);
6348                 log = 1;
6349                 goto done;
6350         }
6351
6352         pd.src = (struct pf_addr *)&h->ip_src;
6353         pd.dst = (struct pf_addr *)&h->ip_dst;
6354         pd.sport = pd.dport = NULL;
6355         pd.ip_sum = &h->ip_sum;
6356         pd.proto_sum = NULL;
6357         pd.proto = h->ip_p;
6358         pd.dir = dir;
6359         pd.sidx = (dir == PF_IN) ? 0 : 1;
6360         pd.didx = (dir == PF_IN) ? 1 : 0;
6361         pd.af = AF_INET;
6362         pd.tos = h->ip_tos;
6363         pd.tot_len = h->ip_len;
6364         pd.eh = eh;
6365
6366         /* handle fragments that didn't get reassembled by normalization */
6367         if (h->ip_off & (IP_MF | IP_OFFMASK)) {
6368                 action = pf_test_fragment(&r, dir, kif, m, h,
6369                     &pd, &a, &ruleset);
6370                 goto done;
6371         }
6372
6373         switch (h->ip_p) {
6374
6375         case IPPROTO_TCP: {
6376                 struct tcphdr   th;
6377
6378                 pd.hdr.tcp = &th;
6379                 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6380                     &action, &reason, AF_INET)) {
6381                         log = action != PF_PASS;
6382                         goto done;
6383                 }
6384                 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6385 #ifdef ALTQ
6386                 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
6387                         pqid = 1;
6388 #endif
6389                 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6390                 if (action == PF_DROP)
6391                         goto done;
6392                 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6393                                            &reason);
6394                 if (action == PF_PASS) {
6395                         r = s->rule.ptr;
6396                         a = s->anchor.ptr;
6397                         log = s->log;
6398                 } else if (s == NULL) {
6399                         action = pf_test_rule(&r, &s, dir, kif,
6400                                               m, off, h, &pd, &a,
6401                                               &ruleset, NULL, inp);
6402                 }
6403                 break;
6404         }
6405
6406         case IPPROTO_UDP: {
6407                 struct udphdr   uh;
6408
6409                 pd.hdr.udp = &uh;
6410                 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6411                     &action, &reason, AF_INET)) {
6412                         log = action != PF_PASS;
6413                         goto done;
6414                 }
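                     /*
                      * Reject malformed UDP: a zero destination port, a
                      * length larger than the data actually present or
                      * shorter than the UDP header itself.
                      */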
6415                 if (uh.uh_dport == 0 ||
6416                     ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6417                     ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6418                         action = PF_DROP;
6419                         REASON_SET(&reason, PFRES_SHORT);
6420                         goto done;
6421                 }
6422                 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6423                 if (action == PF_PASS) {
6424                         r = s->rule.ptr;
6425                         a = s->anchor.ptr;
6426                         log = s->log;
6427                 } else if (s == NULL) {
6428                         action = pf_test_rule(&r, &s, dir, kif,
6429                                               m, off, h, &pd, &a,
6430                                               &ruleset, NULL, inp);
6431                 }
6432                 break;
6433         }
6434
6435         case IPPROTO_ICMP: {
6436                 struct icmp     ih;
6437
6438                 pd.hdr.icmp = &ih;
6439                 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6440                     &action, &reason, AF_INET)) {
6441                         log = action != PF_PASS;
6442                         goto done;
6443                 }
6444                 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6445                                             &reason);
6446                 if (action == PF_PASS) {
6447                         r = s->rule.ptr;
6448                         a = s->anchor.ptr;
6449                         log = s->log;
6450                 } else if (s == NULL) {
6451                         action = pf_test_rule(&r, &s, dir, kif,
6452                                               m, off, h, &pd, &a,
6453                                               &ruleset, NULL, inp);
6454                 }
6455                 break;
6456         }
6457
6458         default:
6459                 action = pf_test_state_other(&s, dir, kif, m, &pd);
6460                 if (action == PF_PASS) {
6461                         r = s->rule.ptr;
6462                         a = s->anchor.ptr;
6463                         log = s->log;
6464                 } else if (s == NULL) {
6465                         action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6466                                               &pd, &a, &ruleset, NULL, inp);
6467                 }
6468                 break;
6469         }
6470
6471 done:
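             /*
              * An IP header longer than five 32-bit words carries options;
              * drop the packet unless the rule or state allows them.
              */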
6472         if (action == PF_PASS && h->ip_hl > 5 &&
6473             !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6474                 action = PF_DROP;
6475                 REASON_SET(&reason, PFRES_IPOPTIONS);
6476                 log = 1;
6477                 DPFPRINTF(PF_DEBUG_MISC,
6478                     ("pf: dropping packet with ip options\n"));
6479         }
6480
6481         if ((s && s->tag) || r->rtableid)
6482                 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6483
6484 #if 0
6485         if (dir == PF_IN && s && s->key[PF_SK_STACK])
6486                 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6487 #endif
6488
6489 #ifdef ALTQ
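             /*
              * Stash the queue assignment in the mbuf pkthdr for ALTQ;
              * low-delay TOS and pure ACKs go to the priority queue.
              */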
6490         if (action == PF_PASS && r->qid) {
6491                 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
6492                 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6493                         m->m_pkthdr.pf.qid = r->pqid;
6494                 else
6495                         m->m_pkthdr.pf.qid = r->qid;
6496                 m->m_pkthdr.pf.ecn_af = AF_INET;
6497                 m->m_pkthdr.pf.hdr = h;
6498                 /* add connection hash for fairq */
6499                 if (s) {
6500                         /* for fairq */
6501                         m->m_pkthdr.pf.state_hash = s->hash;
6502                         m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
6503                 }
6504         }
6505 #endif /* ALTQ */
6506
6507         /*
6508          * connections redirected to loopback should not match sockets
6509          * bound specifically to loopback due to security implications,
6510          * see tcp_input() and in_pcblookup_listen().
6511          */
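             /* the shift keeps the leading octet, i.e. any 127/8 destination */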
6512         if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6513             pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6514             (s->nat_rule.ptr->action == PF_RDR ||
6515             s->nat_rule.ptr->action == PF_BINAT) &&
6516             (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6517                 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6518
6519         if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6520                 struct pf_divert *divert;
6521
6522                 if ((divert = pf_get_divert(m))) {
6523                         m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
6524                         divert->port = r->divert.port;
6525                         divert->addr.ipv4 = r->divert.addr.v4;
6526                 }
6527         }
6528
6529         if (log) {
6530                 struct pf_rule *lr;
6531
6532                 if (s != NULL && s->nat_rule.ptr != NULL &&
6533                     s->nat_rule.ptr->log & PF_LOG_ALL)
6534                         lr = s->nat_rule.ptr;
6535                 else
6536                         lr = r;
6537                 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
6538                     &pd);
6539         }
6540
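             /*
              * Interface counters are indexed [family][direction][blocked]:
              * 0 is the IPv4 bucket here (pf_test6 uses 1), then in/out,
              * then pass/block.
              */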
6541         kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6542         kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6543
6544         if (action == PF_PASS || r->action == PF_DROP) {
6545                 dirndx = (dir == PF_OUT);
6546                 r->packets[dirndx]++;
6547                 r->bytes[dirndx] += pd.tot_len;
6548                 if (a != NULL) {
6549                         a->packets[dirndx]++;
6550                         a->bytes[dirndx] += pd.tot_len;
6551                 }
6552                 if (s != NULL) {
6553                         if (s->nat_rule.ptr != NULL) {
6554                                 s->nat_rule.ptr->packets[dirndx]++;
6555                                 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6556                         }
6557                         if (s->src_node != NULL) {
6558                                 s->src_node->packets[dirndx]++;
6559                                 s->src_node->bytes[dirndx] += pd.tot_len;
6560                         }
6561                         if (s->nat_src_node != NULL) {
6562                                 s->nat_src_node->packets[dirndx]++;
6563                                 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6564                         }
6565                         dirndx = (dir == s->direction) ? 0 : 1;
6566                         s->packets[dirndx]++;
6567                         s->bytes[dirndx] += pd.tot_len;
6568                 }
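                     /*
                      * If only the default rule matched but the packet was
                      * translated, charge table statistics to the NAT rule.
                      */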
6569                 tr = r;
6570                 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6571                 if (nr != NULL && r == &pf_default_rule)
6572                         tr = nr;
6573                 if (tr->src.addr.type == PF_ADDR_TABLE)
6574                         pfr_update_stats(tr->src.addr.p.tbl,
6575                             (s == NULL) ? pd.src :
6576                             &s->key[(s->direction == PF_IN)]->
6577                                 addr[(s->direction == PF_OUT)],
6578                             pd.af, pd.tot_len, dir == PF_OUT,
6579                             r->action == PF_PASS, tr->src.neg);
6580                 if (tr->dst.addr.type == PF_ADDR_TABLE)
6581                         pfr_update_stats(tr->dst.addr.p.tbl,
6582                             (s == NULL) ? pd.dst :
6583                             &s->key[(s->direction == PF_IN)]->
6584                                 addr[(s->direction == PF_IN)],
6585                             pd.af, pd.tot_len, dir == PF_OUT,
6586                             r->action == PF_PASS, tr->dst.neg);
6587         }
6588
6589
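             /*
              * A synproxy drop consumes the mbuf but is reported as PF_PASS
              * so callers treat the packet as handled.  Otherwise hand the
              * packet to pf_route() when the rule has a route-to style target.
              */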
6590         if (action == PF_SYNPROXY_DROP) {
6591                 m_freem(*m0);
6592                 *m0 = NULL;
6593                 action = PF_PASS;
6594         } else if (r->rt)
6595                 /* pf_route can free the mbuf causing *m0 to become NULL */
6596                 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6597
6598         return (action);
6599 }
6600 #endif /* INET */
6601
6602 #ifdef INET6
6603
6604 /*
6605  * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE
6606  */
6607 int
6608 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
6609     struct ether_header *eh, struct inpcb *inp)
6610 {
6611         struct pfi_kif          *kif;
6612         u_short                  action, reason = 0, log = 0;
6613         struct mbuf             *m = *m0, *n = NULL;
6614         struct ip6_hdr          *h = NULL;
6615         struct pf_rule          *a = NULL, *r = &pf_default_rule, *tr, *nr;
6616         struct pf_state         *s = NULL;
6617         struct pf_ruleset       *ruleset = NULL;
6618         struct pf_pdesc          pd;
6619         int                      off, terminal = 0, dirndx, rh_cnt = 0;
6620
6621         if (!pf_status.running)
6622                 return (PF_PASS);
6623
6624         memset(&pd, 0, sizeof(pd));
6625 #ifdef foo
6626         if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6627                 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6628         else
6629 #endif
6630                 kif = (struct pfi_kif *)ifp->if_pf_kif;
6631
6632         if (kif == NULL) {
6633                 DPFPRINTF(PF_DEBUG_URGENT,
6634                     ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6635                 return (PF_DROP);
6636         }
6637         if (kif->pfik_flags & PFI_IFLAG_SKIP)
6638                 return (PF_PASS);
6639
6640 #ifdef DIAGNOSTIC
6641         if ((m->m_flags & M_PKTHDR) == 0)
6642                 panic("non-M_PKTHDR is passed to pf_test6");
6643 #endif /* DIAGNOSTIC */
6644
6645         if (m->m_pkthdr.len < (int)sizeof(*h)) {
6646                 action = PF_DROP;
6647                 REASON_SET(&reason, PFRES_SHORT);
6648                 log = 1;
6649                 goto done;
6650         }
6651
6652         /*
6653          * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6654          * so make sure pf.flags is clear.
6655          */
6656         if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
6657                 return (PF_PASS);
6658         m->m_pkthdr.pf.flags = 0;
6659         /* Re-Check when updating to > 4.4 */
6660         m->m_pkthdr.pf.statekey = NULL;
6661
6662         /* We do IP header normalization and packet reassembly here */
6663         if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6664                 action = PF_DROP;
6665                 goto done;
6666         }
6667         m = *m0;        /* pf_normalize messes with m0 */
6668         h = mtod(m, struct ip6_hdr *);
6669
6670 #if 1
6671         /*
6672          * We do not support jumbograms yet.  If we keep going, a zero
6673          * ip6_plen will do something bad, so drop the packet for now.
6674          */
6675         if (htons(h->ip6_plen) == 0) {
6676                 action = PF_DROP;
6677                 REASON_SET(&reason, PFRES_NORM);        /*XXX*/
6678                 goto done;
6679         }
6680 #endif
6681
6682         pd.src = (struct pf_addr *)&h->ip6_src;
6683         pd.dst = (struct pf_addr *)&h->ip6_dst;
6684         pd.sport = pd.dport = NULL;
6685         pd.ip_sum = NULL;
6686         pd.proto_sum = NULL;
6687         pd.dir = dir;
6688         pd.sidx = (dir == PF_IN) ? 0 : 1;
6689         pd.didx = (dir == PF_IN) ? 1 : 0;
6690         pd.af = AF_INET6;
6691         pd.tos = 0;
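             /* ip6_plen excludes the fixed header; add it back for the total */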
6692         pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6693         pd.eh = eh;
6694
6695         off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6696         pd.proto = h->ip6_nxt;
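             /*
              * Walk the extension header chain: fragments divert to the
              * fragment path, routing/AH/option headers are skipped over and
              * the first transport header terminates the loop.
              */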
6697         do {
6698                 switch (pd.proto) {
6699                 case IPPROTO_FRAGMENT:
6700                         action = pf_test_fragment(&r, dir, kif, m, h,
6701                             &pd, &a, &ruleset);
6702                         if (action == PF_DROP)
6703                                 REASON_SET(&reason, PFRES_FRAG);
6704                         goto done;
6705                 case IPPROTO_ROUTING: {
6706                         struct ip6_rthdr rthdr;
6707
6708                         if (rh_cnt++) {
6709                                 DPFPRINTF(PF_DEBUG_MISC,
6710                                     ("pf: IPv6 more than one rthdr\n"));
6711                                 action = PF_DROP;
6712                                 REASON_SET(&reason, PFRES_IPOPTIONS);
6713                                 log = 1;
6714                                 goto done;
6715                         }
6716                         if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6717                             &reason, pd.af)) {
6718                                 DPFPRINTF(PF_DEBUG_MISC,
6719                                     ("pf: IPv6 short rthdr\n"));
6720                                 action = PF_DROP;
6721                                 REASON_SET(&reason, PFRES_SHORT);
6722                                 log = 1;
6723                                 goto done;
6724                         }
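                             /*
                              * Type 0 routing headers permit source routing
                              * and are rejected (deprecated by RFC 5095).
                              */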
6725                         if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6726                                 DPFPRINTF(PF_DEBUG_MISC,
6727                                     ("pf: IPv6 rthdr0\n"));
6728                                 action = PF_DROP;
6729                                 REASON_SET(&reason, PFRES_IPOPTIONS);
6730                                 log = 1;
6731                                 goto done;
6732                         }
6733                         /* FALLTHROUGH */
6734                 }
6735                 case IPPROTO_AH:
6736                 case IPPROTO_HOPOPTS:
6737                 case IPPROTO_DSTOPTS: {
6738                         /* get next header and header length */
6739                         struct ip6_ext  opt6;
6740
6741                         if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6742                             NULL, &reason, pd.af)) {
6743                                 DPFPRINTF(PF_DEBUG_MISC,
6744                                     ("pf: IPv6 short opt\n"));
6745                                 action = PF_DROP;
6746                                 log = 1;
6747                                 goto done;
6748                         }
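                             /*
                              * AH encodes its length in 32-bit words minus
                              * two; the other extension headers use 8-byte
                              * units not counting the first eight bytes.
                              */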
6749                         if (pd.proto == IPPROTO_AH)
6750                                 off += (opt6.ip6e_len + 2) * 4;
6751                         else
6752                                 off += (opt6.ip6e_len + 1) * 8;
6753                         pd.proto = opt6.ip6e_nxt;
6754                         /* go to the next header */
6755                         break;
6756                 }
6757                 default:
6758                         terminal++;
6759                         break;
6760                 }
6761         } while (!terminal);
6762
6763         /* if there's no routing header, use unmodified mbuf for checksumming */
6764         if (!n)
6765                 n = m;
6766
6767         switch (pd.proto) {
6768
6769         case IPPROTO_TCP: {
6770                 struct tcphdr   th;
6771
6772                 pd.hdr.tcp = &th;
6773                 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6774                     &action, &reason, AF_INET6)) {
6775                         log = action != PF_PASS;
6776                         goto done;
6777                 }
6778                 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6779                 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6780                 if (action == PF_DROP)
6781                         goto done;
6782                 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6783                                            &reason);
6784                 if (action == PF_PASS) {
6785                         r = s->rule.ptr;
6786                         a = s->anchor.ptr;
6787                         log = s->log;
6788                 } else if (s == NULL) {
6789                         action = pf_test_rule(&r, &s, dir, kif,
6790                                               m, off, h, &pd, &a,
6791                                               &ruleset, NULL, inp);
6792                 }
6793                 break;
6794         }
6795
6796         case IPPROTO_UDP: {
6797                 struct udphdr   uh;
6798
6799                 pd.hdr.udp = &uh;
6800                 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6801                     &action, &reason, AF_INET6)) {
6802                         log = action != PF_PASS;
6803                         goto done;
6804                 }
6805                 if (uh.uh_dport == 0 ||
6806                     ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6807                     ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6808                         action = PF_DROP;
6809                         REASON_SET(&reason, PFRES_SHORT);
6810                         goto done;
6811                 }
6812                 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6813                 if (action == PF_PASS) {
6814                         r = s->rule.ptr;
6815                         a = s->anchor.ptr;
6816                         log = s->log;
6817                 } else if (s == NULL) {
6818                         action = pf_test_rule(&r, &s, dir, kif,
6819                                               m, off, h, &pd, &a,
6820                                               &ruleset, NULL, inp);
6821                 }
6822                 break;
6823         }
6824
6825         case IPPROTO_ICMPV6: {
6826                 struct icmp6_hdr        ih;
6827
6828                 pd.hdr.icmp6 = &ih;
6829                 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6830                     &action, &reason, AF_INET6)) {
6831                         log = action != PF_PASS;
6832                         goto done;
6833                 }
6834                 action = pf_test_state_icmp(&s, dir, kif,
6835                                             m, off, h, &pd, &reason);
6836                 if (action == PF_PASS) {
6837                         r = s->rule.ptr;
6838                         a = s->anchor.ptr;
6839                         log = s->log;
6840                 } else if (s == NULL) {
6841                         action = pf_test_rule(&r, &s, dir, kif,
6842                                               m, off, h, &pd, &a,
6843                                               &ruleset, NULL, inp);
6844                 }
6845                 break;
6846         }
6847
6848         default:
6849                 action = pf_test_state_other(&s, dir, kif, m, &pd);
6850                 if (action == PF_PASS) {
6851                         r = s->rule.ptr;
6852                         a = s->anchor.ptr;
6853                         log = s->log;
6854                 } else if (s == NULL) {
6855                         action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6856                                               &pd, &a, &ruleset, NULL, inp);
6857                 }
6858                 break;
6859         }
6860
6861 done:
6862         if (n != m) {
6863                 m_freem(n);
6864                 n = NULL;
6865         }
6866
6867         /* handle dangerous IPv6 extension headers. */
6868         if (action == PF_PASS && rh_cnt &&
6869             !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6870                 action = PF_DROP;
6871                 REASON_SET(&reason, PFRES_IPOPTIONS);
6872                 log = 1;
6873                 DPFPRINTF(PF_DEBUG_MISC,
6874                     ("pf: dropping packet with dangerous v6 headers\n"));
6875         }
6876
6877         if ((s && s->tag) || r->rtableid)
6878                 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6879
6880 #if 0
6881         if (dir == PF_IN && s && s->key[PF_SK_STACK])
6882                 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6883 #endif
6884
6885 #ifdef ALTQ
6886         if (action == PF_PASS && r->qid) {
6887                 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
6888                 if (pd.tos & IPTOS_LOWDELAY)
6889                         m->m_pkthdr.pf.qid = r->pqid;
6890                 else
6891                         m->m_pkthdr.pf.qid = r->qid;
6892                 m->m_pkthdr.pf.ecn_af = AF_INET6;
6893                 m->m_pkthdr.pf.hdr = h;
6894                 if (s) {
6895                         /* for fairq */
6896                         m->m_pkthdr.pf.state_hash = s->hash;
6897                         m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
6898                 }
6899         }
6900 #endif /* ALTQ */
6901
6902         if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6903             pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6904             (s->nat_rule.ptr->action == PF_RDR ||
6905             s->nat_rule.ptr->action == PF_BINAT) &&
6906             IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6907                 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6908
6909         if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6910                 struct pf_divert *divert;
6911
6912                 if ((divert = pf_get_divert(m))) {
6913                         m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
6914                         divert->port = r->divert.port;
6915                         divert->addr.ipv6 = r->divert.addr.v6;
6916                 }
6917         }
6918
6919         if (log) {
6920                 struct pf_rule *lr;
6921
6922                 if (s != NULL && s->nat_rule.ptr != NULL &&
6923                     s->nat_rule.ptr->log & PF_LOG_ALL)
6924                         lr = s->nat_rule.ptr;
6925                 else
6926                         lr = r;
6927                 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
6928                     &pd);
6929         }
6930
6931         kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6932         kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6933
6934         if (action == PF_PASS || r->action == PF_DROP) {
6935                 dirndx = (dir == PF_OUT);
6936                 r->packets[dirndx]++;
6937                 r->bytes[dirndx] += pd.tot_len;
6938                 if (a != NULL) {
6939                         a->packets[dirndx]++;
6940                         a->bytes[dirndx] += pd.tot_len;
6941                 }
6942                 if (s != NULL) {
6943                         if (s->nat_rule.ptr != NULL) {
6944                                 s->nat_rule.ptr->packets[dirndx]++;
6945                                 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6946                         }
6947                         if (s->src_node != NULL) {
6948                                 s->src_node->packets[dirndx]++;
6949                                 s->src_node->bytes[dirndx] += pd.tot_len;
6950                         }
6951                         if (s->nat_src_node != NULL) {
6952                                 s->nat_src_node->packets[dirndx]++;
6953                                 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6954                         }
6955                         dirndx = (dir == s->direction) ? 0 : 1;
6956                         s->packets[dirndx]++;
6957                         s->bytes[dirndx] += pd.tot_len;
6958                 }
6959                 tr = r;
6960                 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6961                 if (nr != NULL && r == &pf_default_rule)
6962                         tr = nr;
6963                 if (tr->src.addr.type == PF_ADDR_TABLE)
6964                         pfr_update_stats(tr->src.addr.p.tbl,
6965                             (s == NULL) ? pd.src :
6966                             &s->key[(s->direction == PF_IN)]->addr[0],
6967                             pd.af, pd.tot_len, dir == PF_OUT,
6968                             r->action == PF_PASS, tr->src.neg);
6969                 if (tr->dst.addr.type == PF_ADDR_TABLE)
6970                         pfr_update_stats(tr->dst.addr.p.tbl,
6971                             (s == NULL) ? pd.dst :
6972                             &s->key[(s->direction == PF_IN)]->addr[1],
6973                             pd.af, pd.tot_len, dir == PF_OUT,
6974                             r->action == PF_PASS, tr->dst.neg);
6975         }
6976
6977
6978         if (action == PF_SYNPROXY_DROP) {
6979                 m_freem(*m0);
6980                 *m0 = NULL;
6981                 action = PF_PASS;
6982         } else if (r->rt)
6983                 /* pf_route6 can free the mbuf causing *m0 to become NULL */
6984                 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6985
6986         return (action);
6987 }
6988 #endif /* INET6 */
6989
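     /* stub: always reports no congestion, the ifq argument is unused */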
6990 int
6991 pf_check_congestion(struct ifqueue *ifq)
6992 {
6993         return (0);
6994 }