1 /* $OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */
4 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
6 * Copyright (c) 2001 Daniel Hartmeier
7 * Copyright (c) 2002 - 2008 Henning Brauer
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * - Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * - Redistributions in binary form must reproduce the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer in the documentation and/or other materials provided
19 * with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * Effort sponsored in part by the Defense Advanced Research Projects
35 * Agency (DARPA) and Air Force Research Laboratory, Air Force
36 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 #include "opt_inet6.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
47 #include <sys/filio.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/kernel.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <vm/vm_zone.h>
56 #include <sys/kthread.h>
58 #include <machine/inttypes.h>
63 #include <net/if_types.h>
65 #include <net/netisr.h>
66 #include <net/route.h>
68 #include <netinet/in.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip_var.h>
73 #include <netinet/tcp.h>
74 #include <netinet/tcp_seq.h>
75 #include <netinet/udp.h>
76 #include <netinet/ip_icmp.h>
77 #include <netinet/in_pcb.h>
78 #include <netinet/tcp_timer.h>
79 #include <netinet/tcp_var.h>
80 #include <netinet/udp_var.h>
81 #include <netinet/icmp_var.h>
82 #include <netinet/if_ether.h>
84 #include <net/pf/pfvar.h>
85 #include <net/pf/if_pflog.h>
87 #include <net/pf/if_pfsync.h>
90 #include <netinet/ip6.h>
91 #include <netinet/in_pcb.h>
92 #include <netinet/icmp6.h>
93 #include <netinet6/nd6.h>
94 #include <netinet6/ip6_var.h>
95 #include <netinet6/in6_pcb.h>
98 #include <sys/in_cksum.h>
99 #include <sys/ucred.h>
100 #include <machine/limits.h>
101 #include <sys/msgport2.h>
102 #include <net/netmsg2.h>
104 extern int ip_optcopy(struct ip *, struct ip *);
105 extern int debug_pfugidhack;
107 struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
109 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
116 struct pf_state_tree pf_statetbl;
118 struct pf_altqqueue pf_altqs[2];
119 struct pf_palist pf_pabuf;
120 struct pf_altqqueue *pf_altqs_active;
121 struct pf_altqqueue *pf_altqs_inactive;
122 struct pf_status pf_status;
124 u_int32_t ticket_altqs_active;
125 u_int32_t ticket_altqs_inactive;
126 int altqs_inactive_open;
127 u_int32_t ticket_pabuf;
129 MD5_CTX pf_tcp_secret_ctx;
130 u_char pf_tcp_secret[16];
131 int pf_tcp_secret_init;
134 struct pf_anchor_stackframe {
135 struct pf_ruleset *rs;
137 struct pf_anchor_node *parent;
138 struct pf_anchor *child;
139 } pf_anchor_stack[64];
141 vm_zone_t pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
142 vm_zone_t pf_state_pl, pf_state_key_pl, pf_state_item_pl;
143 vm_zone_t pf_altq_pl;
145 void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
147 void pf_init_threshold(struct pf_threshold *, u_int32_t,
149 void pf_add_threshold(struct pf_threshold *);
150 int pf_check_threshold(struct pf_threshold *);
152 void pf_change_ap(struct pf_addr *, u_int16_t *,
153 u_int16_t *, u_int16_t *, struct pf_addr *,
154 u_int16_t, u_int8_t, sa_family_t);
155 int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
156 struct tcphdr *, struct pf_state_peer *);
158 void pf_change_a6(struct pf_addr *, u_int16_t *,
159 struct pf_addr *, u_int8_t);
161 void pf_change_icmp(struct pf_addr *, u_int16_t *,
162 struct pf_addr *, struct pf_addr *, u_int16_t,
163 u_int16_t *, u_int16_t *, u_int16_t *,
164 u_int16_t *, u_int8_t, sa_family_t);
165 void pf_send_tcp(const struct pf_rule *, sa_family_t,
166 const struct pf_addr *, const struct pf_addr *,
167 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
168 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
169 u_int16_t, struct ether_header *, struct ifnet *);
170 void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
171 sa_family_t, struct pf_rule *);
172 struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
173 int, int, struct pfi_kif *,
174 struct pf_addr *, u_int16_t, struct pf_addr *,
176 struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
177 int, int, struct pfi_kif *, struct pf_src_node **,
178 struct pf_state_key **, struct pf_state_key **,
179 struct pf_state_key **, struct pf_state_key **,
180 struct pf_addr *, struct pf_addr *,
181 u_int16_t, u_int16_t);
182 void pf_detach_state(struct pf_state *);
183 int pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
184 struct pf_state_key **, struct pf_state_key **,
185 struct pf_state_key **, struct pf_state_key **,
186 struct pf_addr *, struct pf_addr *,
187 u_int16_t, u_int16_t);
188 void pf_state_key_detach(struct pf_state *, int);
189 u_int32_t pf_tcp_iss(struct pf_pdesc *);
190 int pf_test_rule(struct pf_rule **, struct pf_state **,
191 int, struct pfi_kif *, struct mbuf *, int,
192 void *, struct pf_pdesc *, struct pf_rule **,
193 struct pf_ruleset **, struct ifqueue *, struct inpcb *);
194 static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
195 struct pf_rule *, struct pf_pdesc *,
196 struct pf_src_node *, struct pf_state_key *,
197 struct pf_state_key *, struct pf_state_key *,
198 struct pf_state_key *, struct mbuf *, int,
199 u_int16_t, u_int16_t, int *, struct pfi_kif *,
200 struct pf_state **, int, u_int16_t, u_int16_t,
202 int pf_test_fragment(struct pf_rule **, int,
203 struct pfi_kif *, struct mbuf *, void *,
204 struct pf_pdesc *, struct pf_rule **,
205 struct pf_ruleset **);
206 int pf_tcp_track_full(struct pf_state_peer *,
207 struct pf_state_peer *, struct pf_state **,
208 struct pfi_kif *, struct mbuf *, int,
209 struct pf_pdesc *, u_short *, int *);
210 int pf_tcp_track_sloppy(struct pf_state_peer *,
211 struct pf_state_peer *, struct pf_state **,
212 struct pf_pdesc *, u_short *);
213 int pf_test_state_tcp(struct pf_state **, int,
214 struct pfi_kif *, struct mbuf *, int,
215 void *, struct pf_pdesc *, u_short *);
216 int pf_test_state_udp(struct pf_state **, int,
217 struct pfi_kif *, struct mbuf *, int,
218 void *, struct pf_pdesc *);
219 int pf_test_state_icmp(struct pf_state **, int,
220 struct pfi_kif *, struct mbuf *, int,
221 void *, struct pf_pdesc *, u_short *);
222 int pf_test_state_other(struct pf_state **, int,
223 struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
224 void pf_step_into_anchor(int *, struct pf_ruleset **, int,
225 struct pf_rule **, struct pf_rule **, int *);
226 int pf_step_out_of_anchor(int *, struct pf_ruleset **,
227 int, struct pf_rule **, struct pf_rule **,
229 void pf_hash(struct pf_addr *, struct pf_addr *,
230 struct pf_poolhashkey *, sa_family_t);
231 int pf_map_addr(u_int8_t, struct pf_rule *,
232 struct pf_addr *, struct pf_addr *,
233 struct pf_addr *, struct pf_src_node **);
234 int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
235 struct pf_addr *, struct pf_addr *, u_int16_t,
236 struct pf_addr *, u_int16_t*, u_int16_t, u_int16_t,
237 struct pf_src_node **);
238 void pf_route(struct mbuf **, struct pf_rule *, int,
239 struct ifnet *, struct pf_state *,
241 void pf_route6(struct mbuf **, struct pf_rule *, int,
242 struct ifnet *, struct pf_state *,
244 u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
246 u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
248 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
250 void pf_set_rt_ifp(struct pf_state *,
252 int pf_check_proto_cksum(struct mbuf *, int, int,
253 u_int8_t, sa_family_t);
254 struct pf_divert *pf_get_divert(struct mbuf *);
255 void pf_print_state_parts(struct pf_state *,
256 struct pf_state_key *, struct pf_state_key *);
257 int pf_addr_wrap_neq(struct pf_addr_wrap *,
258 struct pf_addr_wrap *);
259 struct pf_state *pf_find_state(struct pfi_kif *,
260 struct pf_state_key_cmp *, u_int, struct mbuf *);
261 int pf_src_connlimit(struct pf_state **);
262 int pf_check_congestion(struct ifqueue *);
264 extern int pf_end_threads;
266 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
267 { &pf_state_pl, PFSTATE_HIWAT },
268 { &pf_src_tree_pl, PFSNODE_HIWAT },
269 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
270 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
271 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
274 #define STATE_LOOKUP(i, k, d, s, m) \
276 s = pf_find_state(i, k, d, m); \
277 if (s == NULL || (s)->timeout == PFTM_PURGE) \
280 (((s)->rule.ptr->rt == PF_ROUTETO && \
281 (s)->rule.ptr->direction == PF_OUT) || \
282 ((s)->rule.ptr->rt == PF_REPLYTO && \
283 (s)->rule.ptr->direction == PF_IN)) && \
284 (s)->rt_kif != NULL && \
289 #define BOUND_IFACE(r, k) \
290 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
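/*
 * BOUND_IFACE: states created by an interface-bound rule (PFRULE_IFBOUND)
 * are tied to the kif they were created on; all other states float on the
 * pfi_all group and can match packets on any interface.
 */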
292 #define STATE_INC_COUNTERS(s) \
294 s->rule.ptr->states_cur++; \
295 s->rule.ptr->states_tot++; \
296 if (s->anchor.ptr != NULL) { \
297 s->anchor.ptr->states_cur++; \
298 s->anchor.ptr->states_tot++; \
300 if (s->nat_rule.ptr != NULL) { \
301 s->nat_rule.ptr->states_cur++; \
302 s->nat_rule.ptr->states_tot++; \
306 #define STATE_DEC_COUNTERS(s) \
308 if (s->nat_rule.ptr != NULL) \
309 s->nat_rule.ptr->states_cur--; \
310 if (s->anchor.ptr != NULL) \
311 s->anchor.ptr->states_cur--; \
312 s->rule.ptr->states_cur--; \
315 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
316 static __inline int pf_state_compare_key(struct pf_state_key *,
317 struct pf_state_key *);
318 static __inline int pf_state_compare_id(struct pf_state *,
321 struct pf_src_tree tree_src_tracking;
323 struct pf_state_tree_id tree_id;
324 struct pf_state_queue state_list;
326 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
327 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
328 RB_GENERATE(pf_state_tree_id, pf_state,
329 entry_id, pf_state_compare_id);
332 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
336 if (a->rule.ptr > b->rule.ptr)
338 if (a->rule.ptr < b->rule.ptr)
340 if ((diff = a->af - b->af) != 0)
345 if (a->addr.addr32[0] > b->addr.addr32[0])
347 if (a->addr.addr32[0] < b->addr.addr32[0])
353 if (a->addr.addr32[3] > b->addr.addr32[3])
355 if (a->addr.addr32[3] < b->addr.addr32[3])
357 if (a->addr.addr32[2] > b->addr.addr32[2])
359 if (a->addr.addr32[2] < b->addr.addr32[2])
361 if (a->addr.addr32[1] > b->addr.addr32[1])
363 if (a->addr.addr32[1] < b->addr.addr32[1])
365 if (a->addr.addr32[0] > b->addr.addr32[0])
367 if (a->addr.addr32[0] < b->addr.addr32[0])
376 pf_state_hash(struct pf_state_key *sk)
378 u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));
379 if (hv == 0) /* disallow 0 */
386 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
391 dst->addr32[0] = src->addr32[0];
395 dst->addr32[0] = src->addr32[0];
396 dst->addr32[1] = src->addr32[1];
397 dst->addr32[2] = src->addr32[2];
398 dst->addr32[3] = src->addr32[3];
405 pf_init_threshold(struct pf_threshold *threshold,
406 u_int32_t limit, u_int32_t seconds)
408 threshold->limit = limit * PF_THRESHOLD_MULT;
409 threshold->seconds = seconds;
410 threshold->count = 0;
411 threshold->last = time_second;
415 pf_add_threshold(struct pf_threshold *threshold)
417 u_int32_t t = time_second, diff = t - threshold->last;
419 if (diff >= threshold->seconds)
420 threshold->count = 0;
422 threshold->count -= threshold->count * diff /
424 threshold->count += PF_THRESHOLD_MULT;
429 pf_check_threshold(struct pf_threshold *threshold)
431 return (threshold->count > threshold->limit);
435 pf_src_connlimit(struct pf_state **state)
439 (*state)->src_node->conn++;
440 (*state)->src.tcp_est = 1;
441 pf_add_threshold(&(*state)->src_node->conn_rate);
443 if ((*state)->rule.ptr->max_src_conn &&
444 (*state)->rule.ptr->max_src_conn <
445 (*state)->src_node->conn) {
446 pf_status.lcounters[LCNT_SRCCONN]++;
450 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
451 pf_check_threshold(&(*state)->src_node->conn_rate)) {
452 pf_status.lcounters[LCNT_SRCCONNRATE]++;
459 if ((*state)->rule.ptr->overload_tbl) {
461 u_int32_t killed = 0;
463 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
464 if (pf_status.debug >= PF_DEBUG_MISC) {
465 kprintf("pf_src_connlimit: blocking address ");
466 pf_print_host(&(*state)->src_node->addr, 0,
467 (*state)->key[PF_SK_WIRE]->af);
470 bzero(&p, sizeof(p));
471 p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
472 switch ((*state)->key[PF_SK_WIRE]->af) {
476 p.pfra_ip4addr = (*state)->src_node->addr.v4;
482 p.pfra_ip6addr = (*state)->src_node->addr.v6;
487 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
490 /* kill existing states if that's required. */
491 if ((*state)->rule.ptr->flush) {
492 struct pf_state_key *sk;
495 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
496 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
497 sk = st->key[PF_SK_WIRE];
499 * Kill states from this source. (Only those
500 * from the same rule if PF_FLUSH_GLOBAL is not set)
504 (*state)->key[PF_SK_WIRE]->af &&
505 (((*state)->direction == PF_OUT &&
506 PF_AEQ(&(*state)->src_node->addr,
507 &sk->addr[0], sk->af)) ||
508 ((*state)->direction == PF_IN &&
509 PF_AEQ(&(*state)->src_node->addr,
510 &sk->addr[1], sk->af))) &&
511 ((*state)->rule.ptr->flush &
513 (*state)->rule.ptr == st->rule.ptr)) {
514 st->timeout = PFTM_PURGE;
515 st->src.state = st->dst.state =
520 if (pf_status.debug >= PF_DEBUG_MISC)
521 kprintf(", %u states killed", killed);
523 if (pf_status.debug >= PF_DEBUG_MISC)
527 /* kill this state */
528 (*state)->timeout = PFTM_PURGE;
529 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
534 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
535 struct pf_addr *src, sa_family_t af)
537 struct pf_src_node k;
541 PF_ACPY(&k.addr, src, af);
542 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
543 rule->rpool.opts & PF_POOL_STICKYADDR)
547 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
548 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
551 if (!rule->max_src_nodes ||
552 rule->src_nodes < rule->max_src_nodes)
553 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
555 pf_status.lcounters[LCNT_SRCNODES]++;
559 pf_init_threshold(&(*sn)->conn_rate,
560 rule->max_src_conn_rate.limit,
561 rule->max_src_conn_rate.seconds);
564 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
565 rule->rpool.opts & PF_POOL_STICKYADDR)
566 (*sn)->rule.ptr = rule;
568 (*sn)->rule.ptr = NULL;
569 PF_ACPY(&(*sn)->addr, src, af);
570 if (RB_INSERT(pf_src_tree,
571 &tree_src_tracking, *sn) != NULL) {
572 if (pf_status.debug >= PF_DEBUG_MISC) {
573 kprintf("pf: src_tree insert failed: ");
574 pf_print_host(&(*sn)->addr, 0, af);
577 pool_put(&pf_src_tree_pl, *sn);
580 (*sn)->creation = time_second;
581 (*sn)->ruletype = rule->action;
582 if ((*sn)->rule.ptr != NULL)
583 (*sn)->rule.ptr->src_nodes++;
584 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
585 pf_status.src_nodes++;
587 if (rule->max_src_states &&
588 (*sn)->states >= rule->max_src_states) {
589 pf_status.lcounters[LCNT_SRCSTATES]++;
596 /* state table stuff */
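/*
 * pf_state_compare_key() defines the ordering of the pf_statetbl RB tree:
 * protocol first, then address family, then both addresses and finally
 * both ports, so pf_find_state() can search with a fully-populated
 * pf_state_key_cmp as the lookup key.
 */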
599 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
603 if ((diff = a->proto - b->proto) != 0)
605 if ((diff = a->af - b->af) != 0)
610 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
612 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
614 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
616 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
622 if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
624 if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
626 if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
628 if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
630 if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
632 if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
634 if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
636 if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
638 if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
640 if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
642 if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
644 if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
646 if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
648 if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
650 if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
652 if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
658 if ((diff = a->port[0] - b->port[0]) != 0)
660 if ((diff = a->port[1] - b->port[1]) != 0)
667 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
673 if (a->creatorid > b->creatorid)
675 if (a->creatorid < b->creatorid)
682 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
684 struct pf_state_item *si;
685 struct pf_state_key *cur;
687 KKASSERT(s->key[idx] == NULL); /* XXX handle this? */
689 if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
690 /* key exists. check for same kif, if none, add to key */
691 TAILQ_FOREACH(si, &cur->states, entry)
692 if (si->s->kif == s->kif &&
693 si->s->direction == s->direction) {
694 if (pf_status.debug >= PF_DEBUG_MISC) {
696 "pf: %s key attach failed on %s: ",
697 (idx == PF_SK_WIRE) ?
700 pf_print_state_parts(s,
701 (idx == PF_SK_WIRE) ? sk : NULL,
702 (idx == PF_SK_STACK) ? sk : NULL);
705 pool_put(&pf_state_key_pl, sk);
706 return (-1); /* collision! */
708 pool_put(&pf_state_key_pl, sk);
713 if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
714 pf_state_key_detach(s, idx);
719 /* list is sorted, if-bound states before floating */
720 if (s->kif == pfi_all)
721 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
723 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
728 pf_detach_state(struct pf_state *s)
730 if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
731 s->key[PF_SK_WIRE] = NULL;
733 if (s->key[PF_SK_STACK] != NULL)
734 pf_state_key_detach(s, PF_SK_STACK);
736 if (s->key[PF_SK_WIRE] != NULL)
737 pf_state_key_detach(s, PF_SK_WIRE);
741 pf_state_key_detach(struct pf_state *s, int idx)
743 struct pf_state_item *si;
745 si = TAILQ_FIRST(&s->key[idx]->states);
746 while (si && si->s != s)
747 si = TAILQ_NEXT(si, entry);
750 TAILQ_REMOVE(&s->key[idx]->states, si, entry);
751 pool_put(&pf_state_item_pl, si);
754 if (TAILQ_EMPTY(&s->key[idx]->states)) {
755 RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
756 if (s->key[idx]->reverse)
757 s->key[idx]->reverse->reverse = NULL;
758 if (s->key[idx]->inp)
759 s->key[idx]->inp->inp_pf_sk = NULL;
760 pool_put(&pf_state_key_pl, s->key[idx]);
765 struct pf_state_key *
766 pf_alloc_state_key(int pool_flags)
768 struct pf_state_key *sk;
770 if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
772 TAILQ_INIT(&sk->states);
778 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
779 struct pf_state_key **skw, struct pf_state_key **sks,
780 struct pf_state_key **skp, struct pf_state_key **nkp,
781 struct pf_addr *saddr, struct pf_addr *daddr,
782 u_int16_t sport, u_int16_t dport)
784 KKASSERT((*skp == NULL && *nkp == NULL));
786 if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
789 PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
790 PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
791 (*skp)->port[pd->sidx] = sport;
792 (*skp)->port[pd->didx] = dport;
793 (*skp)->proto = pd->proto;
797 if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
798 return (ENOMEM); /* caller must handle cleanup */
800 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
801 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
802 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
803 (*nkp)->port[0] = (*skp)->port[0];
804 (*nkp)->port[1] = (*skp)->port[1];
805 (*nkp)->proto = pd->proto;
810 if (pd->dir == PF_IN) {
822 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
823 struct pf_state_key *sks, struct pf_state *s)
828 if (pf_state_key_attach(skw, s, PF_SK_WIRE))
830 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
832 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
833 pool_put(&pf_state_key_pl, sks);
836 if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
837 pf_state_key_detach(s, PF_SK_WIRE);
842 if (s->id == 0 && s->creatorid == 0) {
843 s->id = htobe64(pf_status.stateid++);
844 s->creatorid = pf_status.hostid;
846 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
847 if (pf_status.debug >= PF_DEBUG_MISC) {
848 kprintf("pf: state insert failed: "
849 "id: %016jx creatorid: %08x",
850 (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
851 if (s->sync_flags & PFSTATE_FROMSYNC)
852 kprintf(" (from sync)");
858 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
859 pf_status.fcounters[FCNT_STATE_INSERT]++;
861 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
862 pfsync_insert_state(s);
867 pf_find_state_byid(struct pf_state_cmp *key)
869 pf_status.fcounters[FCNT_STATE_SEARCH]++;
871 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
875 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
878 struct pf_state_key *sk;
879 struct pf_state_item *si;
881 pf_status.fcounters[FCNT_STATE_SEARCH]++;
883 if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
884 ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
885 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
887 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
888 (struct pf_state_key *)key)) == NULL)
890 if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
891 ((struct pf_state_key *)
892 m->m_pkthdr.pf.statekey)->reverse = sk;
893 sk->reverse = m->m_pkthdr.pf.statekey;
898 m->m_pkthdr.pf.statekey = NULL;
900 /* list is sorted, if-bound states before floating ones */
901 TAILQ_FOREACH(si, &sk->states, entry)
902 if ((si->s->kif == pfi_all || si->s->kif == kif) &&
903 sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
904 si->s->key[PF_SK_STACK]))
911 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
913 struct pf_state_key *sk;
914 struct pf_state_item *si, *ret = NULL;
916 pf_status.fcounters[FCNT_STATE_SEARCH]++;
918 sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
921 TAILQ_FOREACH(si, &sk->states, entry)
922 if (dir == PF_INOUT ||
923 (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
924 si->s->key[PF_SK_STACK]))) {
934 return (ret ? ret->s : NULL);
937 /* END state table stuff */
941 pf_purge_thread(void *v)
946 lwkt_gettoken(&pf_token);
948 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
950 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
952 if (pf_end_threads) {
953 pf_purge_expired_states(pf_status.states, 1);
954 pf_purge_expired_fragments();
955 pf_purge_expired_src_nodes(1);
958 lockmgr(&pf_consistency_lock, LK_RELEASE);
959 wakeup(pf_purge_thread);
964 /* process a fraction of the state table every second */
965 if (!pf_purge_expired_states(1 + (pf_status.states
966 / pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
968 pf_purge_expired_states(1 + (pf_status.states
969 / pf_default_rule.timeout[PFTM_INTERVAL]), 1);
972 /* purge other expired types every PFTM_INTERVAL seconds */
973 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
974 pf_purge_expired_fragments();
975 if (!pf_purge_expired_src_nodes(locked)) {
976 pf_purge_expired_src_nodes(1);
981 lockmgr(&pf_consistency_lock, LK_RELEASE);
983 lwkt_reltoken(&pf_token);
987 pf_state_expires(const struct pf_state *state)
994 /* handle all PFTM_* > PFTM_MAX here */
995 if (state->timeout == PFTM_PURGE)
996 return (time_second);
997 if (state->timeout == PFTM_UNTIL_PACKET)
999 KKASSERT(state->timeout != PFTM_UNLINKED);
1000 KKASSERT(state->timeout < PFTM_MAX);
1001 timeout = state->rule.ptr->timeout[state->timeout];
1003 timeout = pf_default_rule.timeout[state->timeout];
1004 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1006 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1007 states = state->rule.ptr->states_cur;
1009 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1010 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1011 states = pf_status.states;
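	/*
	 * Adaptive timeouts: once the state count passes adaptive.start,
	 * scale the timeout down linearly so that it reaches zero at
	 * adaptive.end, i.e. timeout * (end - states) / (end - start).
	 */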
1013 if (end && states > start && start < end) {
1015 return (state->expire + timeout * (end - states) /
1018 return (time_second);
1020 return (state->expire + timeout);
1024 pf_purge_expired_src_nodes(int waslocked)
1026 struct pf_src_node *cur, *next;
1027 int locked = waslocked;
1029 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1030 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1032 if (cur->states <= 0 && cur->expire <= time_second) {
1034 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
1035 next = RB_NEXT(pf_src_tree,
1036 &tree_src_tracking, cur);
1039 if (cur->rule.ptr != NULL) {
1040 cur->rule.ptr->src_nodes--;
1041 if (cur->rule.ptr->states_cur <= 0 &&
1042 cur->rule.ptr->max_src_nodes <= 0)
1043 pf_rm_rule(NULL, cur->rule.ptr);
1045 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1046 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1047 pf_status.src_nodes--;
1048 pool_put(&pf_src_tree_pl, cur);
1052 if (locked && !waslocked)
1053 lockmgr(&pf_consistency_lock, LK_RELEASE);
1058 pf_src_tree_remove_state(struct pf_state *s)
1062 if (s->src_node != NULL) {
1064 --s->src_node->conn;
1065 if (--s->src_node->states <= 0) {
1066 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1069 pf_default_rule.timeout[PFTM_SRC_NODE];
1070 s->src_node->expire = time_second + timeout;
1073 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1074 if (--s->nat_src_node->states <= 0) {
1075 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1078 pf_default_rule.timeout[PFTM_SRC_NODE];
1079 s->nat_src_node->expire = time_second + timeout;
1082 s->src_node = s->nat_src_node = NULL;
1085 /* callers should be at crit_enter() */
1087 pf_unlink_state(struct pf_state *cur)
1089 if (cur->src.state == PF_TCPS_PROXY_DST) {
1090 /* XXX wire key the right one? */
1091 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
1092 &cur->key[PF_SK_WIRE]->addr[1],
1093 &cur->key[PF_SK_WIRE]->addr[0],
1094 cur->key[PF_SK_WIRE]->port[1],
1095 cur->key[PF_SK_WIRE]->port[0],
1096 cur->src.seqhi, cur->src.seqlo + 1,
1097 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1099 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1100 if (cur->creatorid == pf_status.hostid)
1101 pfsync_delete_state(cur);
1102 cur->timeout = PFTM_UNLINKED;
1103 pf_src_tree_remove_state(cur);
1104 pf_detach_state(cur);
1107 static struct pf_state *purge_cur;
1109 /* callers should be at crit_enter() and hold the
1110 * write_lock on pf_consistency_lock */
1112 pf_free_state(struct pf_state *cur)
1114 if (pfsyncif != NULL &&
1115 (pfsyncif->sc_bulk_send_next == cur ||
1116 pfsyncif->sc_bulk_terminator == cur))
1118 KKASSERT(cur->timeout == PFTM_UNLINKED);
1119 if (--cur->rule.ptr->states_cur <= 0 &&
1120 cur->rule.ptr->src_nodes <= 0)
1121 pf_rm_rule(NULL, cur->rule.ptr);
1122 if (cur->nat_rule.ptr != NULL)
1123 if (--cur->nat_rule.ptr->states_cur <= 0 &&
1124 cur->nat_rule.ptr->src_nodes <= 0)
1125 pf_rm_rule(NULL, cur->nat_rule.ptr);
1126 if (cur->anchor.ptr != NULL)
1127 if (--cur->anchor.ptr->states_cur <= 0)
1128 pf_rm_rule(NULL, cur->anchor.ptr);
1129 pf_normalize_tcp_cleanup(cur);
1130 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1133 * We may be freeing pf_purge_expired_states()'s saved scan entry,
1134 * adjust it if necessary.
1136 if (purge_cur == cur) {
1137 kprintf("PURGE CONFLICT\n");
1138 purge_cur = TAILQ_NEXT(purge_cur, entry_list);
1140 TAILQ_REMOVE(&state_list, cur, entry_list);
1142 pf_tag_unref(cur->tag);
1143 pool_put(&pf_state_pl, cur);
1144 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1149 pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
1151 struct pf_state *cur;
1152 int locked = waslocked;
1154 while (maxcheck--) {
1156 * Wrap to start of list when we hit the end
1160 cur = TAILQ_FIRST(&state_list);
1162 break; /* list empty */
1166 * Setup next (purge_cur) while we process this one. If we block and
1167 * something else deletes purge_cur, pf_free_state() will adjust it further
1170 purge_cur = TAILQ_NEXT(cur, entry_list);
1172 if (cur->timeout == PFTM_UNLINKED) {
1173 /* free unlinked state */
1175 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
1179 } else if (pf_state_expires(cur) <= time_second) {
1180 /* unlink and free expired state */
1181 pf_unlink_state(cur);
1183 if (!lockmgr(&pf_consistency_lock, LK_EXCLUSIVE))
1192 lockmgr(&pf_consistency_lock, LK_RELEASE);
1197 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1199 if (aw->type != PF_ADDR_TABLE)
1201 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1207 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1209 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1211 pfr_detach_table(aw->p.tbl);
1216 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1218 struct pfr_ktable *kt = aw->p.tbl;
1220 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1222 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1223 kt = kt->pfrkt_root;
1225 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1230 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1235 u_int32_t a = ntohl(addr->addr32[0]);
1236 kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1248 u_int8_t i, curstart = 255, curend = 0,
1249 maxstart = 0, maxend = 0;
1250 for (i = 0; i < 8; i++) {
1251 if (!addr->addr16[i]) {
1252 if (curstart == 255)
1258 if ((curend - curstart) >
1259 (maxend - maxstart)) {
1260 maxstart = curstart;
1267 for (i = 0; i < 8; i++) {
1268 if (i >= maxstart && i <= maxend) {
1277 b = ntohs(addr->addr16[i]);
1294 pf_print_state(struct pf_state *s)
1296 pf_print_state_parts(s, NULL, NULL);
1300 pf_print_state_parts(struct pf_state *s,
1301 struct pf_state_key *skwp, struct pf_state_key *sksp)
1303 struct pf_state_key *skw, *sks;
1304 u_int8_t proto, dir;
1306 /* Do our best to fill these, but they're skipped if NULL */
1307 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1308 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1309 proto = skw ? skw->proto : (sks ? sks->proto : 0);
1310 dir = s ? s->direction : 0;
1322 case IPPROTO_ICMPV6:
1326 kprintf("%u ", skw->proto);
1339 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1341 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1344 kprintf(" stack: ");
1346 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1348 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1353 if (proto == IPPROTO_TCP) {
1354 kprintf(" [lo=%u high=%u win=%u modulator=%u",
1355 s->src.seqlo, s->src.seqhi,
1356 s->src.max_win, s->src.seqdiff);
1357 if (s->src.wscale && s->dst.wscale)
1358 kprintf(" wscale=%u",
1359 s->src.wscale & PF_WSCALE_MASK);
1361 kprintf(" [lo=%u high=%u win=%u modulator=%u",
1362 s->dst.seqlo, s->dst.seqhi,
1363 s->dst.max_win, s->dst.seqdiff);
1364 if (s->src.wscale && s->dst.wscale)
1365 kprintf(" wscale=%u",
1366 s->dst.wscale & PF_WSCALE_MASK);
1369 kprintf(" %u:%u", s->src.state, s->dst.state);
1374 pf_print_flags(u_int8_t f)
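/*
 * Skip steps: for each rule field (interface, direction, af, proto,
 * source/destination address and port) precompute a pointer to the next
 * rule whose value differs, so the main rule evaluation loop can skip
 * over whole runs of rules that cannot match the current packet.
 */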
1396 #define PF_SET_SKIP_STEPS(i) \
1398 while (head[i] != cur) { \
1399 head[i]->skip[i].ptr = cur; \
1400 head[i] = TAILQ_NEXT(head[i], entries); \
1405 pf_calc_skip_steps(struct pf_rulequeue *rules)
1407 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1410 cur = TAILQ_FIRST(rules);
1412 for (i = 0; i < PF_SKIP_COUNT; ++i)
1414 while (cur != NULL) {
1416 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1417 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1418 if (cur->direction != prev->direction)
1419 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1420 if (cur->af != prev->af)
1421 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1422 if (cur->proto != prev->proto)
1423 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1424 if (cur->src.neg != prev->src.neg ||
1425 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1426 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1427 if (cur->src.port[0] != prev->src.port[0] ||
1428 cur->src.port[1] != prev->src.port[1] ||
1429 cur->src.port_op != prev->src.port_op)
1430 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1431 if (cur->dst.neg != prev->dst.neg ||
1432 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1433 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1434 if (cur->dst.port[0] != prev->dst.port[0] ||
1435 cur->dst.port[1] != prev->dst.port[1] ||
1436 cur->dst.port_op != prev->dst.port_op)
1437 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1440 cur = TAILQ_NEXT(cur, entries);
1442 for (i = 0; i < PF_SKIP_COUNT; ++i)
1443 PF_SET_SKIP_STEPS(i);
1447 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1449 if (aw1->type != aw2->type)
1451 switch (aw1->type) {
1452 case PF_ADDR_ADDRMASK:
1454 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1456 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1459 case PF_ADDR_DYNIFTL:
1460 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1461 case PF_ADDR_NOROUTE:
1462 case PF_ADDR_URPFFAILED:
1465 return (aw1->p.tbl != aw2->p.tbl);
1466 case PF_ADDR_RTLABEL:
1467 return (aw1->v.rtlabel != aw2->v.rtlabel);
1469 kprintf("invalid address type: %d\n", aw1->type);
1475 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
1481 l = cksum + old - new;
1482 l = (l >> 16) + (l & 65535);
1490 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
1491 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
1496 PF_ACPY(&ao, a, af);
1504 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1505 ao.addr16[0], an->addr16[0], 0),
1506 ao.addr16[1], an->addr16[1], 0);
1508 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1509 ao.addr16[0], an->addr16[0], u),
1510 ao.addr16[1], an->addr16[1], u),
1516 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1517 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1518 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1519 ao.addr16[0], an->addr16[0], u),
1520 ao.addr16[1], an->addr16[1], u),
1521 ao.addr16[2], an->addr16[2], u),
1522 ao.addr16[3], an->addr16[3], u),
1523 ao.addr16[4], an->addr16[4], u),
1524 ao.addr16[5], an->addr16[5], u),
1525 ao.addr16[6], an->addr16[6], u),
1526 ao.addr16[7], an->addr16[7], u),
1534 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
1536 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
1540 memcpy(&ao, a, sizeof(ao));
1541 memcpy(a, &an, sizeof(u_int32_t));
1542 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
1543 ao % 65536, an % 65536, u);
1548 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
1552 PF_ACPY(&ao, a, AF_INET6);
1553 PF_ACPY(a, an, AF_INET6);
1555 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1556 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1557 pf_cksum_fixup(pf_cksum_fixup(*c,
1558 ao.addr16[0], an->addr16[0], u),
1559 ao.addr16[1], an->addr16[1], u),
1560 ao.addr16[2], an->addr16[2], u),
1561 ao.addr16[3], an->addr16[3], u),
1562 ao.addr16[4], an->addr16[4], u),
1563 ao.addr16[5], an->addr16[5], u),
1564 ao.addr16[6], an->addr16[6], u),
1565 ao.addr16[7], an->addr16[7], u);
1570 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
1571 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
1572 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
1574 struct pf_addr oia, ooa;
1576 PF_ACPY(&oia, ia, af);
1578 PF_ACPY(&ooa, oa, af);
1580 /* Change inner protocol port, fix inner protocol checksum. */
1582 u_int16_t oip = *ip;
1589 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
1590 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
1592 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
1594 /* Change inner ip address, fix inner ip and icmp checksums. */
1595 PF_ACPY(ia, na, af);
1599 u_int32_t oh2c = *h2c;
1601 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
1602 oia.addr16[0], ia->addr16[0], 0),
1603 oia.addr16[1], ia->addr16[1], 0);
1604 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1605 oia.addr16[0], ia->addr16[0], 0),
1606 oia.addr16[1], ia->addr16[1], 0);
1607 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
1613 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1614 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1615 pf_cksum_fixup(pf_cksum_fixup(*ic,
1616 oia.addr16[0], ia->addr16[0], u),
1617 oia.addr16[1], ia->addr16[1], u),
1618 oia.addr16[2], ia->addr16[2], u),
1619 oia.addr16[3], ia->addr16[3], u),
1620 oia.addr16[4], ia->addr16[4], u),
1621 oia.addr16[5], ia->addr16[5], u),
1622 oia.addr16[6], ia->addr16[6], u),
1623 oia.addr16[7], ia->addr16[7], u);
1627 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
1629 PF_ACPY(oa, na, af);
1633 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
1634 ooa.addr16[0], oa->addr16[0], 0),
1635 ooa.addr16[1], oa->addr16[1], 0);
1640 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1641 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1642 pf_cksum_fixup(pf_cksum_fixup(*ic,
1643 ooa.addr16[0], oa->addr16[0], u),
1644 ooa.addr16[1], oa->addr16[1], u),
1645 ooa.addr16[2], oa->addr16[2], u),
1646 ooa.addr16[3], oa->addr16[3], u),
1647 ooa.addr16[4], oa->addr16[4], u),
1648 ooa.addr16[5], oa->addr16[5], u),
1649 ooa.addr16[6], oa->addr16[6], u),
1650 ooa.addr16[7], oa->addr16[7], u);
1659 * Need to modulate the sequence numbers in the TCP SACK option
1660 * (credits to Krzysztof Pfaff for report and patch)
1663 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
1664 struct tcphdr *th, struct pf_state_peer *dst)
1666 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
1667 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
1668 int copyback = 0, i, olen;
1669 struct raw_sackblock sack;
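	/*
	 * Each SACK block edge is shifted by the peer's seqdiff, the same
	 * offset pf applies to th_seq/th_ack when modulating sequence
	 * numbers, so the scrubbed sequence space stays consistent.
	 */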
1671 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
1672 if (hlen < TCPOLEN_SACKLEN ||
1673 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
1676 while (hlen >= TCPOLEN_SACKLEN) {
1679 case TCPOPT_EOL: /* FALLTHROUGH */
1687 if (olen >= TCPOLEN_SACKLEN) {
1688 for (i = 2; i + TCPOLEN_SACK <= olen;
1689 i += TCPOLEN_SACK) {
1690 memcpy(&sack, &opt[i], sizeof(sack));
1691 pf_change_a(&sack.rblk_start, &th->th_sum,
1692 htonl(ntohl(sack.rblk_start) -
1694 pf_change_a(&sack.rblk_end, &th->th_sum,
1695 htonl(ntohl(sack.rblk_end) -
1697 memcpy(&opt[i], &sack, sizeof(sack));
1711 m_copyback(m, off + sizeof(*th), thoptlen, opts);
1716 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
1717 const struct pf_addr *saddr, const struct pf_addr *daddr,
1718 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
1719 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
1720 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
1725 struct ip *h = NULL;
1728 struct ip6_hdr *h6 = NULL;
1730 struct tcphdr *th = NULL;
1733 ASSERT_LWKT_TOKEN_HELD(&pf_token);
1735 /* maximum segment size tcp option */
1736 tlen = sizeof(struct tcphdr);
1743 len = sizeof(struct ip) + tlen;
1748 len = sizeof(struct ip6_hdr) + tlen;
1754 * Create outgoing mbuf.
1756 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
1757 * so make sure pf.flags is clear.
1759 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
1764 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
1765 m->m_pkthdr.pf.flags = 0;
1766 m->m_pkthdr.pf.tag = rtag;
1767 /* XXX Recheck when upgrading to > 4.4 */
1768 m->m_pkthdr.pf.statekey = NULL;
1769 if (r != NULL && r->rtableid >= 0)
1770 m->m_pkthdr.pf.rtableid = r->rtableid;
1773 if (r != NULL && r->qid) {
1774 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
1775 m->m_pkthdr.pf.qid = r->qid;
1776 m->m_pkthdr.pf.ecn_af = af;
1777 m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
1780 m->m_data += max_linkhdr;
1781 m->m_pkthdr.len = m->m_len = len;
1782 m->m_pkthdr.rcvif = NULL;
1783 bzero(m->m_data, len);
1787 h = mtod(m, struct ip *);
1789 /* IP header fields included in the TCP checksum */
1790 h->ip_p = IPPROTO_TCP;
1792 h->ip_src.s_addr = saddr->v4.s_addr;
1793 h->ip_dst.s_addr = daddr->v4.s_addr;
1795 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
1800 h6 = mtod(m, struct ip6_hdr *);
1802 /* IP header fields included in the TCP checksum */
1803 h6->ip6_nxt = IPPROTO_TCP;
1804 h6->ip6_plen = htons(tlen);
1805 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
1806 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
1808 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
1814 th->th_sport = sport;
1815 th->th_dport = dport;
1816 th->th_seq = htonl(seq);
1817 th->th_ack = htonl(ack);
1818 th->th_off = tlen >> 2;
1819 th->th_flags = flags;
1820 th->th_win = htons(win);
1823 opt = (char *)(th + 1);
1824 opt[0] = TCPOPT_MAXSEG;
1827 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
1834 th->th_sum = in_cksum(m, len);
1836 /* Finish the IP header */
1838 h->ip_hl = sizeof(*h) >> 2;
1839 h->ip_tos = IPTOS_LOWDELAY;
1841 h->ip_off = path_mtu_discovery ? IP_DF : 0;
1842 h->ip_ttl = ttl ? ttl : ip_defttl;
1845 lwkt_reltoken(&pf_token);
1846 ip_output(m, NULL, NULL, 0, NULL, NULL);
1847 lwkt_gettoken(&pf_token);
1851 struct ether_header *e = (void *)ro.ro_dst.sa_data;
1859 ro.ro_dst.sa_len = sizeof(ro.ro_dst);
1860 ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
1861 bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
1862 bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
1863 e->ether_type = eh->ether_type;
1864 /* XXX_IMPORT: later */
1865 lwkt_reltoken(&pf_token);
1866 ip_output(m, (void *)NULL, &ro, 0,
1867 (void *)NULL, (void *)NULL);
1868 lwkt_gettoken(&pf_token);
1875 th->th_sum = in6_cksum(m, IPPROTO_TCP,
1876 sizeof(struct ip6_hdr), tlen);
1878 h6->ip6_vfc |= IPV6_VERSION;
1879 h6->ip6_hlim = IPV6_DEFHLIM;
1881 lwkt_reltoken(&pf_token);
1882 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1883 lwkt_gettoken(&pf_token);
1890 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
1896 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
1897 * so make sure pf.flags is clear.
1899 if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
1902 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
1903 m0->m_pkthdr.pf.flags = 0;
1904 /* XXX Recheck when upgrading to > 4.4 */
1905 m0->m_pkthdr.pf.statekey = NULL;
1907 if (r->rtableid >= 0)
1908 m0->m_pkthdr.pf.rtableid = r->rtableid;
1912 m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
1913 m0->m_pkthdr.pf.qid = r->qid;
1914 m0->m_pkthdr.pf.ecn_af = af;
1915 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
1922 icmp_error(m0, type, code, 0, 0);
1927 icmp6_error(m0, type, code, 0);
1934 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
1935 * If n is 0, they match if they are equal. If n is != 0, they match if they are different.
1939 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
1940 struct pf_addr *b, sa_family_t af)
1947 if ((a->addr32[0] & m->addr32[0]) ==
1948 (b->addr32[0] & m->addr32[0]))
1954 if (((a->addr32[0] & m->addr32[0]) ==
1955 (b->addr32[0] & m->addr32[0])) &&
1956 ((a->addr32[1] & m->addr32[1]) ==
1957 (b->addr32[1] & m->addr32[1])) &&
1958 ((a->addr32[2] & m->addr32[2]) ==
1959 (b->addr32[2] & m->addr32[2])) &&
1960 ((a->addr32[3] & m->addr32[3]) ==
1961 (b->addr32[3] & m->addr32[3])))
1980 * Return 1 if b <= a <= e, otherwise return 0.
1983 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
1984 struct pf_addr *a, sa_family_t af)
1989 if ((a->addr32[0] < b->addr32[0]) ||
1990 (a->addr32[0] > e->addr32[0]))
1999 for (i = 0; i < 4; ++i)
2000 if (a->addr32[i] > b->addr32[i])
2002 else if (a->addr32[i] < b->addr32[i])
2005 for (i = 0; i < 4; ++i)
2006 if (a->addr32[i] < e->addr32[i])
2008 else if (a->addr32[i] > e->addr32[i])
2018 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2022 return ((p > a1) && (p < a2));
2024 return ((p < a1) || (p > a2));
2026 return ((p >= a1) && (p <= a2));
2040 return (0); /* never reached */
2044 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2049 return (pf_match(op, a1, a2, p));
2053 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2055 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2057 return (pf_match(op, a1, a2, u));
2061 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2063 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2065 return (pf_match(op, a1, a2, g));
2069 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
2072 *tag = m->m_pkthdr.pf.tag;
2074 return ((!r->match_tag_not && r->match_tag == *tag) ||
2075 (r->match_tag_not && r->match_tag != *tag));
2079 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
2081 if (tag <= 0 && rtableid < 0)
2085 m->m_pkthdr.pf.tag = tag;
2087 m->m_pkthdr.pf.rtableid = rtableid;
2093 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2094 struct pf_rule **r, struct pf_rule **a, int *match)
2096 struct pf_anchor_stackframe *f;
2098 (*r)->anchor->match = 0;
2101 if (*depth >= sizeof(pf_anchor_stack) /
2102 sizeof(pf_anchor_stack[0])) {
2103 kprintf("pf_step_into_anchor: stack overflow\n");
2104 *r = TAILQ_NEXT(*r, entries);
2106 } else if (*depth == 0 && a != NULL)
2108 f = pf_anchor_stack + (*depth)++;
2111 if ((*r)->anchor_wildcard) {
2112 f->parent = &(*r)->anchor->children;
2113 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2118 *rs = &f->child->ruleset;
2122 *rs = &(*r)->anchor->ruleset;
2124 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2128 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2129 struct pf_rule **r, struct pf_rule **a, int *match)
2131 struct pf_anchor_stackframe *f;
2137 f = pf_anchor_stack + *depth - 1;
2138 if (f->parent != NULL && f->child != NULL) {
2139 if (f->child->match ||
2140 (match != NULL && *match)) {
2141 f->r->anchor->match = 1;
2144 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2145 if (f->child != NULL) {
2146 *rs = &f->child->ruleset;
2147 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2155 if (*depth == 0 && a != NULL)
2158 if (f->r->anchor->match || (match != NULL && *match))
2159 quick = f->r->quick;
2160 *r = TAILQ_NEXT(f->r, entries);
2161 } while (*r == NULL);
2168 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2169 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2174 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2175 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2179 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2180 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2181 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2182 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2183 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2184 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2185 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2186 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2192 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2197 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2201 if (addr->addr32[3] == 0xffffffff) {
2202 addr->addr32[3] = 0;
2203 if (addr->addr32[2] == 0xffffffff) {
2204 addr->addr32[2] = 0;
2205 if (addr->addr32[1] == 0xffffffff) {
2206 addr->addr32[1] = 0;
2208 htonl(ntohl(addr->addr32[0]) + 1);
2211 htonl(ntohl(addr->addr32[1]) + 1);
2214 htonl(ntohl(addr->addr32[2]) + 1);
2217 htonl(ntohl(addr->addr32[3]) + 1);
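/*
 * mix() is the Bob Jenkins lookup2-style mixing step used by pf_hash()
 * below; 0x9e3779b9 is the usual golden-ratio initializer.
 */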
2223 #define mix(a,b,c) \
2225 a -= b; a -= c; a ^= (c >> 13); \
2226 b -= c; b -= a; b ^= (a << 8); \
2227 c -= a; c -= b; c ^= (b >> 13); \
2228 a -= b; a -= c; a ^= (c >> 12); \
2229 b -= c; b -= a; b ^= (a << 16); \
2230 c -= a; c -= b; c ^= (b >> 5); \
2231 a -= b; a -= c; a ^= (c >> 3); \
2232 b -= c; b -= a; b ^= (a << 10); \
2233 c -= a; c -= b; c ^= (b >> 15); \
2237 * hash function based on bridge_hash in if_bridge.c
2240 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
2241 struct pf_poolhashkey *key, sa_family_t af)
2243 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
2248 a += inaddr->addr32[0];
2251 hash->addr32[0] = c + key->key32[2];
2256 a += inaddr->addr32[0];
2257 b += inaddr->addr32[2];
2259 hash->addr32[0] = c;
2260 a += inaddr->addr32[1];
2261 b += inaddr->addr32[3];
2264 hash->addr32[1] = c;
2265 a += inaddr->addr32[2];
2266 b += inaddr->addr32[1];
2269 hash->addr32[2] = c;
2270 a += inaddr->addr32[3];
2271 b += inaddr->addr32[0];
2274 hash->addr32[3] = c;
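/*
 * pf_map_addr() picks the translation (or route-to) address from the
 * rule's address pool according to the pool type: none, bitmask, random,
 * source-hash or round-robin.  With sticky-address, a previously chosen
 * address recorded in the source node is reused.
 */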
2281 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
2282 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
2284 unsigned char hash[16];
2285 struct pf_pool *rpool = &r->rpool;
2286 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
2287 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
2288 struct pf_pooladdr *acur = rpool->cur;
2289 struct pf_src_node k;
2291 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
2292 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
2294 PF_ACPY(&k.addr, saddr, af);
2295 if (r->rule_flag & PFRULE_RULESRCTRACK ||
2296 r->rpool.opts & PF_POOL_STICKYADDR)
2300 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
2301 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
2302 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
2303 PF_ACPY(naddr, &(*sn)->raddr, af);
2304 if (pf_status.debug >= PF_DEBUG_MISC) {
2305 kprintf("pf_map_addr: src tracking maps ");
2306 pf_print_host(&k.addr, 0, af);
2308 pf_print_host(naddr, 0, af);
2315 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
2317 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
2321 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
2322 (rpool->opts & PF_POOL_TYPEMASK) !=
2325 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
2326 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
2331 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
2332 (rpool->opts & PF_POOL_TYPEMASK) !=
2335 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
2336 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
2340 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
2341 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
2342 return (1); /* unsupported */
2344 raddr = &rpool->cur->addr.v.a.addr;
2345 rmask = &rpool->cur->addr.v.a.mask;
2348 switch (rpool->opts & PF_POOL_TYPEMASK) {
2350 PF_ACPY(naddr, raddr, af);
2352 case PF_POOL_BITMASK:
2353 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
2355 case PF_POOL_RANDOM:
2356 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
2360 rpool->counter.addr32[0] = htonl(karc4random());
2365 if (rmask->addr32[3] != 0xffffffff)
2366 rpool->counter.addr32[3] =
2367 htonl(karc4random());
2370 if (rmask->addr32[2] != 0xffffffff)
2371 rpool->counter.addr32[2] =
2372 htonl(karc4random());
2375 if (rmask->addr32[1] != 0xffffffff)
2376 rpool->counter.addr32[1] =
2377 htonl(karc4random());
2380 if (rmask->addr32[0] != 0xffffffff)
2381 rpool->counter.addr32[0] =
2382 htonl(karc4random());
2386 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
2387 PF_ACPY(init_addr, naddr, af);
2390 PF_AINC(&rpool->counter, af);
2391 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
2394 case PF_POOL_SRCHASH:
2395 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
2396 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
2398 case PF_POOL_ROUNDROBIN:
2399 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
2400 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
2401 &rpool->tblidx, &rpool->counter,
2402 &raddr, &rmask, af))
2404 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
2405 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
2406 &rpool->tblidx, &rpool->counter,
2407 &raddr, &rmask, af))
2409 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
2413 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
2414 rpool->cur = TAILQ_FIRST(&rpool->list);
2415 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
2417 if (pfr_pool_get(rpool->cur->addr.p.tbl,
2418 &rpool->tblidx, &rpool->counter,
2419 &raddr, &rmask, af)) {
2420 /* table contains no address of type 'af' */
2421 if (rpool->cur != acur)
2425 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
2427 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
2428 &rpool->tblidx, &rpool->counter,
2429 &raddr, &rmask, af)) {
2430 /* table contains no address of type 'af' */
2431 if (rpool->cur != acur)
2436 raddr = &rpool->cur->addr.v.a.addr;
2437 rmask = &rpool->cur->addr.v.a.mask;
2438 PF_ACPY(&rpool->counter, raddr, af);
2442 PF_ACPY(naddr, &rpool->counter, af);
2443 if (init_addr != NULL && PF_AZERO(init_addr, af))
2444 PF_ACPY(init_addr, naddr, af);
2445 PF_AINC(&rpool->counter, af);
2449 PF_ACPY(&(*sn)->raddr, naddr, af);
2451 if (pf_status.debug >= PF_DEBUG_MISC &&
2452 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
2453 kprintf("pf_map_addr: selected address ");
2454 pf_print_host(naddr, 0, af);
2462 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
2463 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
2464 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
2465 struct pf_src_node **sn)
2467 struct pf_state_key_cmp key;
2468 struct pf_addr init_addr;
2471 bzero(&init_addr, sizeof(init_addr));
2472 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
2475 if (proto == IPPROTO_ICMP) {
2483 PF_ACPY(&key.addr[1], daddr, key.af);
2484 PF_ACPY(&key.addr[0], naddr, key.af);
2485 key.port[1] = dport;
2488 * port search; start random, step;
2489 * similar to the port loop in in_pcbbind
2491 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
2492 proto == IPPROTO_ICMP)) {
2493 key.port[0] = dport;
2494 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
2496 } else if (low == 0 && high == 0) {
2497 key.port[0] = *nport;
2498 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
2500 } else if (low == high) {
2501 key.port[0] = htons(low);
2502 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
2503 *nport = htons(low);
2515 cut = htonl(karc4random()) % (1 + high - low) + low;
2516 /* low <= cut <= high */
2517 for (tmp = cut; tmp <= high; ++(tmp)) {
2518 key.port[0] = htons(tmp);
2519 if (pf_find_state_all(&key, PF_IN, NULL) ==
2520 NULL && !in_baddynamic(tmp, proto)) {
2521 *nport = htons(tmp);
2525 for (tmp = cut - 1; tmp >= low; --(tmp)) {
2526 key.port[0] = htons(tmp);
2527 if (pf_find_state_all(&key, PF_IN, NULL) ==
2528 NULL && !in_baddynamic(tmp, proto)) {
2529 *nport = htons(tmp);
2535 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
2536 case PF_POOL_RANDOM:
2537 case PF_POOL_ROUNDROBIN:
2538 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
2542 case PF_POOL_SRCHASH:
2543 case PF_POOL_BITMASK:
2547 } while (! PF_AEQ(&init_addr, naddr, af) );
2548 return (1); /* none available */
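/*
 * pf_match_translation() walks one translation ruleset (nat, rdr or
 * binat) and returns the first matching rule, descending into anchors
 * and honoring the skip steps computed above.
 */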
2552 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
2553 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
2554 struct pf_addr *daddr, u_int16_t dport, int rs_num)
2556 struct pf_rule *r, *rm = NULL;
2557 struct pf_ruleset *ruleset = NULL;
2562 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
2563 while (r && rm == NULL) {
2564 struct pf_rule_addr *src = NULL, *dst = NULL;
2565 struct pf_addr_wrap *xdst = NULL;
2567 if (r->action == PF_BINAT && direction == PF_IN) {
2569 if (r->rpool.cur != NULL)
2570 xdst = &r->rpool.cur->addr;
2577 if (pfi_kif_match(r->kif, kif) == r->ifnot)
2578 r = r->skip[PF_SKIP_IFP].ptr;
2579 else if (r->direction && r->direction != direction)
2580 r = r->skip[PF_SKIP_DIR].ptr;
2581 else if (r->af && r->af != pd->af)
2582 r = r->skip[PF_SKIP_AF].ptr;
2583 else if (r->proto && r->proto != pd->proto)
2584 r = r->skip[PF_SKIP_PROTO].ptr;
2585 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
2587 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
2588 PF_SKIP_DST_ADDR].ptr;
2589 else if (src->port_op && !pf_match_port(src->port_op,
2590 src->port[0], src->port[1], sport))
2591 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
2592 PF_SKIP_DST_PORT].ptr;
2593 else if (dst != NULL &&
2594 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
2595 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2596 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
2598 r = TAILQ_NEXT(r, entries);
2599 else if (dst != NULL && dst->port_op &&
2600 !pf_match_port(dst->port_op, dst->port[0],
2601 dst->port[1], dport))
2602 r = r->skip[PF_SKIP_DST_PORT].ptr;
2603 else if (r->match_tag && !pf_match_tag(m, r, &tag))
2604 r = TAILQ_NEXT(r, entries);
2605 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
2606 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
2607 off, pd->hdr.tcp), r->os_fingerprint)))
2608 r = TAILQ_NEXT(r, entries);
2612 if (r->rtableid >= 0)
2613 rtableid = r->rtableid;
2614 if (r->anchor == NULL) {
2617 pf_step_into_anchor(&asd, &ruleset, rs_num,
2621 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
2624 if (pf_tag_packet(m, tag, rtableid))
2626 if (rm != NULL && (rm->action == PF_NONAT ||
2627 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
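/*
 * Find the translation rule (if any) applying to this packet, set up the
 * wire/stack state keys and compute the replacement address and port.
 */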
2633 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
2634 struct pfi_kif *kif, struct pf_src_node **sn,
2635 struct pf_state_key **skw, struct pf_state_key **sks,
2636 struct pf_state_key **skp, struct pf_state_key **nkp,
2637 struct pf_addr *saddr, struct pf_addr *daddr,
2638 u_int16_t sport, u_int16_t dport)
2640 struct pf_rule *r = NULL;
2643 if (direction == PF_OUT) {
2644 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2645 sport, daddr, dport, PF_RULESET_BINAT);
2646 		if (r == NULL)
2647 			r = pf_match_translation(pd, m, off, direction, kif,
2648 			    saddr, sport, daddr, dport, PF_RULESET_NAT);
2649 	} else {
2650 		r = pf_match_translation(pd, m, off, direction, kif, saddr,
2651 		    sport, daddr, dport, PF_RULESET_RDR);
2652 		if (r == NULL)
2653 			r = pf_match_translation(pd, m, off, direction, kif,
2654 saddr, sport, daddr, dport, PF_RULESET_BINAT);
2658 struct pf_addr *naddr;
2661 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp,
2662 saddr, daddr, sport, dport))
2665 /* XXX We only modify one side for now. */
2666 naddr = &(*nkp)->addr[1];
2667 nport = &(*nkp)->port[1];
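		/*
		 * Choose the replacement address and port below according
		 * to the matched rule's action (nat, binat or rdr).
		 */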
2669 switch (r->action) {
2675 if (pf_get_sport(pd->af, pd->proto, r, saddr,
2676 daddr, dport, naddr, nport, r->rpool.proxy_port[0],
2677 r->rpool.proxy_port[1], sn)) {
2678 DPFPRINTF(PF_DEBUG_MISC,
2679 ("pf: NAT proxy port allocation "
2681 r->rpool.proxy_port[0],
2682 r->rpool.proxy_port[1]));
2687 switch (direction) {
2689 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){
2693 if (r->rpool.cur->addr.p.dyn->
2697 &r->rpool.cur->addr.p.dyn->
2699 &r->rpool.cur->addr.p.dyn->
2706 if (r->rpool.cur->addr.p.dyn->
2710 &r->rpool.cur->addr.p.dyn->
2712 &r->rpool.cur->addr.p.dyn->
2720 &r->rpool.cur->addr.v.a.addr,
2721 &r->rpool.cur->addr.v.a.mask,
2725 if (r->src.addr.type == PF_ADDR_DYNIFTL) {
2729 if (r->src.addr.p.dyn->
2733 &r->src.addr.p.dyn->
2735 &r->src.addr.p.dyn->
2742 if (r->src.addr.p.dyn->
2746 &r->src.addr.p.dyn->
2748 &r->src.addr.p.dyn->
2756 &r->src.addr.v.a.addr,
2757 &r->src.addr.v.a.mask, daddr,
2763 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
2765 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
2767 PF_POOLMASK(naddr, naddr,
2768 &r->rpool.cur->addr.v.a.mask, daddr,
2771 if (r->rpool.proxy_port[1]) {
2772 u_int32_t tmp_nport;
2774 tmp_nport = ((ntohs(dport) -
2775 ntohs(r->dst.port[0])) %
2776 (r->rpool.proxy_port[1] -
2777 r->rpool.proxy_port[0] + 1)) +
2778 r->rpool.proxy_port[0];
2780 /* wrap around if necessary */
2781 if (tmp_nport > 65535)
2783 *nport = htons((u_int16_t)tmp_nport);
2784 } else if (r->rpool.proxy_port[0])
2785 *nport = htons(r->rpool.proxy_port[0]);
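/*
 * Arguments for a PCB hash lookup that may have to be forwarded to the
 * CPU owning the connection's pcbinfo (see pf_socket_lookup() below).
 */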
2797 struct netmsg_hashlookup {
2798 struct netmsg_base base;
2799 struct inpcb **nm_pinp;
2800 struct inpcbinfo *nm_pcbinfo;
2801 struct pf_addr *nm_saddr;
2802 	struct pf_addr		*nm_daddr;
2803 	u_int16_t		nm_sport;
2804 	u_int16_t		nm_dport;
2805 	sa_family_t		nm_af;
2806 };
2808 #ifdef PF_SOCKET_LOOKUP_DOMSG
2810 in_pcblookup_hash_handler(netmsg_t msg)
2812 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg;
2814 if (rmsg->nm_af == AF_INET)
2815 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo,
2816 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4,
2817 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2820 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo,
2821 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6,
2822 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2824 lwkt_replymsg(&rmsg->base.lmsg, 0);
2826 #endif /* PF_SOCKET_LOOKUP_DOMSG */
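/*
 * Find the local socket (if any) owning this connection and record its
 * uid/gid in pd->lookup for user/group based rule matching.
 */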
2831 pf_socket_lookup(int direction, struct pf_pdesc *pd)
2833 struct pf_addr *saddr, *daddr;
2834 u_int16_t sport, dport;
2835 struct inpcbinfo *pi;
2838 struct netmsg_hashlookup *msg = NULL;
2839 #ifdef PF_SOCKET_LOOKUP_DOMSG
2840 struct netmsg_hashlookup msg0;
2847 pd->lookup.uid = UID_MAX;
2848 pd->lookup.gid = GID_MAX;
2849 pd->lookup.pid = NO_PID;
2850 if (direction == PF_IN) {
2857 switch (pd->proto) {
2859 if (pd->hdr.tcp == NULL)
2861 sport = pd->hdr.tcp->th_sport;
2862 dport = pd->hdr.tcp->th_dport;
2864 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport);
2865 pi = &tcbinfo[pi_cpu];
2868 * Our netstack runs lockless on MP systems
2869 * (only for TCP connections at the moment).
2871 * As we are not allowed to read another CPU's tcbinfo,
2872 * we have to ask that CPU via remote call to search the
2875 * Prepare a msg iff data belongs to another CPU.
2877 if (pi_cpu != mycpu->gd_cpuid) {
2878 #ifdef PF_SOCKET_LOOKUP_DOMSG
2882 			 * The following lwkt_domsg() is dangerous and could
2883 			 * lock up the network system, e.g.
2886 * netisr0 domsg to netisr1 (due to lookup)
2887 * netisr1 domsg to netisr0 (due to lookup)
2889 * We simply return -1 here, since we are probably
2890 * called before NAT, so the TCP packet should
2891 * already be on the correct CPU.
2894 netmsg_init(&msg->base, NULL, &curthread->td_msgport,
2895 0, in_pcblookup_hash_handler);
2896 msg->nm_pinp = &inp;
2897 msg->nm_pcbinfo = pi;
2898 msg->nm_saddr = saddr;
2899 msg->nm_sport = sport;
2900 msg->nm_daddr = daddr;
2901 msg->nm_dport = dport;
2902 msg->nm_af = pd->af;
2903 #else /* !PF_SOCKET_LOOKUP_DOMSG */
2904 kprintf("pf_socket_lookup: tcp packet not on the "
2905 "correct cpu %d, cur cpu %d\n",
2907 print_backtrace(-1);
2909 #endif /* PF_SOCKET_LOOKUP_DOMSG */
2914 if (pd->hdr.udp == NULL)
2916 sport = pd->hdr.udp->uh_sport;
2917 dport = pd->hdr.udp->uh_dport;
2923 if (direction != PF_IN) {
2935 * Query other CPU, second part
2937 * msg only gets initialized when:
2939 * 2) the info belongs to another CPU
2941 * Use some switch/case magic to avoid code duplication.
2946 inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
2947 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
2953 /* FALLTHROUGH if SMP and on other CPU */
2958 lwkt_domsg(cpu_portfn(pi_cpu),
2959 &msg->base.lmsg, 0);
2963 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4,
2964 dport, INPLOOKUP_WILDCARD, NULL);
2973 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid;
2974 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0];
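/*
 * Parse the TCP options of a SYN segment and return the advertised window
 * scale factor, with PF_WSCALE_FLAG set when the option is present.
 */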
2979 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2983 u_int8_t *opt, optlen;
2984 u_int8_t wscale = 0;
2986 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2987 if (hlen <= sizeof(struct tcphdr))
2989 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2991 opt = hdr + sizeof(struct tcphdr);
2992 hlen -= sizeof(struct tcphdr);
3002 if (wscale > TCP_MAX_WINSHIFT)
3003 wscale = TCP_MAX_WINSHIFT;
3004 wscale |= PF_WSCALE_FLAG;
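/*
 * Parse the TCP options and return the advertised MSS, falling back to
 * tcp_mssdflt when no MSS option is present.
 */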
3019 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3023 u_int8_t *opt, optlen;
3024 u_int16_t mss = tcp_mssdflt;
3026 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3027 if (hlen <= sizeof(struct tcphdr))
3029 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3031 opt = hdr + sizeof(struct tcphdr);
3032 hlen -= sizeof(struct tcphdr);
3033 while (hlen >= TCPOLEN_MAXSEG) {
3041 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
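/*
 * Clamp the peer's offered MSS to what the route towards the peer can
 * actually carry (interface MTU minus IP and TCP headers).
 */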
3056 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
3059 struct sockaddr_in *dst;
3063 struct sockaddr_in6 *dst6;
3064 struct route_in6 ro6;
3066 struct rtentry *rt = NULL;
3068 u_int16_t mss = tcp_mssdflt;
3073 hlen = sizeof(struct ip);
3074 bzero(&ro, sizeof(ro));
3075 dst = (struct sockaddr_in *)&ro.ro_dst;
3076 dst->sin_family = AF_INET;
3077 dst->sin_len = sizeof(*dst);
3078 dst->sin_addr = addr->v4;
3079 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
3085 hlen = sizeof(struct ip6_hdr);
3086 bzero(&ro6, sizeof(ro6));
3087 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3088 dst6->sin6_family = AF_INET6;
3089 dst6->sin6_len = sizeof(*dst6);
3090 dst6->sin6_addr = addr->v6;
3091 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING));
3097 if (rt && rt->rt_ifp) {
3098 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3099 mss = max(tcp_mssdflt, mss);
3102 mss = min(mss, offer);
3103 mss = max(mss, 64); /* sanity - at least max opt space */
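/*
 * For rules with a route-to style option (other than fastroute), resolve
 * the outgoing interface and gateway address for this state up front.
 */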
3108 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3110 struct pf_rule *r = s->rule.ptr;
3113 if (!r->rt || r->rt == PF_FASTROUTE)
3115 switch (s->key[PF_SK_WIRE]->af) {
3118 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
3120 s->rt_kif = r->rpool.cur->kif;
3125 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
3127 s->rt_kif = r->rpool.cur->kif;
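/*
 * Generate an initial sequence number for SYN proxy and modulate state
 * from an MD5 hash of a random secret and the connection's addresses and
 * ports, plus a monotonically increasing offset.
 */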
3134 pf_tcp_iss(struct pf_pdesc *pd)
3137 u_int32_t digest[4];
3139 if (pf_tcp_secret_init == 0) {
3140 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret));
3141 MD5Init(&pf_tcp_secret_ctx);
3142 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
3143 sizeof(pf_tcp_secret));
3144 pf_tcp_secret_init = 1;
3146 ctx = pf_tcp_secret_ctx;
3148 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3149 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3150 if (pd->af == AF_INET6) {
3151 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3152 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3154 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3155 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3157 MD5Final((u_char *)digest, &ctx);
3158 pf_tcp_iss_off += 4096;
3159 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off);
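/*
 * Main rule matching for a packet that did not match an existing state:
 * apply translations, walk the filter ruleset and, for pass rules that
 * keep state, create the new state entry.
 */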
3163 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3164 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3165 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
3166 struct ifqueue *ifq, struct inpcb *inp)
3168 struct pf_rule *nr = NULL;
3169 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3170 sa_family_t af = pd->af;
3171 struct pf_rule *r, *a = NULL;
3172 struct pf_ruleset *ruleset = NULL;
3173 struct pf_src_node *nsn = NULL;
3174 struct tcphdr *th = pd->hdr.tcp;
3175 struct pf_state_key *skw = NULL, *sks = NULL;
3176 struct pf_state_key *sk = NULL, *nk = NULL;
3178 int rewrite = 0, hdrlen = 0;
3179 int tag = -1, rtableid = -1;
3183 u_int16_t sport = 0, dport = 0;
3184 u_int16_t nport = 0, bport = 0;
3185 u_int16_t bproto_sum = 0, bip_sum = 0;
3186 u_int8_t icmptype = 0, icmpcode = 0;
3189 if (direction == PF_IN && pf_check_congestion(ifq)) {
3190 REASON_SET(&reason, PFRES_CONGEST);
3195 pd->lookup.done = pf_socket_lookup(direction, pd);
3196 else if (debug_pfugidhack) {
3197 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
3198 pd->lookup.done = pf_socket_lookup(direction, pd);
3201 switch (pd->proto) {
3203 sport = th->th_sport;
3204 dport = th->th_dport;
3205 hdrlen = sizeof(*th);
3208 sport = pd->hdr.udp->uh_sport;
3209 dport = pd->hdr.udp->uh_dport;
3210 hdrlen = sizeof(*pd->hdr.udp);
3214 if (pd->af != AF_INET)
3216 sport = dport = pd->hdr.icmp->icmp_id;
3217 hdrlen = sizeof(*pd->hdr.icmp);
3218 icmptype = pd->hdr.icmp->icmp_type;
3219 icmpcode = pd->hdr.icmp->icmp_code;
3221 if (icmptype == ICMP_UNREACH ||
3222 icmptype == ICMP_SOURCEQUENCH ||
3223 icmptype == ICMP_REDIRECT ||
3224 icmptype == ICMP_TIMXCEED ||
3225 icmptype == ICMP_PARAMPROB)
3230 case IPPROTO_ICMPV6:
3233 sport = dport = pd->hdr.icmp6->icmp6_id;
3234 hdrlen = sizeof(*pd->hdr.icmp6);
3235 icmptype = pd->hdr.icmp6->icmp6_type;
3236 icmpcode = pd->hdr.icmp6->icmp6_code;
3238 if (icmptype == ICMP6_DST_UNREACH ||
3239 icmptype == ICMP6_PACKET_TOO_BIG ||
3240 icmptype == ICMP6_TIME_EXCEEDED ||
3241 icmptype == ICMP6_PARAM_PROB)
3246 sport = dport = hdrlen = 0;
3250 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3252 bport = nport = sport;
3253 /* check packet for BINAT/NAT/RDR */
3254 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
3255 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
3256 if (nk == NULL || sk == NULL) {
3257 REASON_SET(&reason, PFRES_MEMORY);
3262 bip_sum = *pd->ip_sum;
3264 switch (pd->proto) {
3266 bproto_sum = th->th_sum;
3267 pd->proto_sum = &th->th_sum;
3269 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3270 nk->port[pd->sidx] != sport) {
3271 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3272 &th->th_sum, &nk->addr[pd->sidx],
3273 nk->port[pd->sidx], 0, af);
3274 pd->sport = &th->th_sport;
3275 sport = th->th_sport;
3278 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3279 nk->port[pd->didx] != dport) {
3280 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3281 &th->th_sum, &nk->addr[pd->didx],
3282 nk->port[pd->didx], 0, af);
3283 dport = th->th_dport;
3284 pd->dport = &th->th_dport;
3289 bproto_sum = pd->hdr.udp->uh_sum;
3290 pd->proto_sum = &pd->hdr.udp->uh_sum;
3292 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3293 nk->port[pd->sidx] != sport) {
3294 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3295 pd->ip_sum, &pd->hdr.udp->uh_sum,
3296 &nk->addr[pd->sidx],
3297 nk->port[pd->sidx], 1, af);
3298 sport = pd->hdr.udp->uh_sport;
3299 pd->sport = &pd->hdr.udp->uh_sport;
3302 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3303 nk->port[pd->didx] != dport) {
3304 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3305 pd->ip_sum, &pd->hdr.udp->uh_sum,
3306 &nk->addr[pd->didx],
3307 nk->port[pd->didx], 1, af);
3308 dport = pd->hdr.udp->uh_dport;
3309 pd->dport = &pd->hdr.udp->uh_dport;
3315 nk->port[0] = nk->port[1];
3316 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3317 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3318 nk->addr[pd->sidx].v4.s_addr, 0);
3320 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3321 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3322 nk->addr[pd->didx].v4.s_addr, 0);
3324 if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3325 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3326 pd->hdr.icmp->icmp_cksum, sport,
3328 pd->hdr.icmp->icmp_id = nk->port[1];
3329 pd->sport = &pd->hdr.icmp->icmp_id;
3331 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3335 case IPPROTO_ICMPV6:
3336 nk->port[0] = nk->port[1];
3337 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3338 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3339 &nk->addr[pd->sidx], 0);
3341 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3342 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3343 &nk->addr[pd->didx], 0);
3352 &nk->addr[pd->sidx], AF_INET))
3353 pf_change_a(&saddr->v4.s_addr,
3355 nk->addr[pd->sidx].v4.s_addr, 0);
3358 &nk->addr[pd->didx], AF_INET))
3359 pf_change_a(&daddr->v4.s_addr,
3361 nk->addr[pd->didx].v4.s_addr, 0);
3367 &nk->addr[pd->sidx], AF_INET6))
3368 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3371 &nk->addr[pd->didx], AF_INET6))
3372 					PF_ACPY(daddr, &nk->addr[pd->didx], af);
3385 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3386 r = r->skip[PF_SKIP_IFP].ptr;
3387 else if (r->direction && r->direction != direction)
3388 r = r->skip[PF_SKIP_DIR].ptr;
3389 else if (r->af && r->af != af)
3390 r = r->skip[PF_SKIP_AF].ptr;
3391 else if (r->proto && r->proto != pd->proto)
3392 r = r->skip[PF_SKIP_PROTO].ptr;
3393 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3395 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3396 /* tcp/udp only. port_op always 0 in other cases */
3397 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3398 r->src.port[0], r->src.port[1], sport))
3399 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3400 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3402 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3403 /* tcp/udp only. port_op always 0 in other cases */
3404 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3405 r->dst.port[0], r->dst.port[1], dport))
3406 r = r->skip[PF_SKIP_DST_PORT].ptr;
3407 /* icmp only. type always 0 in other cases */
3408 else if (r->type && r->type != icmptype + 1)
3409 r = TAILQ_NEXT(r, entries);
3410 /* icmp only. type always 0 in other cases */
3411 else if (r->code && r->code != icmpcode + 1)
3412 r = TAILQ_NEXT(r, entries);
3413 else if (r->tos && !(r->tos == pd->tos))
3414 r = TAILQ_NEXT(r, entries);
3415 else if (r->rule_flag & PFRULE_FRAGMENT)
3416 r = TAILQ_NEXT(r, entries);
3417 else if (pd->proto == IPPROTO_TCP &&
3418 (r->flagset & th->th_flags) != r->flags)
3419 r = TAILQ_NEXT(r, entries);
3420 /* tcp/udp only. uid.op always 0 in other cases */
3421 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3422 pf_socket_lookup(direction, pd), 1)) &&
3423 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3425 r = TAILQ_NEXT(r, entries);
3426 /* tcp/udp only. gid.op always 0 in other cases */
3427 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3428 pf_socket_lookup(direction, pd), 1)) &&
3429 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3431 r = TAILQ_NEXT(r, entries);
3433 r->prob <= karc4random())
3434 r = TAILQ_NEXT(r, entries);
3435 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3436 r = TAILQ_NEXT(r, entries);
3437 else if (r->os_fingerprint != PF_OSFP_ANY &&
3438 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3439 pf_osfp_fingerprint(pd, m, off, th),
3440 r->os_fingerprint)))
3441 r = TAILQ_NEXT(r, entries);
3445 if (r->rtableid >= 0)
3446 rtableid = r->rtableid;
3447 if (r->anchor == NULL) {
3454 r = TAILQ_NEXT(r, entries);
3456 pf_step_into_anchor(&asd, &ruleset,
3457 PF_RULESET_FILTER, &r, &a, &match);
3459 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3460 PF_RULESET_FILTER, &r, &a, &match))
3467 REASON_SET(&reason, PFRES_MATCH);
3469 if (r->log || (nr != NULL && nr->log)) {
3471 m_copyback(m, off, hdrlen, pd->hdr.any);
3472 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
3476 if ((r->action == PF_DROP) &&
3477 ((r->rule_flag & PFRULE_RETURNRST) ||
3478 (r->rule_flag & PFRULE_RETURNICMP) ||
3479 (r->rule_flag & PFRULE_RETURN))) {
3480 /* undo NAT changes, if they have taken place */
3482 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3483 PF_ACPY(daddr, &sk->addr[pd->didx], af);
3485 *pd->sport = sk->port[pd->sidx];
3487 *pd->dport = sk->port[pd->didx];
3489 *pd->proto_sum = bproto_sum;
3491 *pd->ip_sum = bip_sum;
3492 m_copyback(m, off, hdrlen, pd->hdr.any);
3494 if (pd->proto == IPPROTO_TCP &&
3495 ((r->rule_flag & PFRULE_RETURNRST) ||
3496 (r->rule_flag & PFRULE_RETURN)) &&
3497 !(th->th_flags & TH_RST)) {
3498 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
3505 h4 = mtod(m, struct ip *);
3506 len = h4->ip_len - off;
3510 h6 = mtod(m, struct ip6_hdr *);
3511 len = h6->ip6_plen - (off - sizeof(*h6));
3516 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3517 REASON_SET(&reason, PFRES_PROTCKSUM);
3519 if (th->th_flags & TH_SYN)
3521 if (th->th_flags & TH_FIN)
3523 pf_send_tcp(r, af, pd->dst,
3524 pd->src, th->th_dport, th->th_sport,
3525 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3526 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
3528 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3530 pf_send_icmp(m, r->return_icmp >> 8,
3531 r->return_icmp & 255, af, r);
3532 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3534 pf_send_icmp(m, r->return_icmp6 >> 8,
3535 r->return_icmp6 & 255, af, r);
3538 if (r->action == PF_DROP)
3541 if (pf_tag_packet(m, tag, rtableid)) {
3542 REASON_SET(&reason, PFRES_MEMORY);
3546 if (!state_icmp && (r->keep_state || nr != NULL ||
3547 (pd->flags & PFDESC_TCP_NORM))) {
3549 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
3550 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
3552 if (action != PF_PASS)
3556 /* copy back packet headers if we performed NAT operations */
3558 m_copyback(m, off, hdrlen, pd->hdr.any);
3564 pool_put(&pf_state_key_pl, sk);
3566 pool_put(&pf_state_key_pl, nk);
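/*
 * Allocate and initialize a new state entry for the rule that matched,
 * including source tracking, TCP sequence tracking setup and optional
 * SYN proxy handling.
 */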
3571 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3572 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
3573 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
3574 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
3575 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
3576 u_int16_t bip_sum, int hdrlen)
3578 struct pf_state *s = NULL;
3579 struct pf_src_node *sn = NULL;
3580 struct tcphdr *th = pd->hdr.tcp;
3581 u_int16_t mss = tcp_mssdflt;
3584 /* check maximums */
3585 if (r->max_states && (r->states_cur >= r->max_states)) {
3586 pf_status.lcounters[LCNT_STATES]++;
3587 REASON_SET(&reason, PFRES_MAXSTATES);
3590 /* src node for filter rule */
3591 if ((r->rule_flag & PFRULE_SRCTRACK ||
3592 r->rpool.opts & PF_POOL_STICKYADDR) &&
3593 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3594 REASON_SET(&reason, PFRES_SRCLIMIT);
3597 /* src node for translation rule */
3598 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3599 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3600 REASON_SET(&reason, PFRES_SRCLIMIT);
3603 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
3605 REASON_SET(&reason, PFRES_MEMORY);
3608 s->id = 0; /* XXX Do we really need that? not in OpenBSD */
3611 s->nat_rule.ptr = nr;
3613 STATE_INC_COUNTERS(s);
3615 s->state_flags |= PFSTATE_ALLOWOPTS;
3616 if (r->rule_flag & PFRULE_STATESLOPPY)
3617 s->state_flags |= PFSTATE_SLOPPY;
3618 s->log = r->log & PF_LOG_ALL;
3620 s->log |= nr->log & PF_LOG_ALL;
3621 switch (pd->proto) {
3623 s->src.seqlo = ntohl(th->th_seq);
3624 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3625 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3626 r->keep_state == PF_STATE_MODULATE) {
3627 /* Generate sequence number modulator */
3628 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3631 pf_change_a(&th->th_seq, &th->th_sum,
3632 htonl(s->src.seqlo + s->src.seqdiff), 0);
3636 if (th->th_flags & TH_SYN) {
3638 s->src.wscale = pf_get_wscale(m, off,
3639 th->th_off, pd->af);
3641 s->src.max_win = MAX(ntohs(th->th_win), 1);
3642 if (s->src.wscale & PF_WSCALE_MASK) {
3643 /* Remove scale factor from initial window */
3644 int win = s->src.max_win;
3645 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3646 s->src.max_win = (win - 1) >>
3647 (s->src.wscale & PF_WSCALE_MASK);
3649 if (th->th_flags & TH_FIN)
3653 s->src.state = TCPS_SYN_SENT;
3654 s->dst.state = TCPS_CLOSED;
3655 s->timeout = PFTM_TCP_FIRST_PACKET;
3658 s->src.state = PFUDPS_SINGLE;
3659 s->dst.state = PFUDPS_NO_TRAFFIC;
3660 s->timeout = PFTM_UDP_FIRST_PACKET;
3664 case IPPROTO_ICMPV6:
3666 s->timeout = PFTM_ICMP_FIRST_PACKET;
3669 s->src.state = PFOTHERS_SINGLE;
3670 s->dst.state = PFOTHERS_NO_TRAFFIC;
3671 s->timeout = PFTM_OTHER_FIRST_PACKET;
3674 s->creation = time_second;
3675 s->expire = time_second;
3679 s->src_node->states++;
3682 /* XXX We only modify one side for now. */
3683 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3684 s->nat_src_node = nsn;
3685 s->nat_src_node->states++;
3687 if (pd->proto == IPPROTO_TCP) {
3688 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3689 off, pd, th, &s->src, &s->dst)) {
3690 REASON_SET(&reason, PFRES_MEMORY);
3691 pf_src_tree_remove_state(s);
3692 STATE_DEC_COUNTERS(s);
3693 pool_put(&pf_state_pl, s);
3696 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3697 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3698 &s->src, &s->dst, rewrite)) {
3699 /* This really shouldn't happen!!! */
3700 DPFPRINTF(PF_DEBUG_URGENT,
3701 ("pf_normalize_tcp_stateful failed on first pkt"));
3702 pf_normalize_tcp_cleanup(s);
3703 pf_src_tree_remove_state(s);
3704 STATE_DEC_COUNTERS(s);
3705 pool_put(&pf_state_pl, s);
3709 s->direction = pd->dir;
3711 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
3712 pd->src, pd->dst, sport, dport))
3715 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
3716 if (pd->proto == IPPROTO_TCP)
3717 pf_normalize_tcp_cleanup(s);
3718 REASON_SET(&reason, PFRES_STATEINS);
3719 pf_src_tree_remove_state(s);
3720 STATE_DEC_COUNTERS(s);
3721 pool_put(&pf_state_pl, s);
3726 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */
3731 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3732 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3733 s->src.state = PF_TCPS_PROXY_SRC;
3734 /* undo NAT changes, if they have taken place */
3736 struct pf_state_key *skt = s->key[PF_SK_WIRE];
3737 if (pd->dir == PF_OUT)
3738 skt = s->key[PF_SK_STACK];
3739 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3740 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3742 *pd->sport = skt->port[pd->sidx];
3744 *pd->dport = skt->port[pd->didx];
3746 *pd->proto_sum = bproto_sum;
3748 *pd->ip_sum = bip_sum;
3749 m_copyback(m, off, hdrlen, pd->hdr.any);
3751 s->src.seqhi = htonl(karc4random());
3752 /* Find mss option */
3753 mss = pf_get_mss(m, off, th->th_off, pd->af);
3754 mss = pf_calc_mss(pd->src, pd->af, mss);
3755 mss = pf_calc_mss(pd->dst, pd->af, mss);
3757 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
3758 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3759 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
3760 REASON_SET(&reason, PFRES_SYNPROXY);
3761 return (PF_SYNPROXY_DROP);
3768 pool_put(&pf_state_key_pl, sk);
3770 pool_put(&pf_state_key_pl, nk);
3772 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3773 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
3774 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3775 pf_status.src_nodes--;
3776 pool_put(&pf_src_tree_pl, sn);
3778 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
3779 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
3780 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3781 pf_status.src_nodes--;
3782 pool_put(&pf_src_tree_pl, nsn);
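/*
 * Rule matching for fragments that could not be reassembled: only rules
 * whose criteria do not require transport headers can match.
 */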
3788 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3789 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3790 struct pf_ruleset **rsm)
3792 struct pf_rule *r, *a = NULL;
3793 struct pf_ruleset *ruleset = NULL;
3794 sa_family_t af = pd->af;
3800 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3803 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3804 r = r->skip[PF_SKIP_IFP].ptr;
3805 else if (r->direction && r->direction != direction)
3806 r = r->skip[PF_SKIP_DIR].ptr;
3807 else if (r->af && r->af != af)
3808 r = r->skip[PF_SKIP_AF].ptr;
3809 else if (r->proto && r->proto != pd->proto)
3810 r = r->skip[PF_SKIP_PROTO].ptr;
3811 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3813 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3814 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3816 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3817 else if (r->tos && !(r->tos == pd->tos))
3818 r = TAILQ_NEXT(r, entries);
3819 else if (r->os_fingerprint != PF_OSFP_ANY)
3820 r = TAILQ_NEXT(r, entries);
3821 else if (pd->proto == IPPROTO_UDP &&
3822 (r->src.port_op || r->dst.port_op))
3823 r = TAILQ_NEXT(r, entries);
3824 else if (pd->proto == IPPROTO_TCP &&
3825 (r->src.port_op || r->dst.port_op || r->flagset))
3826 r = TAILQ_NEXT(r, entries);
3827 else if ((pd->proto == IPPROTO_ICMP ||
3828 pd->proto == IPPROTO_ICMPV6) &&
3829 (r->type || r->code))
3830 r = TAILQ_NEXT(r, entries);
3831 else if (r->prob && r->prob <= karc4random())
3832 r = TAILQ_NEXT(r, entries);
3833 else if (r->match_tag && !pf_match_tag(m, r, &tag))
3834 r = TAILQ_NEXT(r, entries);
3836 if (r->anchor == NULL) {
3843 r = TAILQ_NEXT(r, entries);
3845 pf_step_into_anchor(&asd, &ruleset,
3846 PF_RULESET_FILTER, &r, &a, &match);
3848 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
3849 PF_RULESET_FILTER, &r, &a, &match))
3856 REASON_SET(&reason, PFRES_MATCH);
3859 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
3862 if (r->action != PF_PASS)
3865 if (pf_tag_packet(m, tag, -1)) {
3866 REASON_SET(&reason, PFRES_MEMORY);
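/*
 * Full TCP state tracking with sequence window checks, following Guido
 * van Rooij's stateful TCP filtering algorithm referenced below.
 */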
3874 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3875 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3876 struct pf_pdesc *pd, u_short *reason, int *copyback)
3878 struct tcphdr *th = pd->hdr.tcp;
3879 u_int16_t win = ntohs(th->th_win);
3880 u_int32_t ack, end, seq, orig_seq;
3884 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3885 sws = src->wscale & PF_WSCALE_MASK;
3886 dws = dst->wscale & PF_WSCALE_MASK;
3891 * Sequence tracking algorithm from Guido van Rooij's paper:
3892 * http://www.madison-gurkha.com/publications/tcp_filtering/
3896 orig_seq = seq = ntohl(th->th_seq);
3897 if (src->seqlo == 0) {
3898 /* First packet from this end. Set its state */
3900 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3901 src->scrub == NULL) {
3902 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3903 REASON_SET(reason, PFRES_MEMORY);
3908 /* Deferred generation of sequence number modulator */
3909 if (dst->seqdiff && !src->seqdiff) {
3910 /* use random iss for the TCP server */
3911 while ((src->seqdiff = karc4random() - seq) == 0)
3913 ack = ntohl(th->th_ack) - dst->seqdiff;
3914 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3916 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3919 ack = ntohl(th->th_ack);
3922 end = seq + pd->p_len;
3923 if (th->th_flags & TH_SYN) {
3925 (*state)->sync_flags |= PFSTATE_GOT_SYN2;
3926 if (dst->wscale & PF_WSCALE_FLAG) {
3927 src->wscale = pf_get_wscale(m, off, th->th_off,
3929 if (src->wscale & PF_WSCALE_FLAG) {
3930 /* Remove scale factor from initial
3932 sws = src->wscale & PF_WSCALE_MASK;
3933 win = ((u_int32_t)win + (1 << sws) - 1)
3935 dws = dst->wscale & PF_WSCALE_MASK;
3937 /* fixup other window */
3938 dst->max_win <<= dst->wscale &
3940 /* in case of a retrans SYN|ACK */
3945 if (th->th_flags & TH_FIN)
3949 if (src->state < TCPS_SYN_SENT)
3950 src->state = TCPS_SYN_SENT;
3953 * May need to slide the window (seqhi may have been set by
3954 * the crappy stack check or if we picked up the connection
3955 * after establishment)
3957 if (src->seqhi == 1 ||
3958 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3959 src->seqhi = end + MAX(1, dst->max_win << dws);
3960 if (win > src->max_win)
3964 ack = ntohl(th->th_ack) - dst->seqdiff;
3966 /* Modulate sequence numbers */
3967 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3969 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3972 end = seq + pd->p_len;
3973 if (th->th_flags & TH_SYN)
3975 if (th->th_flags & TH_FIN)
3979 if ((th->th_flags & TH_ACK) == 0) {
3980 /* Let it pass through the ack skew check */
3982 } else if ((ack == 0 &&
3983 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3984 /* broken tcp stacks do not set ack */
3985 (dst->state < TCPS_SYN_SENT)) {
3987 * Many stacks (ours included) will set the ACK number in an
3988 * FIN|ACK if the SYN times out -- no sequence to ACK.
3994 /* Ease sequencing restrictions on no data packets */
3999 ackskew = dst->seqlo - ack;
4003 * Need to demodulate the sequence numbers in any TCP SACK options
4004 * (Selective ACK). We could optionally validate the SACK values
4005 * against the current ACK window, either forwards or backwards, but
4006 * I'm not confident that SACK has been implemented properly
4007 	 * everywhere. It wouldn't surprise me if several stacks accidentally
4008 * SACK too far backwards of previously ACKed data. There really aren't
4009 * any security implications of bad SACKing unless the target stack
4010 * doesn't validate the option length correctly. Someone trying to
4011 * spoof into a TCP connection won't bother blindly sending SACK
4014 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
4015 if (pf_modulate_sack(m, off, pd, th, dst))
4020 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
4021 if (SEQ_GEQ(src->seqhi, end) &&
4022 /* Last octet inside other's window space */
4023 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4024 /* Retrans: not more than one window back */
4025 (ackskew >= -MAXACKWINDOW) &&
4026 /* Acking not more than one reassembled fragment backwards */
4027 (ackskew <= (MAXACKWINDOW << sws)) &&
4028 /* Acking not more than one window forward */
4029 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4030 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4031 (pd->flags & PFDESC_IP_REAS) == 0)) {
4032 /* Require an exact/+1 sequence match on resets when possible */
4034 if (dst->scrub || src->scrub) {
4035 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4036 *state, src, dst, copyback))
4040 /* update max window */
4041 if (src->max_win < win)
4043 /* synchronize sequencing */
4044 if (SEQ_GT(end, src->seqlo))
4046 /* slide the window of what the other end can send */
4047 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4048 dst->seqhi = ack + MAX((win << sws), 1);
4052 if (th->th_flags & TH_SYN)
4053 if (src->state < TCPS_SYN_SENT)
4054 src->state = TCPS_SYN_SENT;
4055 if (th->th_flags & TH_FIN)
4056 if (src->state < TCPS_CLOSING)
4057 src->state = TCPS_CLOSING;
4058 if (th->th_flags & TH_ACK) {
4059 if (dst->state == TCPS_SYN_SENT) {
4060 dst->state = TCPS_ESTABLISHED;
4061 if (src->state == TCPS_ESTABLISHED &&
4062 (*state)->src_node != NULL &&
4063 pf_src_connlimit(state)) {
4064 REASON_SET(reason, PFRES_SRCLIMIT);
4067 } else if (dst->state == TCPS_CLOSING)
4068 dst->state = TCPS_FIN_WAIT_2;
4070 if (th->th_flags & TH_RST)
4071 src->state = dst->state = TCPS_TIME_WAIT;
4073 /* update expire time */
4074 (*state)->expire = time_second;
4075 if (src->state >= TCPS_FIN_WAIT_2 &&
4076 dst->state >= TCPS_FIN_WAIT_2)
4077 (*state)->timeout = PFTM_TCP_CLOSED;
4078 else if (src->state >= TCPS_CLOSING &&
4079 dst->state >= TCPS_CLOSING)
4080 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4081 else if (src->state < TCPS_ESTABLISHED ||
4082 dst->state < TCPS_ESTABLISHED)
4083 (*state)->timeout = PFTM_TCP_OPENING;
4084 else if (src->state >= TCPS_CLOSING ||
4085 dst->state >= TCPS_CLOSING)
4086 (*state)->timeout = PFTM_TCP_CLOSING;
4088 (*state)->timeout = PFTM_TCP_ESTABLISHED;
4090 /* Fall through to PASS packet */
4092 } else if ((dst->state < TCPS_SYN_SENT ||
4093 dst->state >= TCPS_FIN_WAIT_2 ||
4094 src->state >= TCPS_FIN_WAIT_2) &&
4095 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4096 /* Within a window forward of the originating packet */
4097 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4098 /* Within a window backward of the originating packet */
4101 * This currently handles three situations:
4102 * 1) Stupid stacks will shotgun SYNs before their peer
4104 * 2) When PF catches an already established stream (the
4105 * firewall rebooted, the state table was flushed, routes
4107 * 3) Packets get funky immediately after the connection
4108 * closes (this should catch Solaris spurious ACK|FINs
4109 * that web servers like to spew after a close)
4111 * This must be a little more careful than the above code
4112 * since packet floods will also be caught here. We don't
4113 * update the TTL here to mitigate the damage of a packet
4114 * flood and so the same code can handle awkward establishment
4115 * and a loosened connection close.
4116 * In the establishment case, a correct peer response will
4117 * validate the connection, go through the normal state code
4118 * and keep updating the state TTL.
4121 if (pf_status.debug >= PF_DEBUG_MISC) {
4122 kprintf("pf: loose state match: ");
4123 pf_print_state(*state);
4124 pf_print_flags(th->th_flags);
4125 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4126 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len,
4127 ackskew, (unsigned long long)(*state)->packets[0],
4128 (unsigned long long)(*state)->packets[1],
4129 pd->dir == PF_IN ? "in" : "out",
4130 pd->dir == (*state)->direction ? "fwd" : "rev");
4133 if (dst->scrub || src->scrub) {
4134 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4135 *state, src, dst, copyback))
4139 /* update max window */
4140 if (src->max_win < win)
4142 /* synchronize sequencing */
4143 if (SEQ_GT(end, src->seqlo))
4145 /* slide the window of what the other end can send */
4146 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4147 dst->seqhi = ack + MAX((win << sws), 1);
4150 * Cannot set dst->seqhi here since this could be a shotgunned
4151 * SYN and not an already established connection.
4154 if (th->th_flags & TH_FIN)
4155 if (src->state < TCPS_CLOSING)
4156 src->state = TCPS_CLOSING;
4157 if (th->th_flags & TH_RST)
4158 src->state = dst->state = TCPS_TIME_WAIT;
4160 /* Fall through to PASS packet */
4162 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY ||
4163 ((*state)->pickup_mode == PF_PICKUPS_ENABLED &&
4164 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) !=
4165 PFSTATE_GOT_SYN_MASK)) {
4167 * If pickup mode is hash only, do not fail on sequence checks.
4169 * If pickup mode is enabled and we did not see the SYN in
4170 	 * both directions, do not fail on sequence checks because
4171 * we do not have complete information on window scale.
4173 * Adjust expiration and fall through to PASS packet.
4174 * XXX Add a FIN check to reduce timeout?
4176 (*state)->expire = time_second;
4179 * Failure processing
4181 if ((*state)->dst.state == TCPS_SYN_SENT &&
4182 (*state)->src.state == TCPS_SYN_SENT) {
4183 /* Send RST for state mismatches during handshake */
4184 if (!(th->th_flags & TH_RST))
4185 pf_send_tcp((*state)->rule.ptr, pd->af,
4186 pd->dst, pd->src, th->th_dport,
4187 th->th_sport, ntohl(th->th_ack), 0,
4189 (*state)->rule.ptr->return_ttl, 1, 0,
4190 pd->eh, kif->pfik_ifp);
4194 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4195 kprintf("pf: BAD state: ");
4196 pf_print_state(*state);
4197 pf_print_flags(th->th_flags);
4198 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4199 "pkts=%llu:%llu dir=%s,%s\n",
4200 seq, orig_seq, ack, pd->p_len, ackskew,
4201 (unsigned long long)(*state)->packets[0],
4202 (unsigned long long)(*state)->packets[1],
4203 pd->dir == PF_IN ? "in" : "out",
4204 pd->dir == (*state)->direction ? "fwd" : "rev");
4205 kprintf("pf: State failure on: %c %c %c %c | %c %c\n",
4206 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4207 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4209 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4210 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4211 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4212 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4214 REASON_SET(reason, PFRES_BADSTATE);
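/*
 * Relaxed TCP tracking for sloppy states: only the TCP flags drive the
 * connection state machine, no sequence window validation is done.
 */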
4222 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4223 struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4225 struct tcphdr *th = pd->hdr.tcp;
4227 if (th->th_flags & TH_SYN)
4228 if (src->state < TCPS_SYN_SENT)
4229 src->state = TCPS_SYN_SENT;
4230 if (th->th_flags & TH_FIN)
4231 if (src->state < TCPS_CLOSING)
4232 src->state = TCPS_CLOSING;
4233 if (th->th_flags & TH_ACK) {
4234 if (dst->state == TCPS_SYN_SENT) {
4235 dst->state = TCPS_ESTABLISHED;
4236 if (src->state == TCPS_ESTABLISHED &&
4237 (*state)->src_node != NULL &&
4238 pf_src_connlimit(state)) {
4239 REASON_SET(reason, PFRES_SRCLIMIT);
4242 } else if (dst->state == TCPS_CLOSING) {
4243 dst->state = TCPS_FIN_WAIT_2;
4244 } else if (src->state == TCPS_SYN_SENT &&
4245 dst->state < TCPS_SYN_SENT) {
4247 * Handle a special sloppy case where we only see one
4248 			 * half of the connection. If there is an ACK after
4249 * the initial SYN without ever seeing a packet from
4250 * the destination, set the connection to established.
4252 dst->state = src->state = TCPS_ESTABLISHED;
4253 if ((*state)->src_node != NULL &&
4254 pf_src_connlimit(state)) {
4255 REASON_SET(reason, PFRES_SRCLIMIT);
4258 } else if (src->state == TCPS_CLOSING &&
4259 dst->state == TCPS_ESTABLISHED &&
4262 * Handle the closing of half connections where we
4263 * don't see the full bidirectional FIN/ACK+ACK
4266 dst->state = TCPS_CLOSING;
4269 if (th->th_flags & TH_RST)
4270 src->state = dst->state = TCPS_TIME_WAIT;
4272 /* update expire time */
4273 (*state)->expire = time_second;
4274 if (src->state >= TCPS_FIN_WAIT_2 &&
4275 dst->state >= TCPS_FIN_WAIT_2)
4276 (*state)->timeout = PFTM_TCP_CLOSED;
4277 else if (src->state >= TCPS_CLOSING &&
4278 dst->state >= TCPS_CLOSING)
4279 (*state)->timeout = PFTM_TCP_FIN_WAIT;
4280 else if (src->state < TCPS_ESTABLISHED ||
4281 dst->state < TCPS_ESTABLISHED)
4282 (*state)->timeout = PFTM_TCP_OPENING;
4283 else if (src->state >= TCPS_CLOSING ||
4284 dst->state >= TCPS_CLOSING)
4285 (*state)->timeout = PFTM_TCP_CLOSING;
4287 (*state)->timeout = PFTM_TCP_ESTABLISHED;
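/*
 * Match a TCP packet against the state table, handle the SYN proxy
 * handshake phases, run sequence tracking and apply any NAT rewrite.
 */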
4293 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4294 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4297 struct pf_state_key_cmp key;
4298 struct tcphdr *th = pd->hdr.tcp;
4300 struct pf_state_peer *src, *dst;
4301 struct pf_state_key *sk;
4304 key.proto = IPPROTO_TCP;
4305 if (direction == PF_IN) { /* wire side, straight */
4306 PF_ACPY(&key.addr[0], pd->src, key.af);
4307 PF_ACPY(&key.addr[1], pd->dst, key.af);
4308 key.port[0] = th->th_sport;
4309 key.port[1] = th->th_dport;
4310 } else { /* stack side, reverse */
4311 PF_ACPY(&key.addr[1], pd->src, key.af);
4312 PF_ACPY(&key.addr[0], pd->dst, key.af);
4313 key.port[1] = th->th_sport;
4314 key.port[0] = th->th_dport;
4317 STATE_LOOKUP(kif, &key, direction, *state, m);
4319 if (direction == (*state)->direction) {
4320 src = &(*state)->src;
4321 dst = &(*state)->dst;
4323 src = &(*state)->dst;
4324 dst = &(*state)->src;
4327 sk = (*state)->key[pd->didx];
4329 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4330 if (direction != (*state)->direction) {
4331 REASON_SET(reason, PFRES_SYNPROXY);
4332 return (PF_SYNPROXY_DROP);
4334 if (th->th_flags & TH_SYN) {
4335 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4336 REASON_SET(reason, PFRES_SYNPROXY);
4339 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4340 pd->src, th->th_dport, th->th_sport,
4341 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4342 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
4344 REASON_SET(reason, PFRES_SYNPROXY);
4345 return (PF_SYNPROXY_DROP);
4346 } else if (!(th->th_flags & TH_ACK) ||
4347 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4348 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4349 REASON_SET(reason, PFRES_SYNPROXY);
4351 } else if ((*state)->src_node != NULL &&
4352 pf_src_connlimit(state)) {
4353 REASON_SET(reason, PFRES_SRCLIMIT);
4356 (*state)->src.state = PF_TCPS_PROXY_DST;
4358 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4359 if (direction == (*state)->direction) {
4360 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4361 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4362 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4363 REASON_SET(reason, PFRES_SYNPROXY);
4366 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4367 if ((*state)->dst.seqhi == 1)
4368 (*state)->dst.seqhi = htonl(karc4random());
4369 pf_send_tcp((*state)->rule.ptr, pd->af,
4370 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4371 sk->port[pd->sidx], sk->port[pd->didx],
4372 (*state)->dst.seqhi, 0, TH_SYN, 0,
4373 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4374 REASON_SET(reason, PFRES_SYNPROXY);
4375 return (PF_SYNPROXY_DROP);
4376 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4378 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4379 REASON_SET(reason, PFRES_SYNPROXY);
4382 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4383 (*state)->dst.seqlo = ntohl(th->th_seq);
4384 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4385 pd->src, th->th_dport, th->th_sport,
4386 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4387 TH_ACK, (*state)->src.max_win, 0, 0, 0,
4388 (*state)->tag, NULL, NULL);
4389 pf_send_tcp((*state)->rule.ptr, pd->af,
4390 &sk->addr[pd->sidx], &sk->addr[pd->didx],
4391 sk->port[pd->sidx], sk->port[pd->didx],
4392 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4393 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4395 (*state)->src.seqdiff = (*state)->dst.seqhi -
4396 (*state)->src.seqlo;
4397 (*state)->dst.seqdiff = (*state)->src.seqhi -
4398 (*state)->dst.seqlo;
4399 (*state)->src.seqhi = (*state)->src.seqlo +
4400 (*state)->dst.max_win;
4401 (*state)->dst.seqhi = (*state)->dst.seqlo +
4402 (*state)->src.max_win;
4403 (*state)->src.wscale = (*state)->dst.wscale = 0;
4404 (*state)->src.state = (*state)->dst.state =
4406 REASON_SET(reason, PFRES_SYNPROXY);
4407 return (PF_SYNPROXY_DROP);
4411 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4412 dst->state >= TCPS_FIN_WAIT_2 &&
4413 src->state >= TCPS_FIN_WAIT_2) {
4414 if (pf_status.debug >= PF_DEBUG_MISC) {
4415 kprintf("pf: state reuse ");
4416 pf_print_state(*state);
4417 pf_print_flags(th->th_flags);
4420 /* XXX make sure it's the same direction ?? */
4421 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4422 pf_unlink_state(*state);
4427 if ((*state)->state_flags & PFSTATE_SLOPPY) {
4428 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4431 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4432 	    &copyback) == PF_DROP)
4436 /* translate source/destination address, if necessary */
4437 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4438 struct pf_state_key *nk = (*state)->key[pd->didx];
4440 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4441 nk->port[pd->sidx] != th->th_sport)
4442 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4443 &th->th_sum, &nk->addr[pd->sidx],
4444 nk->port[pd->sidx], 0, pd->af);
4446 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4447 nk->port[pd->didx] != th->th_dport) {
4449 * If we don't redispatch the packet will go into
4450 * the protocol stack on the wrong cpu for the
4451 * post-translated address.
4453 m->m_flags &= ~M_HASH;
4454 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4455 &th->th_sum, &nk->addr[pd->didx],
4456 nk->port[pd->didx], 0, pd->af);
4461 /* Copyback sequence modulation or stateful scrub changes if needed */
4463 m_copyback(m, off, sizeof(*th), (caddr_t)th);
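/*
 * Match a UDP packet against the state table and apply any NAT rewrite;
 * peers advance from SINGLE to MULTIPLE once both sides have spoken.
 */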
4469 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4470 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4472 struct pf_state_peer *src, *dst;
4473 struct pf_state_key_cmp key;
4474 struct udphdr *uh = pd->hdr.udp;
4477 key.proto = IPPROTO_UDP;
4478 if (direction == PF_IN) { /* wire side, straight */
4479 PF_ACPY(&key.addr[0], pd->src, key.af);
4480 PF_ACPY(&key.addr[1], pd->dst, key.af);
4481 key.port[0] = uh->uh_sport;
4482 key.port[1] = uh->uh_dport;
4483 } else { /* stack side, reverse */
4484 PF_ACPY(&key.addr[1], pd->src, key.af);
4485 PF_ACPY(&key.addr[0], pd->dst, key.af);
4486 key.port[1] = uh->uh_sport;
4487 key.port[0] = uh->uh_dport;
4490 STATE_LOOKUP(kif, &key, direction, *state, m);
4492 if (direction == (*state)->direction) {
4493 src = &(*state)->src;
4494 dst = &(*state)->dst;
4496 src = &(*state)->dst;
4497 dst = &(*state)->src;
4501 if (src->state < PFUDPS_SINGLE)
4502 src->state = PFUDPS_SINGLE;
4503 if (dst->state == PFUDPS_SINGLE)
4504 dst->state = PFUDPS_MULTIPLE;
4506 /* update expire time */
4507 (*state)->expire = time_second;
4508 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4509 (*state)->timeout = PFTM_UDP_MULTIPLE;
4511 (*state)->timeout = PFTM_UDP_SINGLE;
4513 /* translate source/destination address, if necessary */
4514 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4515 struct pf_state_key *nk = (*state)->key[pd->didx];
4517 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4518 nk->port[pd->sidx] != uh->uh_sport)
4519 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4520 &uh->uh_sum, &nk->addr[pd->sidx],
4521 nk->port[pd->sidx], 1, pd->af);
4523 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4524 nk->port[pd->didx] != uh->uh_dport) {
4526 * If we don't redispatch the packet will go into
4527 * the protocol stack on the wrong cpu for the
4528 * post-translated address.
4530 m->m_flags &= ~M_HASH;
4531 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4532 &uh->uh_sum, &nk->addr[pd->didx],
4533 nk->port[pd->didx], 1, pd->af);
4535 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
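/*
 * Match ICMP/ICMPv6 packets: queries are looked up by icmp id, while
 * error messages are matched via the state of the embedded TCP, UDP or
 * ICMP packet that triggered them.
 */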
4542 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4543 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4545 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4546 u_int16_t icmpid = 0, *icmpsum;
4549 struct pf_state_key_cmp key;
4551 switch (pd->proto) {
4554 icmptype = pd->hdr.icmp->icmp_type;
4555 icmpid = pd->hdr.icmp->icmp_id;
4556 icmpsum = &pd->hdr.icmp->icmp_cksum;
4558 if (icmptype == ICMP_UNREACH ||
4559 icmptype == ICMP_SOURCEQUENCH ||
4560 icmptype == ICMP_REDIRECT ||
4561 icmptype == ICMP_TIMXCEED ||
4562 icmptype == ICMP_PARAMPROB)
4567 case IPPROTO_ICMPV6:
4568 icmptype = pd->hdr.icmp6->icmp6_type;
4569 icmpid = pd->hdr.icmp6->icmp6_id;
4570 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4572 if (icmptype == ICMP6_DST_UNREACH ||
4573 icmptype == ICMP6_PACKET_TOO_BIG ||
4574 icmptype == ICMP6_TIME_EXCEEDED ||
4575 icmptype == ICMP6_PARAM_PROB)
4584 * ICMP query/reply message not related to a TCP/UDP packet.
4585 * Search for an ICMP state.
4588 key.proto = pd->proto;
4589 key.port[0] = key.port[1] = icmpid;
4590 if (direction == PF_IN) { /* wire side, straight */
4591 PF_ACPY(&key.addr[0], pd->src, key.af);
4592 PF_ACPY(&key.addr[1], pd->dst, key.af);
4593 } else { /* stack side, reverse */
4594 PF_ACPY(&key.addr[1], pd->src, key.af);
4595 PF_ACPY(&key.addr[0], pd->dst, key.af);
4598 STATE_LOOKUP(kif, &key, direction, *state, m);
4600 (*state)->expire = time_second;
4601 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4603 /* translate source/destination address, if necessary */
4604 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4605 struct pf_state_key *nk = (*state)->key[pd->didx];
4610 if (PF_ANEQ(pd->src,
4611 &nk->addr[pd->sidx], AF_INET))
4612 pf_change_a(&saddr->v4.s_addr,
4614 nk->addr[pd->sidx].v4.s_addr, 0);
4616 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4618 pf_change_a(&daddr->v4.s_addr,
4620 nk->addr[pd->didx].v4.s_addr, 0);
4623 pd->hdr.icmp->icmp_id) {
4624 pd->hdr.icmp->icmp_cksum =
4626 pd->hdr.icmp->icmp_cksum, icmpid,
4627 nk->port[pd->sidx], 0);
4628 pd->hdr.icmp->icmp_id =
4632 m_copyback(m, off, ICMP_MINLEN,
4633 (caddr_t)pd->hdr.icmp);
4638 if (PF_ANEQ(pd->src,
4639 &nk->addr[pd->sidx], AF_INET6))
4641 &pd->hdr.icmp6->icmp6_cksum,
4642 &nk->addr[pd->sidx], 0);
4644 if (PF_ANEQ(pd->dst,
4645 &nk->addr[pd->didx], AF_INET6))
4647 &pd->hdr.icmp6->icmp6_cksum,
4648 &nk->addr[pd->didx], 0);
4651 sizeof(struct icmp6_hdr),
4652 (caddr_t)pd->hdr.icmp6);
4661 * ICMP error message in response to a TCP/UDP packet.
4662 * Extract the inner TCP/UDP header and search for that state.
4665 struct pf_pdesc pd2;
4670 struct ip6_hdr h2_6;
4677 /* Payload packet is from the opposite direction. */
4678 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4679 pd2.didx = (direction == PF_IN) ? 0 : 1;
4683 /* offset of h2 in mbuf chain */
4684 ipoff2 = off + ICMP_MINLEN;
4686 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4687 NULL, reason, pd2.af)) {
4688 DPFPRINTF(PF_DEBUG_MISC,
4689 ("pf: ICMP error message too short "
4694 * ICMP error messages don't refer to non-first
4697 if (h2.ip_off & htons(IP_OFFMASK)) {
4698 REASON_SET(reason, PFRES_FRAG);
4702 /* offset of protocol header that follows h2 */
4703 off2 = ipoff2 + (h2.ip_hl << 2);
4705 pd2.proto = h2.ip_p;
4706 pd2.src = (struct pf_addr *)&h2.ip_src;
4707 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4708 pd2.ip_sum = &h2.ip_sum;
4713 ipoff2 = off + sizeof(struct icmp6_hdr);
4715 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4716 NULL, reason, pd2.af)) {
4717 DPFPRINTF(PF_DEBUG_MISC,
4718 ("pf: ICMP error message too short "
4722 pd2.proto = h2_6.ip6_nxt;
4723 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4724 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4726 off2 = ipoff2 + sizeof(h2_6);
4728 switch (pd2.proto) {
4729 case IPPROTO_FRAGMENT:
4731 * ICMPv6 error messages for
4732 * non-first fragments
4734 REASON_SET(reason, PFRES_FRAG);
4737 case IPPROTO_HOPOPTS:
4738 case IPPROTO_ROUTING:
4739 case IPPROTO_DSTOPTS: {
4740 /* get next header and header length */
4741 struct ip6_ext opt6;
4743 if (!pf_pull_hdr(m, off2, &opt6,
4744 sizeof(opt6), NULL, reason,
4746 DPFPRINTF(PF_DEBUG_MISC,
4747 ("pf: ICMPv6 short opt\n"));
4750 if (pd2.proto == IPPROTO_AH)
4751 off2 += (opt6.ip6e_len + 2) * 4;
4753 off2 += (opt6.ip6e_len + 1) * 8;
4754 pd2.proto = opt6.ip6e_nxt;
4755 /* goto the next header */
4762 } while (!terminal);
4766 DPFPRINTF(PF_DEBUG_MISC,
4767 ("pf: ICMP AF %d unknown (ip6)\n", pd->af));
4772 switch (pd2.proto) {
4776 struct pf_state_peer *src, *dst;
4781 * Only the first 8 bytes of the TCP header can be
4782 * expected. Don't access any TCP header fields after
4783 * th_seq, an ackskew test is not possible.
4785 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4787 DPFPRINTF(PF_DEBUG_MISC,
4788 ("pf: ICMP error message too short "
4794 key.proto = IPPROTO_TCP;
4795 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4796 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4797 key.port[pd2.sidx] = th.th_sport;
4798 key.port[pd2.didx] = th.th_dport;
4800 STATE_LOOKUP(kif, &key, direction, *state, m);
4802 if (direction == (*state)->direction) {
4803 src = &(*state)->dst;
4804 dst = &(*state)->src;
4806 src = &(*state)->src;
4807 dst = &(*state)->dst;
4810 if (src->wscale && dst->wscale)
4811 dws = dst->wscale & PF_WSCALE_MASK;
4815 /* Demodulate sequence number */
4816 seq = ntohl(th.th_seq) - src->seqdiff;
4818 pf_change_a(&th.th_seq, icmpsum,
4823 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4824 (!SEQ_GEQ(src->seqhi, seq) ||
4825 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4826 if (pf_status.debug >= PF_DEBUG_MISC) {
4827 kprintf("pf: BAD ICMP %d:%d ",
4828 icmptype, pd->hdr.icmp->icmp_code);
4829 pf_print_host(pd->src, 0, pd->af);
4831 pf_print_host(pd->dst, 0, pd->af);
4832 kprintf(" state: ");
4833 pf_print_state(*state);
4834 kprintf(" seq=%u\n", seq);
4836 REASON_SET(reason, PFRES_BADSTATE);
4839 if (pf_status.debug >= PF_DEBUG_MISC) {
4840 kprintf("pf: OK ICMP %d:%d ",
4841 icmptype, pd->hdr.icmp->icmp_code);
4842 pf_print_host(pd->src, 0, pd->af);
4844 pf_print_host(pd->dst, 0, pd->af);
4845 kprintf(" state: ");
4846 pf_print_state(*state);
4847 kprintf(" seq=%u\n", seq);
4851 /* translate source/destination address, if necessary */
4852 if ((*state)->key[PF_SK_WIRE] !=
4853 (*state)->key[PF_SK_STACK]) {
4854 struct pf_state_key *nk =
4855 (*state)->key[pd->didx];
4857 if (PF_ANEQ(pd2.src,
4858 &nk->addr[pd2.sidx], pd2.af) ||
4859 nk->port[pd2.sidx] != th.th_sport)
4860 pf_change_icmp(pd2.src, &th.th_sport,
4861 daddr, &nk->addr[pd2.sidx],
4862 nk->port[pd2.sidx], NULL,
4863 pd2.ip_sum, icmpsum,
4864 pd->ip_sum, 0, pd2.af);
4866 if (PF_ANEQ(pd2.dst,
4867 &nk->addr[pd2.didx], pd2.af) ||
4868 nk->port[pd2.didx] != th.th_dport)
4869 pf_change_icmp(pd2.dst, &th.th_dport,
4870 NULL, /* XXX Inbound NAT? */
4871 &nk->addr[pd2.didx],
4872 nk->port[pd2.didx], NULL,
4873 pd2.ip_sum, icmpsum,
4874 pd->ip_sum, 0, pd2.af);
4882 m_copyback(m, off, ICMP_MINLEN,
4883 (caddr_t)pd->hdr.icmp);
4884 m_copyback(m, ipoff2, sizeof(h2),
4891 sizeof(struct icmp6_hdr),
4892 (caddr_t)pd->hdr.icmp6);
4893 m_copyback(m, ipoff2, sizeof(h2_6),
4898 m_copyback(m, off2, 8, (caddr_t)&th);
4907 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4908 NULL, reason, pd2.af)) {
4909 DPFPRINTF(PF_DEBUG_MISC,
4910 ("pf: ICMP error message too short "
4916 key.proto = IPPROTO_UDP;
4917 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4918 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4919 key.port[pd2.sidx] = uh.uh_sport;
4920 key.port[pd2.didx] = uh.uh_dport;
4922 STATE_LOOKUP(kif, &key, direction, *state, m);
4924 /* translate source/destination address, if necessary */
4925 if ((*state)->key[PF_SK_WIRE] !=
4926 (*state)->key[PF_SK_STACK]) {
4927 struct pf_state_key *nk =
4928 (*state)->key[pd->didx];
4930 if (PF_ANEQ(pd2.src,
4931 &nk->addr[pd2.sidx], pd2.af) ||
4932 nk->port[pd2.sidx] != uh.uh_sport)
4933 pf_change_icmp(pd2.src, &uh.uh_sport,
4934 daddr, &nk->addr[pd2.sidx],
4935 nk->port[pd2.sidx], &uh.uh_sum,
4936 pd2.ip_sum, icmpsum,
4937 pd->ip_sum, 1, pd2.af);
4939 if (PF_ANEQ(pd2.dst,
4940 &nk->addr[pd2.didx], pd2.af) ||
4941 nk->port[pd2.didx] != uh.uh_dport)
4942 pf_change_icmp(pd2.dst, &uh.uh_dport,
4943 NULL, /* XXX Inbound NAT? */
4944 &nk->addr[pd2.didx],
4945 nk->port[pd2.didx], &uh.uh_sum,
4946 pd2.ip_sum, icmpsum,
4947 pd->ip_sum, 1, pd2.af);
4952 m_copyback(m, off, ICMP_MINLEN,
4953 (caddr_t)pd->hdr.icmp);
4954 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4960 sizeof(struct icmp6_hdr),
4961 (caddr_t)pd->hdr.icmp6);
4962 m_copyback(m, ipoff2, sizeof(h2_6),
4967 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4974 case IPPROTO_ICMP: {
4977 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4978 NULL, reason, pd2.af)) {
4979 DPFPRINTF(PF_DEBUG_MISC,
4980 ("pf: ICMP error message too short i"
4986 key.proto = IPPROTO_ICMP;
4987 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4988 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4989 key.port[0] = key.port[1] = iih.icmp_id;
4991 STATE_LOOKUP(kif, &key, direction, *state, m);
4993 /* translate source/destination address, if necessary */
4994 if ((*state)->key[PF_SK_WIRE] !=
4995 (*state)->key[PF_SK_STACK]) {
4996 struct pf_state_key *nk =
4997 (*state)->key[pd->didx];
4999 if (PF_ANEQ(pd2.src,
5000 &nk->addr[pd2.sidx], pd2.af) ||
5001 nk->port[pd2.sidx] != iih.icmp_id)
5002 pf_change_icmp(pd2.src, &iih.icmp_id,
5003 daddr, &nk->addr[pd2.sidx],
5004 nk->port[pd2.sidx], NULL,
5005 pd2.ip_sum, icmpsum,
5006 pd->ip_sum, 0, AF_INET);
5008 if (PF_ANEQ(pd2.dst,
5009 &nk->addr[pd2.didx], pd2.af) ||
5010 nk->port[pd2.didx] != iih.icmp_id)
5011 pf_change_icmp(pd2.dst, &iih.icmp_id,
5012 NULL, /* XXX Inbound NAT? */
5013 &nk->addr[pd2.didx],
5014 nk->port[pd2.didx], NULL,
5015 pd2.ip_sum, icmpsum,
5016 pd->ip_sum, 0, AF_INET);
5018 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5019 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5020 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5027 case IPPROTO_ICMPV6: {
5028 struct icmp6_hdr iih;
5030 if (!pf_pull_hdr(m, off2, &iih,
5031 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5032 DPFPRINTF(PF_DEBUG_MISC,
5033 ("pf: ICMP error message too short "
5039 key.proto = IPPROTO_ICMPV6;
5040 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5041 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5042 key.port[0] = key.port[1] = iih.icmp6_id;
5044 STATE_LOOKUP(kif, &key, direction, *state, m);
5046 /* translate source/destination address, if necessary */
5047 if ((*state)->key[PF_SK_WIRE] !=
5048 (*state)->key[PF_SK_STACK]) {
5049 struct pf_state_key *nk =
5050 (*state)->key[pd->didx];
5052 if (PF_ANEQ(pd2.src,
5053 &nk->addr[pd2.sidx], pd2.af) ||
5054 nk->port[pd2.sidx] != iih.icmp6_id)
5055 pf_change_icmp(pd2.src, &iih.icmp6_id,
5056 daddr, &nk->addr[pd2.sidx],
5057 nk->port[pd2.sidx], NULL,
5058 pd2.ip_sum, icmpsum,
5059 pd->ip_sum, 0, AF_INET6);
5061 if (PF_ANEQ(pd2.dst,
5062 &nk->addr[pd2.didx], pd2.af) ||
5063 nk->port[pd2.didx] != iih.icmp6_id)
5064 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5065 NULL, /* XXX Inbound NAT? */
5066 &nk->addr[pd2.didx],
5067 nk->port[pd2.didx], NULL,
5068 pd2.ip_sum, icmpsum,
5069 pd->ip_sum, 0, AF_INET6);
5071 m_copyback(m, off, sizeof(struct icmp6_hdr),
5072 (caddr_t)pd->hdr.icmp6);
5073 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5074 m_copyback(m, off2, sizeof(struct icmp6_hdr),
5084 key.proto = pd2.proto;
5085 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5086 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5087 key.port[0] = key.port[1] = 0;
5089 STATE_LOOKUP(kif, &key, direction, *state, m);
5091 /* translate source/destination address, if necessary */
5092 if ((*state)->key[PF_SK_WIRE] !=
5093 (*state)->key[PF_SK_STACK]) {
5094 struct pf_state_key *nk =
5095 (*state)->key[pd->didx];
5097 if (PF_ANEQ(pd2.src,
5098 &nk->addr[pd2.sidx], pd2.af))
5099 pf_change_icmp(pd2.src, NULL, daddr,
5100 &nk->addr[pd2.sidx], 0, NULL,
5101 pd2.ip_sum, icmpsum,
5102 pd->ip_sum, 0, pd2.af);
5104 if (PF_ANEQ(pd2.dst,
5105 &nk->addr[pd2.didx], pd2.af))
5106 pf_change_icmp(pd2.dst, NULL,
5107 NULL, /* XXX Inbound NAT? */
5108 &nk->addr[pd2.didx], 0, NULL,
5109 pd2.ip_sum, icmpsum,
5110 pd->ip_sum, 0, pd2.af);
5115 m_copyback(m, off, ICMP_MINLEN,
5116 (caddr_t)pd->hdr.icmp);
5117 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5123 sizeof(struct icmp6_hdr),
5124 (caddr_t)pd->hdr.icmp6);
5125 m_copyback(m, ipoff2, sizeof(h2_6),
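/*
 * pf_test_state_other() performs state matching for protocols other than
 * TCP, UDP and ICMP.  States are keyed on address family, protocol and the
 * two addresses only (both ports are zero), and the simple peer state
 * machine merely tracks whether traffic has been seen in one direction
 * (PFOTHERS_SINGLE) or both (PFOTHERS_MULTIPLE) to pick the idle timeout.
 */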
5139 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5140 struct mbuf *m, struct pf_pdesc *pd)
5142 struct pf_state_peer *src, *dst;
5143 struct pf_state_key_cmp key;
5146 key.proto = pd->proto;
5147 if (direction == PF_IN) {
5148 PF_ACPY(&key.addr[0], pd->src, key.af);
5149 PF_ACPY(&key.addr[1], pd->dst, key.af);
5150 key.port[0] = key.port[1] = 0;
5152 PF_ACPY(&key.addr[1], pd->src, key.af);
5153 PF_ACPY(&key.addr[0], pd->dst, key.af);
5154 key.port[1] = key.port[0] = 0;
5157 STATE_LOOKUP(kif, &key, direction, *state, m);
5159 if (direction == (*state)->direction) {
5160 src = &(*state)->src;
5161 dst = &(*state)->dst;
5163 src = &(*state)->dst;
5164 dst = &(*state)->src;
5168 if (src->state < PFOTHERS_SINGLE)
5169 src->state = PFOTHERS_SINGLE;
5170 if (dst->state == PFOTHERS_SINGLE)
5171 dst->state = PFOTHERS_MULTIPLE;
5173 /* update expire time */
5174 (*state)->expire = time_second;
5175 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5176 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5178 (*state)->timeout = PFTM_OTHER_SINGLE;
5180 /* translate source/destination address, if necessary */
5181 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5182 struct pf_state_key *nk = (*state)->key[pd->didx];
5191 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5192 pf_change_a(&pd->src->v4.s_addr,
5194 nk->addr[pd->sidx].v4.s_addr,
5198 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5199 pf_change_a(&pd->dst->v4.s_addr,
5201 nk->addr[pd->didx].v4.s_addr,
5208 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5209 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5211 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5212 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5220 * ipoff and off are measured from the start of the mbuf chain.
5221 * h must be at "ipoff" on the mbuf chain.
5224 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5225 u_short *actionp, u_short *reasonp, sa_family_t af)
5230 struct ip *h = mtod(m, struct ip *);
5231 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
5235 ACTION_SET(actionp, PF_PASS);
5237 ACTION_SET(actionp, PF_DROP);
5238 REASON_SET(reasonp, PFRES_FRAG);
5242 if (m->m_pkthdr.len < off + len ||
5243 h->ip_len < off + len) {
5244 ACTION_SET(actionp, PF_DROP);
5245 REASON_SET(reasonp, PFRES_SHORT);
5253 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5255 if (m->m_pkthdr.len < off + len ||
5256 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5257 (unsigned)(off + len)) {
5258 ACTION_SET(actionp, PF_DROP);
5259 REASON_SET(reasonp, PFRES_SHORT);
5266 m_copydata(m, off, len, p);
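/*
 * pf_routable() reports whether a route to the given address exists.
 * Without an input interface this is a plain no-route check; when a kif
 * is supplied, the lookup additionally serves as a loose uRPF check
 * against that interface (ipsec "enc" interfaces are skipped).
 */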
5271 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
5273 struct sockaddr_in *dst;
5277 struct sockaddr_in6 *dst6;
5278 struct route_in6 ro;
5282 struct radix_node *rn;
5287 bzero(&ro, sizeof(ro));
5290 dst = satosin(&ro.ro_dst);
5291 dst->sin_family = AF_INET;
5292 dst->sin_len = sizeof(*dst);
5293 dst->sin_addr = addr->v4;
5297 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5298 dst6->sin6_family = AF_INET6;
5299 dst6->sin6_len = sizeof(*dst6);
5300 dst6->sin6_addr = addr->v6;
5307 /* Skip checks for ipsec interfaces */
5308 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5311 rtalloc_ign((struct route *)&ro, 0);
5313 if (ro.ro_rt != NULL) {
5314 /* No interface given, this is a no-route check */
5318 if (kif->pfik_ifp == NULL) {
5323 /* Perform uRPF check if passed input interface */
5325 rn = (struct radix_node *)ro.ro_rt;
5327 rt = (struct rtentry *)rn;
5330 if (kif->pfik_ifp == ifp)
5333 } while (check_mpath == 1 && rn != NULL && ret == 0);
5337 if (ro.ro_rt != NULL)
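/*
 * pf_rtlabel_match() looks up the route for the given address so that the
 * route's label can be compared with the route label carried in the
 * pf_addr_wrap (used by address specifications that match on route labels).
 */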
5343 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
5345 struct sockaddr_in *dst;
5347 struct sockaddr_in6 *dst6;
5348 struct route_in6 ro;
5354 ASSERT_LWKT_TOKEN_HELD(&pf_token);
5356 bzero(&ro, sizeof(ro));
5359 dst = satosin(&ro.ro_dst);
5360 dst->sin_family = AF_INET;
5361 dst->sin_len = sizeof(*dst);
5362 dst->sin_addr = addr->v4;
5366 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5367 dst6->sin6_family = AF_INET6;
5368 dst6->sin6_len = sizeof(*dst6);
5369 dst6->sin6_addr = addr->v6;
5376 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING));
5378 if (ro.ro_rt != NULL) {
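/*
 * pf_route() implements route-to, reply-to and dup-to for IPv4.  The
 * matching rule (or the state's cached rt_addr/rt_kif) supplies the next
 * hop and outgoing interface; the packet is re-filtered with pf_test() on
 * that interface and then transmitted directly, fragmenting it when it
 * exceeds the interface MTU (or generating ICMP_UNREACH_NEEDFRAG when DF
 * is set).  Example of a rule that ends up here (interface and addresses
 * are made up):
 *
 *	pass in on $int_if route-to (em1 192.0.2.1) from 10.0.0.0/8 to any
 */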
5387 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5388 struct pf_state *s, struct pf_pdesc *pd)
5390 struct mbuf *m0, *m1;
5391 struct route iproute;
5392 struct route *ro = NULL;
5393 struct sockaddr_in *dst;
5395 struct ifnet *ifp = NULL;
5396 struct pf_addr naddr;
5397 struct pf_src_node *sn = NULL;
5404 ASSERT_LWKT_TOKEN_HELD(&pf_token);
5406 if (m == NULL || *m == NULL || r == NULL ||
5407 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5408 panic("pf_route: invalid parameters");
5410 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5411 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5412 (*m)->m_pkthdr.pf.routed = 1;
5414 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5421 if (r->rt == PF_DUPTO) {
5422 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) {
5426 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5432 if (m0->m_len < sizeof(struct ip)) {
5433 DPFPRINTF(PF_DEBUG_URGENT,
5434 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5438 ip = mtod(m0, struct ip *);
5441 bzero((caddr_t)ro, sizeof(*ro));
5442 dst = satosin(&ro->ro_dst);
5443 dst->sin_family = AF_INET;
5444 dst->sin_len = sizeof(*dst);
5445 dst->sin_addr = ip->ip_dst;
5447 if (r->rt == PF_FASTROUTE) {
5449 if (ro->ro_rt == 0) {
5450 ipstat.ips_noroute++;
5454 ifp = ro->ro_rt->rt_ifp;
5455 ro->ro_rt->rt_use++;
5457 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
5458 dst = satosin(ro->ro_rt->rt_gateway);
5460 if (TAILQ_EMPTY(&r->rpool.list)) {
5461 DPFPRINTF(PF_DEBUG_URGENT,
5462 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
5466 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5468 if (!PF_AZERO(&naddr, AF_INET))
5469 dst->sin_addr.s_addr = naddr.v4.s_addr;
5470 ifp = r->rpool.cur->kif ?
5471 r->rpool.cur->kif->pfik_ifp : NULL;
5473 if (!PF_AZERO(&s->rt_addr, AF_INET))
5474 dst->sin_addr.s_addr =
5475 s->rt_addr.v4.s_addr;
5476 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5483 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
5485 } else if (m0 == NULL) {
5488 if (m0->m_len < sizeof(struct ip)) {
5489 DPFPRINTF(PF_DEBUG_URGENT,
5490 ("pf_route: m0->m_len < sizeof(struct ip)\n"));
5493 ip = mtod(m0, struct ip *);
5496 /* Copied from FreeBSD 5.1-CURRENT ip_output. */
5497 m0->m_pkthdr.csum_flags |= CSUM_IP;
5498 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
5499 if (sw_csum & CSUM_DELAY_DATA) {
5500 in_delayed_cksum(m0);
5501 sw_csum &= ~CSUM_DELAY_DATA;
5503 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
5505 if (ip->ip_len <= ifp->if_mtu ||
5506 (ifp->if_hwassist & CSUM_FRAGMENT &&
5507 (ip->ip_off & IP_DF) == 0)) {
5508 ip->ip_len = htons(ip->ip_len);
5509 ip->ip_off = htons(ip->ip_off);
5511 if (sw_csum & CSUM_DELAY_IP) {
5513 if (ip->ip_v == IPVERSION &&
5514 (ip->ip_hl << 2) == sizeof(*ip)) {
5515 ip->ip_sum = in_cksum_hdr(ip);
5517 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5520 lwkt_reltoken(&pf_token);
5521 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt);
5522 lwkt_gettoken(&pf_token);
5527 * Too large for interface; fragment if possible.
5528 * Must be able to put at least 8 bytes per fragment.
5530 if (ip->ip_off & IP_DF) {
5531 ipstat.ips_cantfrag++;
5532 if (r->rt != PF_DUPTO) {
5533 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5541 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
5546 for (m0 = m1; m0; m0 = m1) {
5550 lwkt_reltoken(&pf_token);
5551 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
5553 lwkt_gettoken(&pf_token);
5559 ipstat.ips_fragmented++;
5562 if (r->rt != PF_DUPTO)
5564 if (ro == &iproute && ro->ro_rt)
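/*
 * pf_route6() is the IPv6 counterpart of pf_route().  Instead of
 * fragmenting oversized packets it returns an ICMP6_PACKET_TOO_BIG error
 * (except for dup-to copies), and PF_FASTROUTE is handed straight to
 * ip6_output().
 */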
5576 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5577 struct pf_state *s, struct pf_pdesc *pd)
5580 struct route_in6 ip6route;
5581 struct route_in6 *ro;
5582 struct sockaddr_in6 *dst;
5583 struct ip6_hdr *ip6;
5584 struct ifnet *ifp = NULL;
5585 struct pf_addr naddr;
5586 struct pf_src_node *sn = NULL;
5589 if (m == NULL || *m == NULL || r == NULL ||
5590 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5591 panic("pf_route6: invalid parameters");
5593 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5594 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5595 (*m)->m_pkthdr.pf.routed = 1;
5597 if ((*m)->m_pkthdr.pf.routed++ > 3) {
5604 if (r->rt == PF_DUPTO) {
5605 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
5608 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5613 if (m0->m_len < sizeof(struct ip6_hdr)) {
5614 DPFPRINTF(PF_DEBUG_URGENT,
5615 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
5618 ip6 = mtod(m0, struct ip6_hdr *);
5621 bzero((caddr_t)ro, sizeof(*ro));
5622 dst = (struct sockaddr_in6 *)&ro->ro_dst;
5623 dst->sin6_family = AF_INET6;
5624 dst->sin6_len = sizeof(*dst);
5625 dst->sin6_addr = ip6->ip6_dst;
5628 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
5629 * so make sure pf.flags is clear.
5631 * Cheat. XXX why only in the v6 case???
5633 if (r->rt == PF_FASTROUTE) {
5634 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
5635 m0->m_pkthdr.pf.flags = 0;
5636 /* XXX Re-Check when Upgrading to > 4.4 */
5637 m0->m_pkthdr.pf.statekey = NULL;
5638 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5642 if (TAILQ_EMPTY(&r->rpool.list)) {
5643 DPFPRINTF(PF_DEBUG_URGENT,
5644 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
5648 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5650 if (!PF_AZERO(&naddr, AF_INET6))
5651 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5653 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5655 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5656 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5657 &s->rt_addr, AF_INET6);
5658 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5664 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
5666 } else if (m0 == NULL) {
5669 if (m0->m_len < sizeof(struct ip6_hdr)) {
5670 DPFPRINTF(PF_DEBUG_URGENT,
5671 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
5674 ip6 = mtod(m0, struct ip6_hdr *);
5678 * If the packet is too large for the outgoing interface,
5679 * send back an icmp6 error.
5681 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
5682 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5683 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
5684 error = nd6_output(ifp, ifp, m0, dst, NULL);
5686 in6_ifstat_inc(ifp, ifs6_in_toobig);
5687 if (r->rt != PF_DUPTO)
5688 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5694 if (r->rt != PF_DUPTO)
5706 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
5707 * off is the offset where the protocol header starts
5708 * len is the total length of protocol header plus payload
5709 * returns 0 when the checksum is valid, otherwise returns 1.
5713 * FreeBSD supports cksum offload for the following drivers.
5714 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4)
5715 * If we could make full use of it we would outperform ipfw/ipfilter in
5716 * very heavy traffic.
5717 * I have not tested this because I don't have NICs that support cksum offload.
5718 * (There might be problems. Typical phenomena would be
5719 * 1. No route message for UDP packet.
5720 * 2. No connection acceptance from external hosts regardless of rule set.)
5723 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
5730 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5732 if (m->m_pkthdr.len < off + len)
5738 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5739 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5740 sum = m->m_pkthdr.csum_data;
5742 ip = mtod(m, struct ip *);
5743 sum = in_pseudo(ip->ip_src.s_addr,
5744 ip->ip_dst.s_addr, htonl((u_short)len +
5745 m->m_pkthdr.csum_data + p));
5753 case IPPROTO_ICMPV6:
5763 if (p == IPPROTO_ICMP) {
5768 sum = in_cksum(m, len);
5772 if (m->m_len < sizeof(struct ip))
5774 sum = in_cksum_range(m, p, off, len);
5776 m->m_pkthdr.csum_flags |=
5779 m->m_pkthdr.csum_data = 0xffff;
5785 if (m->m_len < sizeof(struct ip6_hdr))
5787 sum = in6_cksum(m, p, off, len);
5790 * IPv6 H/W cksum off-load not supported yet!
5793 * m->m_pkthdr.csum_flags |=
5794 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
5795 * m->m_pkthdr.csum_data = 0xffff;
5807 tcpstat.tcps_rcvbadsum++;
5810 udpstat.udps_badsum++;
5813 icmpstat.icps_checksum++;
5816 case IPPROTO_ICMPV6:
5817 icmp6stat.icp6s_checksum++;
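/*
 * pf_find_divert() returns the divert information attached to the mbuf
 * as a PACKET_TAG_PF_DIVERT tag, or NULL if the packet carries none.
 */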
5827 pf_find_divert(struct mbuf *m)
5831 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
5834 return ((struct pf_divert *)(mtag + 1));
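/*
 * pf_get_divert() is like pf_find_divert(), but allocates, zeroes and
 * prepends a fresh divert tag when the mbuf does not carry one yet.
 */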
5838 pf_get_divert(struct mbuf *m)
5842 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
5843 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
5847 bzero(mtag + 1, sizeof(struct pf_divert));
5848 m_tag_prepend(m, mtag);
5851 return ((struct pf_divert *)(mtag + 1));
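/*
 * pf_test() is the main IPv4 filtering entry point.  It normalizes and
 * reassembles the packet, dispatches on the transport protocol to the
 * per-protocol state tests (falling back to pf_test_rule() when no state
 * matches), and finally applies IP-option policy, tagging, ALTQ queue
 * assignment, divert/translation flags, logging, statistics and route-to
 * processing.  *m0 may be freed or replaced on return.
 */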
5856 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
5857 struct ether_header *eh, struct inpcb *inp)
5859 struct pfi_kif *kif;
5860 u_short action, reason = 0, log = 0;
5861 struct mbuf *m = *m0;
5862 struct ip *h = NULL;
5863 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
5864 struct pf_state *s = NULL;
5865 struct pf_ruleset *ruleset = NULL;
5867 int off, dirndx, pqid = 0;
5869 if (!pf_status.running)
5872 memset(&pd, 0, sizeof(pd));
5873 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
5874 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
5876 kif = (struct pfi_kif *)ifp->if_pf_kif;
5879 DPFPRINTF(PF_DEBUG_URGENT,
5880 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5883 if (kif->pfik_flags & PFI_IFLAG_SKIP)
5887 if ((m->m_flags & M_PKTHDR) == 0)
5888 panic("non-M_PKTHDR is passed to pf_test");
5889 #endif /* DIAGNOSTIC */
5891 if (m->m_pkthdr.len < (int)sizeof(*h)) {
5893 REASON_SET(&reason, PFRES_SHORT);
5899 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
5900 * so make sure pf.flags is clear.
5902 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
5904 m->m_pkthdr.pf.flags = 0;
5905 /* Re-Check when updating to > 4.4 */
5906 m->m_pkthdr.pf.statekey = NULL;
5908 /* We do IP header normalization and packet reassembly here */
5909 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5913 m = *m0; /* pf_normalize messes with m0 */
5914 h = mtod(m, struct ip *);
5916 off = h->ip_hl << 2;
5917 if (off < (int)sizeof(*h)) {
5919 REASON_SET(&reason, PFRES_SHORT);
5924 pd.src = (struct pf_addr *)&h->ip_src;
5925 pd.dst = (struct pf_addr *)&h->ip_dst;
5926 pd.sport = pd.dport = NULL;
5927 pd.ip_sum = &h->ip_sum;
5928 pd.proto_sum = NULL;
5931 pd.sidx = (dir == PF_IN) ? 0 : 1;
5932 pd.didx = (dir == PF_IN) ? 1 : 0;
5935 pd.tot_len = h->ip_len;
5938 /* handle fragments that didn't get reassembled by normalization */
5939 if (h->ip_off & (IP_MF | IP_OFFMASK)) {
5940 action = pf_test_fragment(&r, dir, kif, m, h,
5951 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5952 &action, &reason, AF_INET)) {
5953 log = action != PF_PASS;
5956 pd.p_len = pd.tot_len - off - (th.th_off << 2);
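/* an empty ACK qualifies for the ALTQ priority queue (r->pqid) below */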
5957 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5959 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5960 if (action == PF_DROP)
5962 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5964 if (action == PF_PASS) {
5965 pfsync_update_state(s);
5969 } else if (s == NULL)
5970 action = pf_test_rule(&r, &s, dir, kif,
5971 m, off, h, &pd, &a, &ruleset, NULL, inp);
5979 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5980 &action, &reason, AF_INET)) {
5981 log = action != PF_PASS;
5984 if (uh.uh_dport == 0 ||
5985 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5986 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5988 REASON_SET(&reason, PFRES_SHORT);
5991 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5992 if (action == PF_PASS) {
5993 pfsync_update_state(s);
5997 } else if (s == NULL)
5998 action = pf_test_rule(&r, &s, dir, kif,
5999 m, off, h, &pd, &a, &ruleset, NULL, inp);
6003 case IPPROTO_ICMP: {
6007 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
6008 &action, &reason, AF_INET)) {
6009 log = action != PF_PASS;
6012 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
6014 if (action == PF_PASS) {
6015 pfsync_update_state(s);
6019 } else if (s == NULL)
6020 action = pf_test_rule(&r, &s, dir, kif,
6021 m, off, h, &pd, &a, &ruleset, NULL, inp);
6026 action = pf_test_state_other(&s, dir, kif, m, &pd);
6027 if (action == PF_PASS) {
6028 pfsync_update_state(s);
6032 } else if (s == NULL)
6033 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6034 &pd, &a, &ruleset, NULL, inp);
6039 if (action == PF_PASS && h->ip_hl > 5 &&
6040 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6042 REASON_SET(&reason, PFRES_IPOPTIONS);
6044 DPFPRINTF(PF_DEBUG_MISC,
6045 ("pf: dropping packet with ip options\n"));
6048 if ((s && s->tag) || r->rtableid)
6049 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6052 if (dir == PF_IN && s && s->key[PF_SK_STACK])
6053 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6057 if (action == PF_PASS && r->qid) {
6058 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
6059 if (pqid || (pd.tos & IPTOS_LOWDELAY))
6060 m->m_pkthdr.pf.qid = r->pqid;
6062 m->m_pkthdr.pf.qid = r->qid;
6063 m->m_pkthdr.pf.ecn_af = AF_INET;
6064 m->m_pkthdr.pf.hdr = h;
6065 /* add connection hash for fairq */
6068 m->m_pkthdr.pf.state_hash = s->hash;
6069 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
6075 * connections redirected to loopback should not match sockets
6076 * bound specifically to loopback due to security implications,
6077 * see tcp_input() and in_pcblookup_listen().
6079 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6080 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6081 (s->nat_rule.ptr->action == PF_RDR ||
6082 s->nat_rule.ptr->action == PF_BINAT) &&
6083 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6084 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6086 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6087 struct pf_divert *divert;
6089 if ((divert = pf_get_divert(m))) {
6090 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
6091 divert->port = r->divert.port;
6092 divert->addr.ipv4 = r->divert.addr.v4;
6099 if (s != NULL && s->nat_rule.ptr != NULL &&
6100 s->nat_rule.ptr->log & PF_LOG_ALL)
6101 lr = s->nat_rule.ptr;
6104 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
6108 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6109 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6111 if (action == PF_PASS || r->action == PF_DROP) {
6112 dirndx = (dir == PF_OUT);
6113 r->packets[dirndx]++;
6114 r->bytes[dirndx] += pd.tot_len;
6116 a->packets[dirndx]++;
6117 a->bytes[dirndx] += pd.tot_len;
6120 if (s->nat_rule.ptr != NULL) {
6121 s->nat_rule.ptr->packets[dirndx]++;
6122 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6124 if (s->src_node != NULL) {
6125 s->src_node->packets[dirndx]++;
6126 s->src_node->bytes[dirndx] += pd.tot_len;
6128 if (s->nat_src_node != NULL) {
6129 s->nat_src_node->packets[dirndx]++;
6130 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6132 dirndx = (dir == s->direction) ? 0 : 1;
6133 s->packets[dirndx]++;
6134 s->bytes[dirndx] += pd.tot_len;
6137 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6138 if (nr != NULL && r == &pf_default_rule)
6140 if (tr->src.addr.type == PF_ADDR_TABLE)
6141 pfr_update_stats(tr->src.addr.p.tbl,
6142 (s == NULL) ? pd.src :
6143 &s->key[(s->direction == PF_IN)]->
6144 addr[(s->direction == PF_OUT)],
6145 pd.af, pd.tot_len, dir == PF_OUT,
6146 r->action == PF_PASS, tr->src.neg);
6147 if (tr->dst.addr.type == PF_ADDR_TABLE)
6148 pfr_update_stats(tr->dst.addr.p.tbl,
6149 (s == NULL) ? pd.dst :
6150 &s->key[(s->direction == PF_IN)]->
6151 addr[(s->direction == PF_IN)],
6152 pd.af, pd.tot_len, dir == PF_OUT,
6153 r->action == PF_PASS, tr->dst.neg);
6157 if (action == PF_SYNPROXY_DROP) {
6162 /* pf_route can free the mbuf causing *m0 to become NULL */
6163 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
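/*
 * pf_test6() is the IPv6 filtering entry point.  It mirrors pf_test() but
 * additionally walks the extension header chain, rejects type 0 routing
 * headers, and treats remaining extension headers like IPv4 options with
 * respect to the allow-opts policy.
 */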
6171 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
6172 struct ether_header *eh, struct inpcb *inp)
6174 struct pfi_kif *kif;
6175 u_short action, reason = 0, log = 0;
6176 struct mbuf *m = *m0, *n = NULL;
6177 struct ip6_hdr *h = NULL;
6178 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
6179 struct pf_state *s = NULL;
6180 struct pf_ruleset *ruleset = NULL;
6182 int off, terminal = 0, dirndx, rh_cnt = 0;
6184 if (!pf_status.running)
6187 memset(&pd, 0, sizeof(pd));
6188 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6189 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6191 kif = (struct pfi_kif *)ifp->if_pf_kif;
6194 DPFPRINTF(PF_DEBUG_URGENT,
6195 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6198 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6202 if ((m->m_flags & M_PKTHDR) == 0)
6203 panic("non-M_PKTHDR is passed to pf_test6");
6204 #endif /* DIAGNOSTIC */
6206 if (m->m_pkthdr.len < (int)sizeof(*h)) {
6208 REASON_SET(&reason, PFRES_SHORT);
6214 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6215 * so make sure pf.flags is clear.
6217 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
6219 m->m_pkthdr.pf.flags = 0;
6220 /* Re-Check when updating to > 4.4 */
6221 m->m_pkthdr.pf.statekey = NULL;
6223 /* We do IP header normalization and packet reassembly here */
6224 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6228 m = *m0; /* pf_normalize messes with m0 */
6229 h = mtod(m, struct ip6_hdr *);
6233 * we do not support jumbograms yet.  If we kept going, the zero ip6_plen
6234 * would break the total-length calculation below, so drop the packet for now.
6236 if (htons(h->ip6_plen) == 0) {
6238 REASON_SET(&reason, PFRES_NORM); /*XXX*/
6243 pd.src = (struct pf_addr *)&h->ip6_src;
6244 pd.dst = (struct pf_addr *)&h->ip6_dst;
6245 pd.sport = pd.dport = NULL;
6247 pd.proto_sum = NULL;
6249 pd.sidx = (dir == PF_IN) ? 0 : 1;
6250 pd.didx = (dir == PF_IN) ? 1 : 0;
6253 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6256 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6257 pd.proto = h->ip6_nxt;
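/*
 * Walk the chain of IPv6 extension headers until a transport protocol
 * (or a fragment header) terminates the loop.
 */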
6260 case IPPROTO_FRAGMENT:
6261 action = pf_test_fragment(&r, dir, kif, m, h,
6263 if (action == PF_DROP)
6264 REASON_SET(&reason, PFRES_FRAG);
6266 case IPPROTO_ROUTING: {
6267 struct ip6_rthdr rthdr;
6270 DPFPRINTF(PF_DEBUG_MISC,
6271 ("pf: IPv6 more than one rthdr\n"));
6273 REASON_SET(&reason, PFRES_IPOPTIONS);
6277 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6279 DPFPRINTF(PF_DEBUG_MISC,
6280 ("pf: IPv6 short rthdr\n"));
6282 REASON_SET(&reason, PFRES_SHORT);
6286 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6287 DPFPRINTF(PF_DEBUG_MISC,
6288 ("pf: IPv6 rthdr0\n"));
6290 REASON_SET(&reason, PFRES_IPOPTIONS);
6297 case IPPROTO_HOPOPTS:
6298 case IPPROTO_DSTOPTS: {
6299 /* get next header and header length */
6300 struct ip6_ext opt6;
6302 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6303 NULL, &reason, pd.af)) {
6304 DPFPRINTF(PF_DEBUG_MISC,
6305 ("pf: IPv6 short opt\n"));
6310 if (pd.proto == IPPROTO_AH)
6311 off += (opt6.ip6e_len + 2) * 4;
6313 off += (opt6.ip6e_len + 1) * 8;
6314 pd.proto = opt6.ip6e_nxt;
6315 /* go to the next header */
6322 } while (!terminal);
6324 /* if there's no routing header, use unmodified mbuf for checksumming */
6334 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6335 &action, &reason, AF_INET6)) {
6336 log = action != PF_PASS;
6339 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6340 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6341 if (action == PF_DROP)
6343 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6345 if (action == PF_PASS) {
6346 pfsync_update_state(s);
6350 } else if (s == NULL)
6351 action = pf_test_rule(&r, &s, dir, kif,
6352 m, off, h, &pd, &a, &ruleset, NULL, inp);
6360 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6361 &action, &reason, AF_INET6)) {
6362 log = action != PF_PASS;
6365 if (uh.uh_dport == 0 ||
6366 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6367 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6369 REASON_SET(&reason, PFRES_SHORT);
6372 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6373 if (action == PF_PASS) {
6374 pfsync_update_state(s);
6378 } else if (s == NULL)
6379 action = pf_test_rule(&r, &s, dir, kif,
6380 m, off, h, &pd, &a, &ruleset, NULL, inp);
6384 case IPPROTO_ICMPV6: {
6385 struct icmp6_hdr ih;
6388 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6389 &action, &reason, AF_INET6)) {
6390 log = action != PF_PASS;
6393 action = pf_test_state_icmp(&s, dir, kif,
6394 m, off, h, &pd, &reason);
6395 if (action == PF_PASS) {
6396 pfsync_update_state(s);
6400 } else if (s == NULL)
6401 action = pf_test_rule(&r, &s, dir, kif,
6402 m, off, h, &pd, &a, &ruleset, NULL, inp);
6407 action = pf_test_state_other(&s, dir, kif, m, &pd);
6408 if (action == PF_PASS) {
6409 pfsync_update_state(s);
6413 } else if (s == NULL)
6414 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
6415 &pd, &a, &ruleset, NULL, inp);
6425 /* handle dangerous IPv6 extension headers. */
6426 if (action == PF_PASS && rh_cnt &&
6427 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6429 REASON_SET(&reason, PFRES_IPOPTIONS);
6431 DPFPRINTF(PF_DEBUG_MISC,
6432 ("pf: dropping packet with dangerous v6 headers\n"));
6435 if ((s && s->tag) || r->rtableid)
6436 pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
6439 if (dir == PF_IN && s && s->key[PF_SK_STACK])
6440 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
6444 if (action == PF_PASS && r->qid) {
6445 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
6446 if (pd.tos & IPTOS_LOWDELAY)
6447 m->m_pkthdr.pf.qid = r->pqid;
6449 m->m_pkthdr.pf.qid = r->qid;
6450 m->m_pkthdr.pf.ecn_af = AF_INET6;
6451 m->m_pkthdr.pf.hdr = h;
6454 m->m_pkthdr.pf.state_hash = s->hash;
6455 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
6460 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6461 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6462 (s->nat_rule.ptr->action == PF_RDR ||
6463 s->nat_rule.ptr->action == PF_BINAT) &&
6464 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6465 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
6467 if (dir == PF_IN && action == PF_PASS && r->divert.port) {
6468 struct pf_divert *divert;
6470 if ((divert = pf_get_divert(m))) {
6471 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
6472 divert->port = r->divert.port;
6473 divert->addr.ipv6 = r->divert.addr.v6;
6480 if (s != NULL && s->nat_rule.ptr != NULL &&
6481 s->nat_rule.ptr->log & PF_LOG_ALL)
6482 lr = s->nat_rule.ptr;
6485 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
6489 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6490 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6492 if (action == PF_PASS || r->action == PF_DROP) {
6493 dirndx = (dir == PF_OUT);
6494 r->packets[dirndx]++;
6495 r->bytes[dirndx] += pd.tot_len;
6497 a->packets[dirndx]++;
6498 a->bytes[dirndx] += pd.tot_len;
6501 if (s->nat_rule.ptr != NULL) {
6502 s->nat_rule.ptr->packets[dirndx]++;
6503 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6505 if (s->src_node != NULL) {
6506 s->src_node->packets[dirndx]++;
6507 s->src_node->bytes[dirndx] += pd.tot_len;
6509 if (s->nat_src_node != NULL) {
6510 s->nat_src_node->packets[dirndx]++;
6511 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6513 dirndx = (dir == s->direction) ? 0 : 1;
6514 s->packets[dirndx]++;
6515 s->bytes[dirndx] += pd.tot_len;
6518 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6519 if (nr != NULL && r == &pf_default_rule)
6521 if (tr->src.addr.type == PF_ADDR_TABLE)
6522 pfr_update_stats(tr->src.addr.p.tbl,
6523 (s == NULL) ? pd.src :
6524 &s->key[(s->direction == PF_IN)]->addr[0],
6525 pd.af, pd.tot_len, dir == PF_OUT,
6526 r->action == PF_PASS, tr->src.neg);
6527 if (tr->dst.addr.type == PF_ADDR_TABLE)
6528 pfr_update_stats(tr->dst.addr.p.tbl,
6529 (s == NULL) ? pd.dst :
6530 &s->key[(s->direction == PF_IN)]->addr[1],
6531 pd.af, pd.tot_len, dir == PF_OUT,
6532 r->action == PF_PASS, tr->dst.neg);
6536 if (action == PF_SYNPROXY_DROP) {
6541 /* pf_route6 can free the mbuf causing *m0 to become NULL */
6542 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6549 pf_check_congestion(struct ifqueue *ifq)