/*	$FreeBSD: src/sys/contrib/pf/net/pf_norm.c,v 1.10 2004/08/14 15:32:40 dwmalone Exp $	*/
/*	$OpenBSD: pf_norm.c,v 1.80.2.1 2004/04/30 21:46:33 brad Exp $	*/
/* add	$OpenBSD: pf_norm.c,v 1.87 2004/05/11 07:34:11 dhartmei Exp $ */
/*	$DragonFly: src/sys/net/pf/pf_norm.c,v 1.9 2007/08/11 18:53:31 dillon Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>

#include <vm/vm_zone.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/pf/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip6.h>

#include <net/pf/pfvar.h>
/*
 * XXX: This should go to netinet/ip6.h (KAME)
 */
/* IPv6 options: common part */
struct ip6_opt {
	u_int8_t	ip6o_type;
	u_int8_t	ip6o_len;
} __packed;

/* Jumbo Payload Option */
struct ip6_opt_jumbo {
	u_int8_t	ip6oj_type;
	u_int8_t	ip6oj_len;
	u_int8_t	ip6oj_jumbo_len[4];
} __packed;

/* NSAP Address Option */
struct ip6_opt_nsap {
	u_int8_t	ip6on_type;
	u_int8_t	ip6on_len;
	u_int8_t	ip6on_src_nsap_len;
	u_int8_t	ip6on_dst_nsap_len;
	/* followed by source NSAP */
	/* followed by destination NSAP */
} __packed;

/* Tunnel Limit Option */
struct ip6_opt_tunnel {
	u_int8_t	ip6ot_type;
	u_int8_t	ip6ot_len;
	u_int8_t	ip6ot_encap_limit;
} __packed;

/* Router Alert Option */
struct ip6_opt_router {
	u_int8_t	ip6or_type;
	u_int8_t	ip6or_len;
	u_int8_t	ip6or_value[2];
} __packed;

#define PFFRAG_SEENLAST		0x0001	/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER		0x0002	/* Non-buffering fragment cache */
#define PFFRAG_DROP		0x0004	/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;

static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
void			 pf_ip2key(struct pf_fragment *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment **,
			    struct pf_frent *, int);
struct mbuf		*pf_fragcache(struct mbuf **, struct ip *,
			    struct pf_fragment **, int, int, int *);
u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int			 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int);

#define DPFPRINTF(x) if (pf_status.debug >= PF_DEBUG_MISC) \
	{ kprintf("%s: ", __func__); kprintf x ;}
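/*
 * Illustrative usage note (not part of the original sources): the doubled
 * parentheses let a printf-style argument list collapse into a single
 * macro argument, e.g.
 *
 *	DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
 *
 * which expands to the two kprintf() calls above, emitted only when
 * pf_status.debug >= PF_DEBUG_MISC.
 */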
vm_zone_t		 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
vm_zone_t		 pf_state_scrub_pl;
int			 pf_nfrents, pf_ncache;

void
pf_normalize_init(void)
{
	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}
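/*
 * Editorial note: pf_frag_compare() defines the RB-tree's total order.
 * Fragments are keyed by (ip_id, ip_p, ip_src, ip_dst), the same tuple
 * that pf_ip2key() below extracts from an incoming header.
 */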
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_second -
				    pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __func__));
	}
}
/*
 * Try to flush old fragments to make space for new ones
 */
void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n", pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n", pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}
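/*
 * Illustrative example (not from the original sources): with
 * pf_nfrents == 1000 the first loop above sets goal to 900 and frees
 * whole reassembly chains, oldest first, until fewer than 900 fragment
 * entries remain, i.e. at least 10% of the entries are reclaimed.
 * Freeing one pf_fragment can release many pf_frent entries at once.
 */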
/* Frees the fragments and all associated entries */
void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frcache->fr_end): %s", __func__));

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}
void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_second;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}
/* Removes a fragment from the fragment queue and frees the fragment */
void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}

#define FR_IP_OFF(fr)	(((fr)->fr_ip->ip_off & IP_OFFMASK) << 3)
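/*
 * Illustrative example (not from the original sources): ip_off stores
 * the fragment offset in 8-byte units in its low 13 bits.  For the
 * second fragment of a datagram split on a 1500-byte MTU (20-byte
 * header, 1480 data bytes per fragment), ip_off & IP_OFFMASK == 185,
 * so FR_IP_OFF() yields 185 << 3 == 1480, the byte offset of its data.
 */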
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ip->ip_off & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ip->ip_len - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __func__));
	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_second;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __func__));
	if (frep != NULL &&
	    FR_IP_OFF(frep) + frep->fr_ip->ip_len - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + frep->fr_ip->ip_len -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = ip->ip_off + (precut >> 3);
		off = (ip->ip_off & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = ip_len;
	}
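	/*
	 * Illustrative example (not from the original sources): if frep
	 * covers bytes [0, 1480) and this fragment starts at off == 1472
	 * with ip_len == 1480, then precut == 8: m_adj() trims 8 bytes
	 * from the front and ip_off grows by precut >> 3 == 1, moving
	 * off up to 1480 so the fragments abut on an 8 byte boundary.
	 */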
	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < frea->fr_ip->ip_len - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    frea->fr_ip->ip_len - aftercut;
			frea->fr_ip->ip_off = frea->fr_ip->ip_off +
			    (aftercut >> 3);
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}
 insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += frep->fr_ip->ip_len - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);
	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __func__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;
		m_cat(m, m2);
	}
	/*
	 * Note: this 1's complement optimization works with <= 65535
	 * fragments.
	 *
	 * Handle 1's complement carry for the 16 bit result.  This can
	 * result in another carry which must also be handled.
	 */
	m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
	    (m->m_pkthdr.csum_data >> 16);
	if (m->m_pkthdr.csum_data > 0xFFFF)
		m->m_pkthdr.csum_data -= 0xFFFF;
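	/*
	 * Illustrative example (not from the original sources): an
	 * accumulated csum_data of 0x2FFFE folds to 0xFFFE + 0x2 ==
	 * 0x10000, which exceeds 0xFFFF, so the final subtraction
	 * leaves 0x1 -- the same value a 1's complement sum of the
	 * individual fragment checksums would produce.
	 */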
	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = off + hlen;
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ip->ip_len));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = h->ip_len - (h->ip_hl << 2);
	u_int16_t		 off = h->ip_off << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __func__));
	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, *frag);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_second;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}
	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __func__));
	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * We could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */

				*m0 = m_dup(m, MB_DONTWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __func__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;

					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    h->ip_len - precut),
				    ("m->m_len != h->ip_len - precut: %s",
				    __func__));
				h->ip_off = h->ip_off +
				    (precut >> 3);
				h->ip_len = h->ip_len - precut;
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}
	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;

					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == h->ip_len - aftercut),
				    ("m->m_len != h->ip_len - aftercut: %s",
				    __func__));
				h->ip_len = h->ip_len - aftercut;
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}
		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				pf_ncache--;
				cur = NULL;
			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __func__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				pf_ncache--;
				frp = NULL;
			}
		}
	}
	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);
 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:
	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (h->ip_off & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
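	/*
	 * Editorial note: the r->skip[] pointers above implement pf's
	 * skip-step optimization.  Rules are sorted so that consecutive
	 * rules often share a field; when a field fails to match,
	 * evaluation jumps directly past every following rule with the
	 * same value for that field instead of testing them one by one.
	 */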
	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > h->ip_len)
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off &= ~IP_DF;

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & IP_DF) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = h->ip_len - hlen;
	ip_off = (h->ip_off & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;
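	/*
	 * Illustrative example (not from the original sources): a
	 * datagram carrying 3000 payload bytes over a 1500-byte MTU
	 * arrives as fragments (fragoff 0, ip_len 1480, MF set),
	 * (1480, 1480, MF set) and (2960, 40, MF clear); max tracks the
	 * highest fragoff + ip_len seen, 3000 once the tail shows up.
	 */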
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT &&
		    m->m_pkthdr.fw_flags & PF_MBUF_FRAGCACHE) {
			/* Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN)
			m->m_pkthdr.fw_flags |= PF_MBUF_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		goto fragment_pass;
	}
 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	h->ip_off &= IP_DF;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	if (r->rule_flag & PFRULE_RANDOMID) {
#ifdef RANDOM_IP_ID
		h->ip_id = ip_randomid();
#else
		h->ip_id = htons(ip_id++);
#endif
	}

	return (PF_PASS);

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);
 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);

	return (PF_DROP);
}
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
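		/*
		 * Editorial note: the two length encodings differ.  AH
		 * counts 32-bit words minus two, every other extension
		 * header counts 8-byte units minus one.  For example
		 * (illustrative values), ip6e_len == 4 in an AH header
		 * means (4 + 2) * 4 == 24 bytes, while ip6e_len == 2 in
		 * a routing header means (2 + 1) * 8 == 24 bytes.
		 */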
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);
	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

 fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
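	/*
	 * Editorial note: IP6F_OFF_MASK keeps the 13-bit fragment offset
	 * field (the high bits of ip6f_offlg), whose unit is 8 bytes but
	 * which sits 3 bits up in the 16-bit word; after ntohs() the
	 * masked value is therefore already the offset in bytes.
	 */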
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);
}
#endif /* INET6 */
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL)
		return (PF_PASS);

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;
	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}
	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}
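	/*
	 * Editorial sketch (assumed, not verified against this tree):
	 * pf_cksum_fixup(), defined elsewhere in pf, does the usual
	 * incremental Internet-checksum update for one 16-bit word,
	 * roughly
	 *
	 *	l = cksum + old - new;
	 *	l = (l >> 16) + (l & 0xffff);
	 *	return (l & 0xffff);
	 *
	 * so clearing th_urp above adjusts th_sum without recomputing
	 * the checksum over the whole segment.
	 */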
	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL);
	return (PF_DROP);
}
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int8_t	 hdr[60];
	u_int8_t	*opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
	if (src->scrub == NULL)
		return (1);
	bzero(src->scrub, sizeof(*src->scrub));

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}
	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod = karc4random();
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
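/*
 * Editorial note: the option walks in pf_normalize_tcp_init() and
 * pf_normalize_tcp_stateful() advance by MAX(opt[1], 2) on purpose.
 * A malformed option advertising a length of 0 or 1 would otherwise
 * keep opt/hlen from making progress and spin the loop forever.
 */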
void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		pool_put(&pf_state_scrub_pl, state->src.scrub);
	if (state->dst.scrub)
		pool_put(&pf_state_scrub_pl, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
    struct pf_state_peer *dst, int *writeback)
{
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("pf_normalize_tcp_stateful: src->scrub && dst->scrub!"));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}
	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					u_int32_t	ts_value;

					if (src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						memcpy(&ts_value, &opt[2],
						    sizeof(u_int32_t));
						ts_value = htonl(ntohl(ts_value)
						    + src->scrub->pfss_ts_mod);
						pf_change_a(&opt[2],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&ts_value, &opt[6],
					    sizeof(u_int32_t));
					if (ts_value && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						ts_value = htonl(ntohl(ts_value)
						    - dst->scrub->pfss_ts_mod);
						pf_change_a(&opt[6],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back the header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}
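	/*
	 * Illustrative example (not from the original sources): with
	 * pfss_ts_mod == 1000, a TSval of 5 leaves the box as 1005, and
	 * the peer's echoed TSecr of 1005 is rewritten back to 5 on the
	 * way in, so both endpoints see consistent timestamps while the
	 * host's real uptime counter is hidden from the wire.
	 */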
	/* I have a dream....  TCP segment reassembly.... */

	return (0);
}

int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		*optp;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);
	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);
	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss));
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	return (rewrite);
}
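/*
 * Illustrative example (not from the original sources): with a rule
 * like "scrub ... max-mss 1440", a SYN advertising MSS 1460 has the
 * option rewritten to 1440 and th_sum patched incrementally via
 * pf_cksum_fixup(), which is how pf keeps TCP segments inside a
 * smaller path MTU (e.g. PPPoE links) without touching the end hosts.
 */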