/*	$FreeBSD: src/sys/contrib/pf/net/pf_norm.c,v 1.10 2004/08/14 15:32:40 dwmalone Exp $	*/
/*	$OpenBSD: pf_norm.c,v 1.80.2.1 2004/04/30 21:46:33 brad Exp $	*/
/*	add:	$OpenBSD: pf_norm.c,v 1.87 2004/05/11 07:34:11 dhartmei Exp $	*/
/*	$DragonFly: src/sys/net/pf/pf_norm.c,v 1.2 2005/02/11 22:25:57 joerg Exp $	*/

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>

#include <vm/vm_zone.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/pf/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pf/pfvar.h>
/*
 * XXX: This should go to netinet/ip6.h (KAME)
 */

/* IPv6 options: common part */
struct ip6_opt {
	u_int8_t	ip6o_type;
	u_int8_t	ip6o_len;
} __packed;

/* Jumbo Payload Option */
struct ip6_opt_jumbo {
	u_int8_t	ip6oj_type;
	u_int8_t	ip6oj_len;
	u_int8_t	ip6oj_jumbo_len[4];
} __packed;

/* NSAP Address Option */
struct ip6_opt_nsap {
	u_int8_t	ip6on_type;
	u_int8_t	ip6on_len;
	u_int8_t	ip6on_src_nsap_len;
	u_int8_t	ip6on_dst_nsap_len;
	/* followed by source NSAP */
	/* followed by destination NSAP */
} __packed;

/* Tunnel Limit Option */
struct ip6_opt_tunnel {
	u_int8_t	ip6ot_type;
	u_int8_t	ip6ot_len;
	u_int8_t	ip6ot_encap_limit;
} __packed;

/* Router Alert Option */
struct ip6_opt_router {
	u_int8_t	ip6or_type;
	u_int8_t	ip6or_len;
	u_int8_t	ip6or_value[2];
} __packed;

#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this packet */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))

TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;

static int	 pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
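
/*
 * Each fragment state is indexed two ways: the red-black trees above give
 * O(log n) lookup by the (id, proto, src, dst) key compared in
 * pf_frag_compare(), while the tail queues keep the same entries in
 * least-recently-used order (pf_find_fragment() moves a hit to the head),
 * so expiry and flushing can always reap from the queue tail.
 */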

/* Private prototypes */
void		 pf_ip2key(struct pf_fragment *, struct ip *);
void		 pf_remove_fragment(struct pf_fragment *);
void		 pf_flush_fragments(void);
void		 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf	*pf_reassemble(struct mbuf **, struct pf_fragment **,
		    struct pf_frent *, int);
struct mbuf	*pf_fragcache(struct mbuf **, struct ip*,
		    struct pf_fragment **, int, int, int *);
u_int16_t	 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int		 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int);

#define	DPFPRINTF(x) if (pf_status.debug >= PF_DEBUG_MISC) \
			{ printf("%s: ", __func__); printf x ;}
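
/*
 * The double parentheses at every DPFPRINTF() call site let a single macro
 * parameter carry a complete printf() argument list; for example
 *
 *	DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
 *
 * expands to printf("%s: ", __func__); printf("expiring %d(%p)\n",
 * frag->fr_id, frag); under the debug level check.
 */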

/* Globals */
vm_zone_t	 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
vm_zone_t	 pf_state_scrub_pl;
int		 pf_nfrents, pf_ncache;

void
pf_normalize_init(void)
{
	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}
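
/*
 * Note (assuming these wrappers keep the pool(9)-style semantics they are
 * named after): pool_sethiwat() sets a soft high-water mark above which
 * idle items are returned to the system, while pool_sethardlimit() caps
 * how many items may ever be allocated, so PR_NOWAIT allocations of
 * fragment and cache entries start failing at the limit and
 * pf_flush_fragments() must make room.
 */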

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_second -
				    pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
	}
}

/*
 * Try to flush old fragments to make space for new ones
 */
void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
	    pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
	    pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}
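
/*
 * Worked example: with pf_nfrents == 1000 the goal becomes 900, so the
 * loop above frees whole fragment descriptors (oldest first, from the
 * queue tail) until at least ~10% of the fragment entries have been
 * reclaimed.
 */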

/* Frees the fragments and all associated entries */
void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frcache->fr_end): %s", __FUNCTION__));

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}

void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_second;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}

#define FR_IP_OFF(fr)	(((fr)->fr_ip->ip_off & IP_OFFMASK) << 3)
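
/*
 * The IP fragment offset field counts 8-byte units, so FR_IP_OFF() shifts
 * left by 3 to get a byte offset: an ip_off of 185 (flag bits masked off)
 * means the fragment's payload starts at byte 1480 of the datagram.
 */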

struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ip->ip_off & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ip->ip_len - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_second;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));

	if (frep != NULL &&
	    FR_IP_OFF(frep) + frep->fr_ip->ip_len - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + frep->fr_ip->ip_len -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = ip->ip_off + (precut >> 3);
		off = (ip->ip_off & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = ip_len;
	}
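
	/*
	 * Worked example: if the previous fragment covers bytes 0-999 and
	 * this one claims 992-1491, precut is 8; m_adj() trims the 8
	 * duplicate payload bytes and ip_off advances by precut >> 3 = 1
	 * eight-byte unit, keeping the offset on the required boundary.
	 */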

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < frea->fr_ip->ip_len - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    frea->fr_ip->ip_len - aftercut;
			frea->fr_ip->ip_off = frea->fr_ip->ip_off +
			    (aftercut >> 3);
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}

 insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += frep->fr_ip->ip_len - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m_cat(m, m2);
	}
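
	/*
	 * What the ip_input() "magic" above amounts to: the first fragment
	 * keeps its IP header, every later fragment's mbuf chain (whose IP
	 * header was stripped on entry to pf_reassemble()) is appended with
	 * m_cat(), and the length fields are rebuilt below to describe the
	 * single reassembled datagram.
	 */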

	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = off + hlen;
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ip->ip_len));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}
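
/*
 * Non-buffering fragment cache, used by the scrub options "fragment crop"
 * and "fragment drop-ovl": instead of holding fragments until the datagram
 * is complete, it only records which byte ranges have already been passed,
 * and trims (or, with drop set, drops) any data that overlaps a range seen
 * before.
 */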
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = h->ip_len - (h->ip_hl << 2);
	u_int16_t		 off = h->ip_off << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, *frag);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_second;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * We could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, MB_DONTWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    h->ip_len - precut),
				    ("m->m_len != h->ip_len - precut: %s",
				    __FUNCTION__));
				h->ip_off = h->ip_off +
				    (precut >> 3);
				h->ip_len = h->ip_len - precut;
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == h->ip_len - aftercut),
				    ("m->m_len != h->ip_len - aftercut: %s",
				    __FUNCTION__));
				h->ip_len = h->ip_len - aftercut;
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				cur = NULL;
				pf_ncache--;
			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				frp = NULL;
				pf_ncache--;
			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reassemble */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}

int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (h->ip_off & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += h->ip_len;
	}
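
	/*
	 * The r->skip[] jumps above implement pf's skip-step optimization:
	 * when a rule fails to match on one criterion (interface, direction,
	 * address family, protocol, address), evaluation jumps straight to
	 * the next rule that differs in that criterion, precomputed at
	 * ruleset load time, instead of stepping through every rule.
	 */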

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > h->ip_len)
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off &= ~IP_DF;

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & IP_DF) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = h->ip_len - hlen;
	ip_off = (h->ip_off & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT) {
			if (m->m_pkthdr.fw_flags & PF_MBUF_FRAGCACHE) {
				/* Already passed the fragment cache in the
				 * input direction.  If we continued, it would
				 * appear to be a dup and would be dropped.
				 */
				goto fragment_pass;
			}
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN)
			m->m_pkthdr.fw_flags |= PF_MBUF_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	h->ip_off &= IP_DF;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	if (r->rule_flag & PFRULE_RANDOMID) {
#ifdef RANDOM_IP_ID
		h->ip_id = ip_randomid();
#else
		h->ip_id = htons(ip_id++);
#endif
	}

	return (PF_PASS);
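
	/*
	 * The random-id scrub option replaces the (often sequential) IP
	 * identification field so that observers cannot use the id sequence
	 * to count hosts behind a NAT or mount idle scans; without
	 * RANDOM_IP_ID support the code falls back to the global sequential
	 * counter.
	 */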

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);

	return (PF_DROP);
}

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += ntohs(h->ip6_plen);
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);
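
	/*
	 * Extension-header lengths use different units above: ip6e_len for
	 * AH counts 32-bit words minus two, hence (len + 2) * 4 bytes, while
	 * the other extension headers count 8-byte units minus one, hence
	 * (len + 1) * 8.  Within hop-by-hop options, PAD1 is the single
	 * one-byte option with no length field, so it is skipped specially.
	 */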

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

 fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	return (PF_PASS);
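
	/*
	 * Note: unlike the IPv4 path, IPv6 fragments are only sanity-checked
	 * (offset and total length within IPV6_MAXPACKET) and then passed
	 * through; this version performs no IPv6 reassembly or fragment
	 * caching.
	 */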

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}
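
	/*
	 * pf_cksum_fixup() (declared above) performs the standard RFC 1624
	 * incremental checksum update, HC' = ~(~HC + ~m + m'): the 16-bit
	 * word overlaying th_off/th_x2/th_flags is read before and after the
	 * rewrite, e.g. 0x5112 -> 0x5012 when the reserved x2 bits are
	 * cleared, and only those two words are needed to patch th_sum
	 * rather than recomputing the whole checksum.
	 */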

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
	if (src->scrub == NULL)
		return (1);
	bzero(src->scrub, sizeof(*src->scrub));

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;

		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (opt[0]) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod = arc4random();
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
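
/*
 * TCP options are TLVs: in the walk above, opt[0] is the kind and opt[1]
 * the total option length, except for EOL and NOP, which are single bytes.
 * MAX(opt[1], 2) keeps a corrupt length of 0 or 1 from stalling the loop.
 */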

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		pool_put(&pf_state_scrub_pl, state->src.scrub);
	if (state->dst.scrub)
		pool_put(&pf_state_scrub_pl, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
    struct pf_state_peer *dst, int *writeback)
{
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("pf_normalize_tcp_stateful: src->scrub && dst->scrub!"));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;

		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (opt[0]) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					u_int32_t ts_value;

					if (src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						memcpy(&ts_value, &opt[2],
						    sizeof(u_int32_t));
						ts_value = htonl(ntohl(ts_value)
						    + src->scrub->pfss_ts_mod);
						pf_change_a(&opt[2],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&ts_value, &opt[6],
					    sizeof(u_int32_t));
					if (ts_value && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						ts_value = htonl(ntohl(ts_value)
						    - dst->scrub->pfss_ts_mod);
						pf_change_a(&opt[6],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copy back the options; the caller copies back the
			 * header. */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}
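
	/*
	 * Worked example: with pfss_ts_mod == 0x1a2b, an outgoing TSval of
	 * 1000 leaves as 1000 + 0x1a2b (mod 2^32), and the peer's echoed
	 * TSecr has the same offset subtracted on the way back, so both
	 * endpoints still see consistent timestamps while the host's raw
	 * timestamp clock (and hence its uptime) stays hidden.
	 */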

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		*optp;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);
	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss));
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	return (rewrite);
}
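
/*
 * Worked example: with "scrub max-mss 1400" in pf.conf, a SYN advertising
 * MSS 1460 has its option rewritten to 1400 and th_sum patched
 * incrementally, so the peer never sends segments larger than the clamped
 * size (useful on PPPoE and other reduced-MTU links).
 */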