/*	$OpenBSD: pf_norm.c,v 1.113 2008/05/07 07:07:29 markus Exp $ */

/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/pf/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pf/pfvar.h>
#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
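
/*
 * The two strategies can be told apart by PFFRAG_NOBUFFER alone: a
 * descriptor built by pf_reassemble() keeps every fragment mbuf queued
 * (fr_flags starts at 0, so BUFFER_FRAGMENTS() is true) and lives on
 * pf_fragqueue/pf_frag_tree, while one built by pf_fragcache() records
 * only byte ranges, carries PFFRAG_NOBUFFER, and lives on
 * pf_cachequeue/pf_cache_tree.
 */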
TAILQ_HEAD(pf_fragqueue, pf_fragment)	*pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	*pf_cachequeue;

static __inline int	 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	*pf_frag_tree,
					*pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
/* Private prototypes */
void			 pf_ip2key(struct pf_fragment *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment **,
			    struct pf_frent *, int);
struct mbuf		*pf_fragcache(struct mbuf **, struct ip *,
			    struct pf_fragment **, int, int, int *);
int			 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int, sa_family_t);
#define DPFPRINTF(x) do {				\
	if (pf_status.debug >= PF_DEBUG_MISC) {		\
		kprintf("%s: ", __func__);		\
		kprintf x ;				\
	}						\
} while(0)
static MALLOC_DEFINE(M_PFFRAGPL, "pffrag", "pf fragment pool list");
static MALLOC_DEFINE(M_PFCACHEPL, "pffrcache", "pf fragment cache pool list");
static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list");
static MALLOC_DEFINE(M_PFCENTPL, "pffrcent", "pf fragment cent pool list");
static MALLOC_DEFINE(M_PFSTATESCRUBPL, "pfstatescrub",
    "pf state scrub pool list");

struct malloc_type	*pf_frent_pl, *pf_frag_pl, *pf_cache_pl, *pf_cent_pl;
struct malloc_type	*pf_state_scrub_pl;
int			 pf_nfrents, pf_ncache;
void
pf_normalize_init(void)
{
	int n;

	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	/*
	 * pcpu queues and trees
	 */
	pf_fragqueue = kmalloc(sizeof(*pf_fragqueue) * ncpus,
				M_PF, M_WAITOK | M_ZERO);
	pf_cachequeue = kmalloc(sizeof(*pf_cachequeue) * ncpus,
				M_PF, M_WAITOK | M_ZERO);
	pf_frag_tree = kmalloc(sizeof(*pf_frag_tree) * ncpus,
				M_PF, M_WAITOK | M_ZERO);
	pf_cache_tree = kmalloc(sizeof(*pf_cache_tree) * ncpus,
				M_PF, M_WAITOK | M_ZERO);

	for (n = 0; n < ncpus; ++n) {
		TAILQ_INIT(&pf_fragqueue[n]);
		TAILQ_INIT(&pf_cachequeue[n]);
		RB_INIT(&pf_frag_tree[n]);
		RB_INIT(&pf_cache_tree[n]);
	}
}
void
pf_normalize_unload(void)
{
	kfree(pf_fragqueue, M_PF);
	kfree(pf_cachequeue, M_PF);
	kfree(pf_frag_tree, M_PF);
	kfree(pf_cache_tree, M_PF);
}
static __inline int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment *frag;
	u_int32_t expire;
	int cpu = mycpu->gd_cpuid;

	expire = time_second - pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue[cpu], pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
			("BUFFER_FRAGMENTS(frag) == 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue[cpu], pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
			("BUFFER_FRAGMENTS(frag) != 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&pf_cachequeue[cpu]) ||
			 TAILQ_LAST(&pf_cachequeue[cpu], pf_cachequeue) != frag),
			("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
			 __func__));
	}
}
/*
 * Try to flush old fragments to make space for new ones
 */
void
pf_flush_fragments(void)
{
	struct pf_fragment *frag;
	int goal;
	int cpu = mycpu->gd_cpuid;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
		   pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue[cpu], pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
		   pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue[cpu], pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}
/* Frees the fragments and all associated entries */
void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		     frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			kfree(frent, M_PFFRENTPL);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		     frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_cache) ||
				 LIST_FIRST(&frag->fr_cache)->fr_off >
				 frcache->fr_end),
				("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
				 " frcache->fr_end): %s", __func__));

			kfree(frcache, M_PFCENTPL);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}
void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}
struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;
	int cpu = mycpu->gd_cpuid;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_second;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue[cpu], frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue[cpu], frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue[cpu], frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue[cpu], frag, frag_next);
		}
	}

	return (frag);
}
/* Removes a fragment from the fragment queue and frees the fragment */
void
pf_remove_fragment(struct pf_fragment *frag)
{
	int cpu = mycpu->gd_cpuid;

	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree[cpu], frag);
		TAILQ_REMOVE(&pf_fragqueue[cpu], frag, frag_next);
		kfree(frag, M_PFFRAGPL);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree[cpu], frag);
		TAILQ_REMOVE(&pf_cachequeue[cpu], frag, frag_next);
		kfree(frag, M_PFCACHEPL);
	}
}
#define FR_IP_OFF(fr)	(((fr)->fr_ip->ip_off & IP_OFFMASK) << 3)
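
/*
 * Illustrative arithmetic for FR_IP_OFF(): the 13-bit fragment offset
 * field counts 8-byte units, so a value of 185 units describes payload
 * starting at byte 185 * 8 == 1480, i.e. the second fragment of a
 * full-size 1500 byte Ethernet datagram with a 20 byte IP header.
 */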
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ip->ip_off & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ip->ip_len - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;
	int		 cpu = mycpu->gd_cpuid;

	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __func__));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = kmalloc(sizeof(struct pf_fragment), M_PFFRAGPL, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = kmalloc(sizeof(struct pf_fragment), M_PFFRAGPL, M_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_second;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree[cpu], *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue[cpu], *frag, frag_next);

		/* We do not have a previous fragment */
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __func__));

	if (frep != NULL &&
	    FR_IP_OFF(frep) + frep->fr_ip->ip_len - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + frep->fr_ip->ip_len -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = ip->ip_off + (precut >> 3);
		off = (ip->ip_off & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = ip_len;
	}
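
	/*
	 * Worked example for the precut trim above (illustrative values):
	 * if the previous fragment already covers the first 24 bytes of
	 * this one, precut is 24; m_adj() drops those payload bytes, the
	 * offset field advances by 24 >> 3 == 3 eight-byte units, and off
	 * grows by the same 24 bytes.
	 */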
	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < frea->fr_ip->ip_len - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    frea->fr_ip->ip_len - aftercut;
			frea->fr_ip->ip_off = frea->fr_ip->ip_off +
			    (aftercut >> 3);
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		kfree(frea, M_PFFRENTPL);
		pf_nfrents--;
	}

 insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += frep->fr_ip->ip_len - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __func__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	kfree(frent, M_PFFRENTPL);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		kfree(frent, M_PFFRENTPL);
		pf_nfrents--;
		m_cat(m, m2);
	}

	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = off + hlen;
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ip->ip_len));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	kfree(frent, M_PFFRENTPL);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = h->ip_len - (h->ip_hl << 2);
	u_int16_t		 off = h->ip_off << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;
	int			 cpu = mycpu->gd_cpuid;

	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __func__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = kmalloc(sizeof(struct pf_fragment), M_PFCACHEPL, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = kmalloc(sizeof(struct pf_fragment), M_PFCACHEPL, M_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT);
		if (cur == NULL) {
			kfree(*frag, M_PFCACHEPL);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_second;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree[cpu], *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue[cpu], *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __func__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __func__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    h->ip_len - precut),
				    ("m->m_len != h->ip_len - precut: %s",
				    __func__));
				h->ip_off = h->ip_off +
				    (precut >> 3);
				h->ip_len = h->ip_len - precut;
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}
	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == h->ip_len - aftercut),
				    ("m->m_len != h->ip_len - aftercut: %s",
				    __func__));
				h->ip_len = h->ip_len - aftercut;
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				kfree(cur, M_PFCENTPL);
				pf_ncache--;
				cur = NULL;
			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __func__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				kfree(frp, M_PFCENTPL);
				pf_ncache--;
				frp = NULL;
			}
		}
	}
	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (h->ip_off & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 cpu = mycpu->gd_cpuid;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > h->ip_len)
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & IP_DF) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= ~IP_DF;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* A fragment; rehash required. */
	m->m_flags &= ~M_HASH;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & IP_DF) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = h->ip_len - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;
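
	/*
	 * Illustrative values for the two sanity checks above: a non-final
	 * fragment with ip_len 1481 fails the 8-byte alignment test
	 * (1481 & 0x7 != 0), and a fragment at fragoff 65528 with more
	 * than 7 bytes of payload pushes max past IP_MAXPACKET (65535).
	 */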
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree[cpu]);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = kmalloc(sizeof(struct pf_frent), M_PFFRENTPL, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int nomem = 0;

		if (dir == PF_OUT && m->m_pkthdr.pf.flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		frag = pf_find_fragment(h, &pf_cache_tree[cpu]);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN)
			m->m_pkthdr.pf.flags |= PF_TAG_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~IP_DF) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= IP_DF;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, htons(ip_off), htons(h->ip_off), 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = r->min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	if (r->rule_flag & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = r->set_tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	if (r->rule_flag & PFRULE_RANDOMID) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;

	return (PF_PASS);

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = r->min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	if (r->rule_flag & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = r->set_tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;

	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);

	return (PF_DROP);
}
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);
			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

 fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	/* remember to set pd->flags |= PFDESC_IP_REAS */
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

 badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);
}
#endif /* INET6 */
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL, pd);
	return (PF_DROP);
}
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t	 tsval, tsecr;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = kmalloc(sizeof(struct pf_state_scrub), M_PFSTATESCRUBPL,
	    M_NOWAIT | M_ZERO);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod = karc4random();

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		kfree(state->src.scrub, M_PFSTATESCRUBPL);
	if (state->dst.scrub)
		kfree(state->dst.scrub, M_PFSTATESCRUBPL);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	 uptime;
	u_int32_t	 tsval, tsecr;
	u_int		 tsval_from_last;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;
	int		 got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("pf_normalize_tcp_stateful: src->scrub && dst->scrub!"));

	tsval = 0;	/* avoid gcc complaint */
	tsecr = 0;	/* avoid gcc complaint */

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						kprintf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_a(&opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_a(&opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}
	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
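
/*
 * Where the 24 day figure comes from: the fastest RFC1323 timestamp clock
 * ticks once per millisecond, so half of the 32-bit timestamp space is
 * 2^31 ms ~= 24.8 days.  TS_MAX_IDLE (24 days, in seconds) stays just
 * inside that, and TS_MAX_CONN halves it again for the weaker tsecr
 * lowerbound described above.
 */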
	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_second - state->creation > TS_MAX_CONN)) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			kprintf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			kprintf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
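
		/*
		 * Condensed, the four bounds tested below (and flagged
		 * '0'-'3' in the debug output) are:
		 *   0: tsval >= dst's last echoed tsecr  (lowerbound)
		 *   1: tsval <= src's last tsval + ticks (upperbound)
		 *   2: tsecr <= dst's last tsval         (upperbound)
		 *   3: tsecr >= dst's original tsval0    (lowerbound)
		 */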
		struct timeval	delta_ts;
		int		ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
#ifndef timersub
#define timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
#endif

		timersub(&uptime, &src->scrub->pfss_last, &delta_ts);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
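
		/*
		 * Worked example (illustrative values): with a ts_fudge of
		 * 3 seconds and a packet arriving 2.5s after the previous
		 * one, tsval_from_last = (2 + 3) * 1100
		 * + 500000 / (1000000 / 1100) = 5500 + 550 = 6050 ticks of
		 * allowed timestamp advance.
		 */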
		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %lus %lums\n",
			    tsval, tsecr, tsval_from_last, delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				kprintf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 * - connection opening or closing (often not even sent).
		 *   but we must not let an attacker put a FIN on a
		 *   data packet to sneak it through our ESTABLISHED check.
		 * - on a TCP reset.  RFC suggests not even looking at TS.
		 * - on an empty ACK.  The TS will not be echoed so it will
		 *   probably not help keep the RTT calculation in sync and
		 *   there isn't as much danger when the sequence numbers
		 *   got wrapped.  So some stacks don't include TS on empty
		 *   ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				kprintf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}
	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets be
	 * timestamped.  But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				kprintf("\n");
			}
		}
	}
	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (PF_PASS);
}
int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}
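
/*
 * MSS clamping example (illustrative values): with a rule like
 * "scrub max-mss 1440", a SYN advertising MSS 1460 is rewritten to 1440
 * above, and the TCP checksum is fixed up incrementally by
 * pf_cksum_fixup() instead of being recomputed over the whole segment.
 */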