2 * Copyright (c) 2003-2004 Jeffrey M. Hsu. All rights reserved.
4 * All advertising materials mentioning features or use of this software
5 * must display the following acknowledgement:
6 * This product includes software developed by Jeffrey M. Hsu.
8 * Copyright (c) 2001 Networks Associates Technologies, Inc.
11 * This software was developed for the FreeBSD Project by Jonathan Lemon
12 * and NAI Labs, the Security Research Division of Network Associates, Inc.
13 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
14 * DARPA CHATS research program.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote
25 * products derived from this software without specific prior written
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
41 * $DragonFly: src/sys/netinet/tcp_syncache.c,v 1.14 2004/07/02 04:41:01 hsu Exp $
44 #include "opt_inet6.h"
45 #include "opt_ipsec.h"
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
54 #include <sys/proc.h> /* for proc0 declaration */
55 #include <sys/random.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/in_cksum.h>
61 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/ip.h>
66 #include <netinet/in_var.h>
67 #include <netinet/in_pcb.h>
68 #include <netinet/ip_var.h>
70 #include <netinet/ip6.h>
71 #include <netinet/icmp6.h>
72 #include <netinet6/nd6.h>
73 #include <netinet6/ip6_var.h>
74 #include <netinet6/in6_pcb.h>
76 #include <netinet/tcp.h>
77 #include <netinet/tcp_fsm.h>
78 #include <netinet/tcp_seq.h>
79 #include <netinet/tcp_timer.h>
80 #include <netinet/tcp_var.h>
82 #include <netinet6/tcp6_var.h>
86 #include <netinet6/ipsec.h>
88 #include <netinet6/ipsec6.h>
90 #include <netproto/key/key.h>
94 #include <netipsec/ipsec.h>
96 #include <netipsec/ipsec6.h>
98 #include <netipsec/key.h>
100 #endif /*FAST_IPSEC*/
102 #include <vm/vm_zone.h>
104 static int tcp_syncookies = 1;
105 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
107 "Use TCP SYN cookies if the syncache overflows");
109 static void syncache_drop(struct syncache *, struct syncache_head *);
110 static void syncache_free(struct syncache *);
111 static void syncache_insert(struct syncache *, struct syncache_head *);
112 struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
113 static int syncache_respond(struct syncache *, struct mbuf *);
114 static struct socket *syncache_socket(struct syncache *, struct socket *);
115 static void syncache_timer(void *);
116 static u_int32_t syncookie_generate(struct syncache *);
117 static struct syncache *syncookie_lookup(struct in_conninfo *,
118 struct tcphdr *, struct socket *);
121 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
122 * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds,
123 * the odds are that the user has given up attempting to connect by then.
125 #define SYNCACHE_MAXREXMTS 3
127 /* Arbitrary values */
128 #define TCP_SYNCACHE_HASHSIZE 512
129 #define TCP_SYNCACHE_BUCKETLIMIT 30
131 struct tcp_syncache {
132 struct syncache_head *hashbase;
133 struct vm_zone *zone;
141 TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
142 struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
144 static struct tcp_syncache tcp_syncache;
146 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
148 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
149 &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");
151 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
152 &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");
154 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
155 &tcp_syncache.cache_count, 0, "Current number of entries in syncache");
157 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
158 &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");
160 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
161 &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");
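/*
 * Note: of the knobs above, only net.inet.tcp.syncookies and
 * net.inet.tcp.syncache.rexmtlimit are writable at run time (CTLFLAG_RW);
 * hashsize, cachelimit and bucketlimit are read-only sysctls and can only
 * be sized at boot through the loader tunables fetched in syncache_init().
 */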
163 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
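/*
 * Bucket hash for syncache entries: a boot-time random secret
 * (hash_secret) is mixed with the foreign address and the port pair so
 * that a remote sender cannot easily aim all of its SYNs at one bucket.
 */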
165 #define SYNCACHE_HASH(inc, mask) \
166 ((tcp_syncache.hash_secret ^ \
167 (inc)->inc_faddr.s_addr ^ \
168 ((inc)->inc_faddr.s_addr >> 16) ^ \
169 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
171 #define SYNCACHE_HASH6(inc, mask) \
172 ((tcp_syncache.hash_secret ^ \
173 (inc)->inc6_faddr.s6_addr32[0] ^ \
174 (inc)->inc6_faddr.s6_addr32[3] ^ \
175 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
177 #define ENDPTS_EQ(a, b) ( \
178 (a)->ie_fport == (b)->ie_fport && \
179 (a)->ie_lport == (b)->ie_lport && \
180 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
181 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
184 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
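/*
 * (Re)arm retransmit state for an entry: record the backoff slot, compute
 * the next SYN,ACK retransmit time from tcp_backoff[], queue the entry on
 * that slot's timer list, and start the slot's callout if it is idle.
 */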
186 #define SYNCACHE_TIMEOUT(sc, slot) do { \
187 sc->sc_rxtslot = slot; \
188 sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[slot]; \
189 TAILQ_INSERT_TAIL(&tcp_syncache.timerq[slot], sc, sc_timerq); \
190 if (!callout_active(&tcp_syncache.tt_timerq[slot])) \
191 callout_reset(&tcp_syncache.tt_timerq[slot], \
192 TCPTV_RTOBASE * tcp_backoff[slot], \
193 syncache_timer, (void *)((intptr_t)slot)); \
197 syncache_free(struct syncache *sc)
202 (void) m_free(sc->sc_ipopts);
204 if (sc->sc_inc.inc_isipv6)
205 rt = sc->sc_route6.ro_rt;
208 rt = sc->sc_route.ro_rt;
211 * If this is the only reference to a protocol cloned
212 * route, remove it immediately.
214 if (rt->rt_flags & RTF_WASCLONED &&
215 (sc->sc_flags & SCF_KEEPROUTE) == 0 &&
217 rtrequest(RTM_DELETE, rt_key(rt),
218 rt->rt_gateway, rt_mask(rt),
222 zfree(tcp_syncache.zone, sc);
230 tcp_syncache.cache_count = 0;
231 tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
232 tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
233 tcp_syncache.cache_limit =
234 tcp_syncache.hashsize * tcp_syncache.bucket_limit;
235 tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
236 tcp_syncache.hash_secret = arc4random();
238 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
239 &tcp_syncache.hashsize);
240 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
241 &tcp_syncache.cache_limit);
242 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
243 &tcp_syncache.bucket_limit);
244 if (!powerof2(tcp_syncache.hashsize)) {
245 printf("WARNING: syncache hash size is not a power of 2.\n");
246 tcp_syncache.hashsize = 512; /* safe default */
248 tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
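/* hashsize is now known to be a power of 2, so hashsize - 1 is a valid mask */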
250 /* Allocate the hash table. */
251 MALLOC(tcp_syncache.hashbase, struct syncache_head *,
252 tcp_syncache.hashsize * sizeof(struct syncache_head),
253 M_SYNCACHE, M_WAITOK);
255 /* Initialize the hash buckets. */
256 for (i = 0; i < tcp_syncache.hashsize; i++) {
257 TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
258 tcp_syncache.hashbase[i].sch_length = 0;
261 /* Initialize the timer queues. */
262 for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
263 TAILQ_INIT(&tcp_syncache.timerq[i]);
264 callout_init(&tcp_syncache.tt_timerq[i]);
268 * Allocate the syncache entries. Allow the zone to allocate one
269 * more entry than cache limit, so a new entry can bump out an
272 tcp_syncache.zone = zinit("syncache", sizeof(struct syncache),
273 tcp_syncache.cache_limit, ZONE_INTERRUPT, 0);
274 tcp_syncache.cache_limit -= 1;
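/*
 * cache_limit is now one less than the zone size; that spare entry is what
 * lets the zalloc() calls in syncache_add() and syncookie_lookup() be
 * treated as unable to fail.
 */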
278 syncache_insert(sc, sch)
280 struct syncache_head *sch;
282 struct syncache *sc2;
286 * Make sure that we don't overflow the per-bucket
287 * limit or the total cache size limit.
289 if (sch->sch_length >= tcp_syncache.bucket_limit) {
291 * The bucket is full; toss the oldest element.
293 sc2 = TAILQ_FIRST(&sch->sch_bucket);
294 sc2->sc_tp->ts_recent = ticks;
295 syncache_drop(sc2, sch);
296 tcpstat.tcps_sc_bucketoverflow++;
297 } else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
299 * The cache is full. Toss the oldest entry in the
300 * entire cache. This is the front entry in the
301 * first non-empty timer queue with the largest
304 for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
305 sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
309 sc2->sc_tp->ts_recent = ticks;
310 syncache_drop(sc2, NULL);
311 tcpstat.tcps_sc_cacheoverflow++;
314 /* Initialize the entry's timer. */
315 SYNCACHE_TIMEOUT(sc, 0);
317 /* Put it into the bucket. */
318 TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
320 tcp_syncache.cache_count++;
321 tcpstat.tcps_sc_added++;
325 syncache_drop(sc, sch)
327 struct syncache_head *sch;
332 if (sc->sc_inc.inc_isipv6) {
333 sch = &tcp_syncache.hashbase[
334 SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
338 sch = &tcp_syncache.hashbase[
339 SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
343 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
345 tcp_syncache.cache_count--;
347 TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
348 if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
349 callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);
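/*
 * Once a slot's queue drains its callout stays stopped; SYNCACHE_TIMEOUT()
 * re-arms it the next time an entry is queued on that slot.
 */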
355 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
356 * If we have retransmitted an entry the maximum number of times, expire it.
359 syncache_timer(xslot)
362 intptr_t slot = (intptr_t)xslot;
363 struct syncache *sc, *nsc;
368 if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
369 !callout_active(&tcp_syncache.tt_timerq[slot])) {
373 callout_deactivate(&tcp_syncache.tt_timerq[slot]);
375 nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
376 while (nsc != NULL) {
377 if (ticks < nsc->sc_rxttime)
380 inp = sc->sc_tp->t_inpcb;
381 if (slot == SYNCACHE_MAXREXMTS ||
382 slot >= tcp_syncache.rexmt_limit ||
383 inp->inp_gencnt != sc->sc_inp_gencnt) {
384 nsc = TAILQ_NEXT(sc, sc_timerq);
385 syncache_drop(sc, NULL);
386 tcpstat.tcps_sc_stale++;
390 * syncache_respond() may call back into the syncache to
391 * modify another entry, so do not obtain the next
392 * entry on the timer chain until it has completed.
394 (void) syncache_respond(sc, NULL);
395 nsc = TAILQ_NEXT(sc, sc_timerq);
396 tcpstat.tcps_sc_retransmitted++;
397 TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
398 SYNCACHE_TIMEOUT(sc, slot + 1);
401 callout_reset(&tcp_syncache.tt_timerq[slot],
402 nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
407 * Find an entry in the syncache.
410 syncache_lookup(inc, schp)
411 struct in_conninfo *inc;
412 struct syncache_head **schp;
415 struct syncache_head *sch;
418 if (inc->inc_isipv6) {
419 sch = &tcp_syncache.hashbase[
420 SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
422 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
423 if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
428 sch = &tcp_syncache.hashbase[
429 SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
431 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
433 if (sc->sc_inc.inc_isipv6)
436 if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
444 * This function is called when we get a RST for a
445 * non-existent connection, so that we can see if the
446 * connection is in the syn cache. If it is, zap it.
449 syncache_chkrst(inc, th)
450 struct in_conninfo *inc;
454 struct syncache_head *sch;
456 sc = syncache_lookup(inc, &sch);
460 * If the RST bit is set, check the sequence number to see
461 * if this is a valid reset segment.
463 * In all states except SYN-SENT, all reset (RST) segments
464 * are validated by checking their SEQ-fields. A reset is
465 * valid if its sequence number is in the window.
467 * The sequence number in the reset segment is normally an
468 * echo of our outgoing acknowledgement numbers, but some hosts
469 * send a reset with the sequence number at the rightmost edge
470 * of our receive window, and we have to handle this case.
472 if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
473 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
474 syncache_drop(sc, sch);
475 tcpstat.tcps_sc_reset++;
481 struct in_conninfo *inc;
484 struct syncache_head *sch;
486 sc = syncache_lookup(inc, &sch);
488 syncache_drop(sc, sch);
489 tcpstat.tcps_sc_badack++;
494 syncache_unreach(inc, th)
495 struct in_conninfo *inc;
499 struct syncache_head *sch;
501 /* we are called at splnet() here */
502 sc = syncache_lookup(inc, &sch);
506 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
507 if (ntohl(th->th_seq) != sc->sc_iss)
511 * If we've retransmitted 3 times and this is our second error,
512 * we remove the entry. Otherwise, we allow it to continue on.
513 * This prevents us from incorrectly nuking an entry during a
514 * spurious network outage.
518 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
519 sc->sc_flags |= SCF_UNREACH;
522 syncache_drop(sc, sch);
523 tcpstat.tcps_sc_unreach++;
527 * Build a new TCP socket structure from a syncache entry.
529 static struct socket *
530 syncache_socket(sc, lso)
534 struct inpcb *inp = NULL;
539 * Ok, create the full blown connection, and set things up
540 * as they would have been set up if we had created the
541 * connection when the SYN arrived. If we can't create
542 * the connection, abort it.
544 so = sonewconn(lso, SS_ISCONNECTED);
547 * Drop the connection; we will send a RST if the peer
548 * retransmits the ACK.
550 tcpstat.tcps_listendrop++;
557 * Insert new socket into hash list.
559 inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
561 if (sc->sc_inc.inc_isipv6) {
562 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
564 inp->inp_vflag &= ~INP_IPV6;
565 inp->inp_vflag |= INP_IPV4;
567 inp->inp_laddr = sc->sc_inc.inc_laddr;
571 inp->inp_lport = sc->sc_inc.inc_lport;
572 if (in_pcbinsporthash(inp) != 0) {
574 * Undo the assignments above if we failed to
575 * put the PCB on the hash lists.
578 if (sc->sc_inc.inc_isipv6)
579 inp->in6p_laddr = in6addr_any;
582 inp->inp_laddr.s_addr = INADDR_ANY;
587 /* copy old policy into new socket's */
588 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
589 printf("syncache_expand: could not copy policy\n");
592 if (sc->sc_inc.inc_isipv6) {
593 struct inpcb *oinp = sotoinpcb(lso);
594 struct in6_addr laddr6;
595 struct sockaddr_in6 sin6;
597 * Inherit socket options from the listening socket.
598 * Note that in6p_inputopts are not (and should not be)
599 * copied, since it stores previously received options and is
600 * used to detect if each new option is different than the
601 * previous one and hence should be passed to a user.
602 * If we copied in6p_inputopts, a user would not be able to
603 * receive options just after calling the accept system call.
605 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
606 if (oinp->in6p_outputopts)
607 inp->in6p_outputopts =
608 ip6_copypktopts(oinp->in6p_outputopts, M_INTWAIT);
609 inp->in6p_route = sc->sc_route6;
610 sc->sc_route6.ro_rt = NULL;
612 sin6.sin6_family = AF_INET6;
613 sin6.sin6_len = sizeof sin6;
614 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
615 sin6.sin6_port = sc->sc_inc.inc_fport;
616 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
617 laddr6 = inp->in6p_laddr;
618 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
619 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
620 if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, &thread0)) {
621 inp->in6p_laddr = laddr6;
627 struct in_addr laddr;
628 struct sockaddr_in sin;
630 inp->inp_options = ip_srcroute();
631 if (inp->inp_options == NULL) {
632 inp->inp_options = sc->sc_ipopts;
633 sc->sc_ipopts = NULL;
635 inp->inp_route = sc->sc_route;
636 sc->sc_route.ro_rt = NULL;
638 sin.sin_family = AF_INET;
639 sin.sin_len = sizeof sin;
640 sin.sin_addr = sc->sc_inc.inc_faddr;
641 sin.sin_port = sc->sc_inc.inc_fport;
642 bzero(sin.sin_zero, sizeof sin.sin_zero);
643 laddr = inp->inp_laddr;
644 if (inp->inp_laddr.s_addr == INADDR_ANY)
645 inp->inp_laddr = sc->sc_inc.inc_laddr;
646 if (in_pcbconnect(inp, (struct sockaddr *)&sin, &thread0)) {
647 inp->inp_laddr = laddr;
653 tp->t_state = TCPS_SYN_RECEIVED;
654 tp->iss = sc->sc_iss;
655 tp->irs = sc->sc_irs;
658 tp->snd_wl1 = sc->sc_irs;
659 tp->rcv_up = sc->sc_irs + 1;
660 tp->rcv_wnd = sc->sc_wnd;
661 tp->rcv_adv += tp->rcv_wnd;
663 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
664 if (sc->sc_flags & SCF_NOOPT)
665 tp->t_flags |= TF_NOOPT;
666 if (sc->sc_flags & SCF_WINSCALE) {
667 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
668 tp->requested_s_scale = sc->sc_requested_s_scale;
669 tp->request_r_scale = sc->sc_request_r_scale;
671 if (sc->sc_flags & SCF_TIMESTAMP) {
672 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
673 tp->ts_recent = sc->sc_tsrecent;
674 tp->ts_recent_age = ticks;
676 if (sc->sc_flags & SCF_CC) {
678 * Initialization of the tcpcb for transaction;
679 * set SND.WND = SEG.WND,
680 * initialize CCsend and CCrecv.
682 tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
683 tp->cc_send = sc->sc_cc_send;
684 tp->cc_recv = sc->sc_cc_recv;
687 tcp_mss(tp, sc->sc_peer_mss);
690 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
692 if (sc->sc_rxtslot != 0)
693 tp->snd_cwnd = tp->t_maxseg;
694 callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
696 tcpstat.tcps_accepts++;
706 * This function gets called when we receive an ACK for a
707 * socket in the LISTEN state. We look up the connection
708 * in the syncache, and if it's there, we pull it out of
709 * the cache and turn it into a full-blown connection in
710 * the SYN-RECEIVED state.
713 syncache_expand(inc, th, sop, m)
714 struct in_conninfo *inc;
720 struct syncache_head *sch;
723 sc = syncache_lookup(inc, &sch);
726 * There is no syncache entry, so see if this ACK is
727 * a returning syncookie. To do this, first:
728 * A. See if this socket has had a syncache entry dropped in
729 * the past. We don't want to accept a bogus syncookie
730 * if we've never received a SYN.
731 * B. check that the syncookie is valid. If it is, then
732 * cobble up a fake syncache entry, and return.
736 sc = syncookie_lookup(inc, th, *sop);
740 tcpstat.tcps_sc_recvcookie++;
744 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
746 if (th->th_ack != sc->sc_iss + 1)
749 so = syncache_socket(sc, *sop);
753 /* XXXjlemon check this - is this correct? */
754 (void) tcp_respond(NULL, m, m, th,
755 th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
757 m_freem(m); /* XXX only needed for above */
758 tcpstat.tcps_sc_aborted++;
760 sc->sc_flags |= SCF_KEEPROUTE;
761 tcpstat.tcps_sc_completed++;
766 syncache_drop(sc, sch);
772 * Given a LISTEN socket and an inbound SYN request, add
773 * this to the syn cache, and send back a segment:
774 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
777 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
778 * Doing so would require that we hold onto the data and deliver it
779 * to the application. However, if we are the target of a SYN-flood
780 * DoS attack, an attacker could send data which would eventually
781 * consume all available buffer space if it were ACKed. By not ACKing
782 * the data, we avoid this DoS scenario.
785 syncache_add(inc, to, th, sop, m)
786 struct in_conninfo *inc;
794 struct syncache *sc = NULL;
795 struct syncache_head *sch;
796 struct mbuf *ipopts = NULL;
797 struct rmxp_tao *taop;
804 * Remember the IP options, if any.
807 if (!inc->inc_isipv6)
809 ipopts = ip_srcroute();
812 * See if we already have an entry for this connection.
813 * If we do, resend the SYN,ACK, and reset the retransmit timer.
816 * should the syncache be re-initialized with the contents
817 * of the new SYN here (which may have different options?)
819 sc = syncache_lookup(inc, &sch);
821 tcpstat.tcps_sc_dupsyn++;
824 * If we were remembering a previous source route,
825 * forget it and use the new one we've been given.
828 (void) m_free(sc->sc_ipopts);
829 sc->sc_ipopts = ipopts;
832 * Update timestamp if present.
834 if (sc->sc_flags & SCF_TIMESTAMP)
835 sc->sc_tsrecent = to->to_tsval;
837 * PCB may have changed, pick up new values.
840 sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
841 if (syncache_respond(sc, m) == 0) {
842 TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
844 SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
845 tcpstat.tcps_sndacks++;
846 tcpstat.tcps_sndtotal++;
853 * This allocation is guaranteed to succeed because we
854 * preallocate one more syncache entry than cache_limit.
856 sc = zalloc(tcp_syncache.zone);
859 * Fill in the syncache values.
862 sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
863 sc->sc_ipopts = ipopts;
864 sc->sc_inc.inc_fport = inc->inc_fport;
865 sc->sc_inc.inc_lport = inc->inc_lport;
867 sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
868 if (inc->inc_isipv6) {
869 sc->sc_inc.inc6_faddr = inc->inc6_faddr;
870 sc->sc_inc.inc6_laddr = inc->inc6_laddr;
871 sc->sc_route6.ro_rt = NULL;
875 sc->sc_inc.inc_faddr = inc->inc_faddr;
876 sc->sc_inc.inc_laddr = inc->inc_laddr;
877 sc->sc_route.ro_rt = NULL;
879 sc->sc_irs = th->th_seq;
881 sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
883 sc->sc_iss = syncookie_generate(sc);
885 sc->sc_iss = arc4random();
887 /* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
888 win = sbspace(&so->so_rcv);
890 win = imin(win, TCP_MAXWIN);
893 if (tcp_do_rfc1323) {
895 * A timestamp received in a SYN makes
896 * it ok to send timestamp requests and replies.
898 if (to->to_flags & TOF_TS) {
899 sc->sc_tsrecent = to->to_tsval;
900 sc->sc_flags |= SCF_TIMESTAMP;
902 if (to->to_flags & TOF_SCALE) {
905 /* Compute proper scaling value from buffer space */
906 while (wscale < TCP_MAX_WINSHIFT &&
907 (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
909 sc->sc_request_r_scale = wscale;
910 sc->sc_requested_s_scale = to->to_requested_s_scale;
911 sc->sc_flags |= SCF_WINSCALE;
914 if (tcp_do_rfc1644) {
916 * A CC or CC.new option received in a SYN makes
917 * it ok to send CC in subsequent segments.
919 if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
920 sc->sc_cc_recv = to->to_cc;
921 sc->sc_cc_send = CC_INC(tcp_ccgen);
922 sc->sc_flags |= SCF_CC;
925 if (tp->t_flags & TF_NOOPT)
926 sc->sc_flags = SCF_NOOPT;
930 * We have the option here of not doing TAO (even if the segment
931 * qualifies) and instead falling back to a normal 3WHS via the syncache.
932 * This allows us to apply synflood protection to TAO-qualifying SYNs
933 * also. However, there should be a heuristic to determine when to
934 * do this, but one is not present at the moment.
938 * Perform TAO test on incoming CC (SEG.CC) option, if any.
939 * - compare SEG.CC against cached CC from the same host, if any.
940 * - if SEG.CC > cached value, SYN must be new and is accepted
941 * immediately: save new CC in the cache, mark the socket
942 * connected, enter ESTABLISHED state, turn on flag to
943 * send a SYN in the next segment.
944 * A virtual advertised window is set in rcv_adv to
945 * initialize SWS prevention. Then enter normal segment
946 * processing: drop SYN, process data and FIN.
947 * - otherwise do a normal 3-way handshake.
949 taop = tcp_gettaocache(&sc->sc_inc);
950 if ((to->to_flags & TOF_CC) != 0) {
951 if (((tp->t_flags & TF_NOPUSH) != 0) &&
952 sc->sc_flags & SCF_CC &&
953 taop != NULL && taop->tao_cc != 0 &&
954 CC_GT(to->to_cc, taop->tao_cc)) {
956 so = syncache_socket(sc, *sop);
958 sc->sc_flags |= SCF_KEEPROUTE;
959 taop->tao_cc = to->to_cc;
967 * No CC option, but maybe CC.NEW: invalidate cached value.
973 * TAO test failed or there was no CC option,
974 * do a standard 3-way handshake.
976 if (syncache_respond(sc, m) == 0) {
977 syncache_insert(sc, sch);
978 tcpstat.tcps_sndacks++;
979 tcpstat.tcps_sndtotal++;
982 tcpstat.tcps_sc_dropped++;
989 syncache_respond(sc, m)
995 u_int16_t tlen, hlen, mssopt;
996 struct ip *ip = NULL;
1000 struct ip6_hdr *ip6 = NULL;
1004 if (sc->sc_inc.inc_isipv6) {
1005 rt = tcp_rtlookup6(&sc->sc_inc);
1007 mssopt = rt->rt_ifp->if_mtu -
1008 (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
1010 mssopt = tcp_v6mssdflt;
1011 hlen = sizeof(struct ip6_hdr);
1015 rt = tcp_rtlookup(&sc->sc_inc);
1017 mssopt = rt->rt_ifp->if_mtu -
1018 (sizeof(struct ip) + sizeof(struct tcphdr));
1020 mssopt = tcp_mssdflt;
1021 hlen = sizeof(struct ip);
1024 /* Compute the size of the TCP options. */
1025 if (sc->sc_flags & SCF_NOOPT) {
1028 optlen = TCPOLEN_MAXSEG +
1029 ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
1030 ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
1031 ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
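/*
 * Worst case (MSS, window scale, timestamps and both T/TCP CC options):
 * 4 + 4 + 12 + 2 * 8 = 36 bytes of options.
 */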
1033 tlen = hlen + sizeof(struct tcphdr) + optlen;
1037 * assume that the entire packet will fit in a header mbuf
1039 KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));
1042 * XXX shouldn't this reuse the mbuf if possible?
1043 * Create the IP+TCP header from scratch.
1048 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
1051 m->m_data += max_linkhdr;
1053 m->m_pkthdr.len = tlen;
1054 m->m_pkthdr.rcvif = NULL;
1057 if (sc->sc_inc.inc_isipv6) {
1058 ip6 = mtod(m, struct ip6_hdr *);
1059 ip6->ip6_vfc = IPV6_VERSION;
1060 ip6->ip6_nxt = IPPROTO_TCP;
1061 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1062 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1063 ip6->ip6_plen = htons(tlen - hlen);
1064 /* ip6_hlim is set after checksum */
1065 /* ip6_flow = ??? */
1067 th = (struct tcphdr *)(ip6 + 1);
1071 ip = mtod(m, struct ip *);
1072 ip->ip_v = IPVERSION;
1073 ip->ip_hl = sizeof(struct ip) >> 2;
1078 ip->ip_p = IPPROTO_TCP;
1079 ip->ip_src = sc->sc_inc.inc_laddr;
1080 ip->ip_dst = sc->sc_inc.inc_faddr;
1081 ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl; /* XXX */
1082 ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos; /* XXX */
1085 * See if we should do MTU discovery. Route lookups are expensive,
1086 * so we will only unset the DF bit if:
1088 * 1) path_mtu_discovery is disabled
1089 * 2) the SCF_UNREACH flag has been set
1091 if (path_mtu_discovery
1092 && ((sc->sc_flags & SCF_UNREACH) == 0)) {
1093 ip->ip_off |= IP_DF;
1096 th = (struct tcphdr *)(ip + 1);
1098 th->th_sport = sc->sc_inc.inc_lport;
1099 th->th_dport = sc->sc_inc.inc_fport;
1101 th->th_seq = htonl(sc->sc_iss);
1102 th->th_ack = htonl(sc->sc_irs + 1);
1103 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1105 th->th_flags = TH_SYN|TH_ACK;
1106 th->th_win = htons(sc->sc_wnd);
1109 /* Tack on the TCP options. */
1112 optp = (u_int8_t *)(th + 1);
1113 *optp++ = TCPOPT_MAXSEG;
1114 *optp++ = TCPOLEN_MAXSEG;
1115 *optp++ = (mssopt >> 8) & 0xff;
1116 *optp++ = mssopt & 0xff;
1118 if (sc->sc_flags & SCF_WINSCALE) {
1119 *((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
1120 TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
1121 sc->sc_request_r_scale);
1125 if (sc->sc_flags & SCF_TIMESTAMP) {
1126 u_int32_t *lp = (u_int32_t *)(optp);
1128 /* Form timestamp option as shown in appendix A of RFC 1323. */
1129 *lp++ = htonl(TCPOPT_TSTAMP_HDR);
1130 *lp++ = htonl(ticks);
1131 *lp = htonl(sc->sc_tsrecent);
1132 optp += TCPOLEN_TSTAMP_APPA;
1136 * Send CC and CC.echo if we received CC from our peer.
1138 if (sc->sc_flags & SCF_CC) {
1139 u_int32_t *lp = (u_int32_t *)(optp);
1141 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
1142 *lp++ = htonl(sc->sc_cc_send);
1143 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
1144 *lp = htonl(sc->sc_cc_recv);
1145 optp += TCPOLEN_CC_APPA * 2;
1150 if (sc->sc_inc.inc_isipv6) {
1151 struct route_in6 *ro6 = &sc->sc_route6;
1154 th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
1155 ip6->ip6_hlim = in6_selecthlim(NULL,
1156 ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
1157 error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
1158 sc->sc_tp->t_inpcb);
1162 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1163 htons(tlen - hlen + IPPROTO_TCP));
1164 m->m_pkthdr.csum_flags = CSUM_TCP;
1165 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1166 error = ip_output(m, sc->sc_ipopts, &sc->sc_route, 0, NULL,
1167 sc->sc_tp->t_inpcb);
1175 * |. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
1177 * | MD5(laddr,faddr,secret,lport,fport) |. . . . . . .|
1179 * (A): peer mss index
1183 * The values below are chosen to minimize the size of the tcp_secret
1184 * table, as well as providing roughly a 16 second lifetime for the cookie.
1187 #define SYNCOOKIE_WNDBITS 5 /* exposed bits for window indexing */
1188 #define SYNCOOKIE_TIMESHIFT 1 /* scale ticks to window time units */
1190 #define SYNCOOKIE_WNDMASK ((1 << SYNCOOKIE_WNDBITS) - 1)
1191 #define SYNCOOKIE_NSECRETS (1 << SYNCOOKIE_WNDBITS)
1192 #define SYNCOOKIE_TIMEOUT \
1193 (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
1194 #define SYNCOOKIE_DATAMASK ((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
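/*
 * With these values the 32-bit cookie carries 5 bits of secret/time-window
 * index and 2 bits of tcp_msstab[] index; the rest is filled in by (and
 * later verified against) the MD5 hash.  SYNCOOKIE_TIMEOUT works out to
 * hz * 32 / 2, i.e. 16 seconds' worth of ticks, which is the lifetime
 * mentioned above.
 */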
1197 u_int32_t ts_secbits[4];
1199 } tcp_secret[SYNCOOKIE_NSECRETS];
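/*
 * MSS values that can be encoded in the cookie.  syncookie_generate()
 * picks the largest entry that does not exceed the peer's advertised MSS,
 * and syncookie_lookup() recovers that value from the returned index.
 */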
1201 static int tcp_msstab[] = { 0, 536, 1460, 8960 };
1203 static MD5_CTX syn_ctx;
1205 #define MD5Add(v) MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))
1208 u_int32_t laddr, faddr;
1209 u_int32_t secbits[4];
1210 u_int16_t lport, fport;
1214 CTASSERT(sizeof(struct md5_add) == 28);
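/*
 * 4 (laddr) + 4 (faddr) + 16 (secbits) + 2 + 2 (ports) = 28 bytes; the
 * CTASSERT guards against compiler-inserted padding changing the MD5 input.
 */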
1218 * Consider the problem of a recreated (and retransmitted) cookie. If the
1219 * original SYN was accepted, the connection is established. The second
1220 * SYN is inflight, and if it arrives with an ISN that falls within the
1221 * receive window, the connection is killed.
1223 * However, since cookies have other problems, this may not be worth
1228 syncookie_generate(struct syncache *sc)
1230 u_int32_t md5_buffer[4];
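/*
 * Secrets are rotated per time window: if the secret for the current
 * window has expired, it is regenerated from arc4random() before being
 * folded into the cookie.
 */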
1235 idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
1236 if (tcp_secret[idx].ts_expire < ticks) {
1237 for (i = 0; i < 4; i++)
1238 tcp_secret[idx].ts_secbits[i] = arc4random();
1239 tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
1241 for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
1242 if (tcp_msstab[data] <= sc->sc_peer_mss)
1244 data = (data << SYNCOOKIE_WNDBITS) | idx;
1245 data ^= sc->sc_irs; /* peer's iss */
1248 if (sc->sc_inc.inc_isipv6) {
1249 MD5Add(sc->sc_inc.inc6_laddr);
1250 MD5Add(sc->sc_inc.inc6_faddr);
1256 add.laddr = sc->sc_inc.inc_laddr.s_addr;
1257 add.faddr = sc->sc_inc.inc_faddr.s_addr;
1259 add.lport = sc->sc_inc.inc_lport;
1260 add.fport = sc->sc_inc.inc_fport;
1261 add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1262 add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1263 add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1264 add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1266 MD5Final((u_char *)&md5_buffer, &syn_ctx);
1267 data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
1271 static struct syncache *
1272 syncookie_lookup(inc, th, so)
1273 struct in_conninfo *inc;
1277 u_int32_t md5_buffer[4];
1278 struct syncache *sc;
1283 data = (th->th_ack - 1) ^ (th->th_seq - 1); /* remove ISS */
1284 idx = data & SYNCOOKIE_WNDMASK;
1285 if (tcp_secret[idx].ts_expire < ticks ||
1286 sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
1290 if (inc->inc_isipv6) {
1291 MD5Add(inc->inc6_laddr);
1292 MD5Add(inc->inc6_faddr);
1298 add.laddr = inc->inc_laddr.s_addr;
1299 add.faddr = inc->inc_faddr.s_addr;
1301 add.lport = inc->inc_lport;
1302 add.fport = inc->inc_fport;
1303 add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1304 add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1305 add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1306 add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1308 MD5Final((u_char *)&md5_buffer, &syn_ctx);
1309 data ^= md5_buffer[0];
1310 if ((data & ~SYNCOOKIE_DATAMASK) != 0)
1312 data = data >> SYNCOOKIE_WNDBITS;
1315 * This allocation is guaranteed to succeed because we
1316 * preallocate one more syncache entry than cache_limit.
1318 sc = zalloc(tcp_syncache.zone);
1321 * Fill in the syncache values.
1322 * XXX duplicate code from syncache_add
1324 sc->sc_ipopts = NULL;
1325 sc->sc_inc.inc_fport = inc->inc_fport;
1326 sc->sc_inc.inc_lport = inc->inc_lport;
1328 sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
1329 if (inc->inc_isipv6) {
1330 sc->sc_inc.inc6_faddr = inc->inc6_faddr;
1331 sc->sc_inc.inc6_laddr = inc->inc6_laddr;
1332 sc->sc_route6.ro_rt = NULL;
1336 sc->sc_inc.inc_faddr = inc->inc_faddr;
1337 sc->sc_inc.inc_laddr = inc->inc_laddr;
1338 sc->sc_route.ro_rt = NULL;
1340 sc->sc_irs = th->th_seq - 1;
1341 sc->sc_iss = th->th_ack - 1;
1342 wnd = sbspace(&so->so_rcv);
1344 wnd = imin(wnd, TCP_MAXWIN);
1348 sc->sc_peer_mss = tcp_msstab[data];