/*-
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 * $DragonFly: src/sys/netinet/tcp_syncache.c,v 1.3 2003/06/25 03:56:04 dillon Exp $
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netkey/key.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define IPSEC
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/vm_zone.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct tcp_syncache {
	struct	syncache_head *hashbase;
	struct	vm_zone *zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	u_int	next_reseed;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask) \
	((tcp_syncache.hash_secret ^ \
	  (inc)->inc_faddr.s_addr ^ \
	  ((inc)->inc_faddr.s_addr >> 16) ^ \
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask) \
	((tcp_syncache.hash_secret ^ \
	  (inc)->inc6_faddr.s6_addr32[0] ^ \
	  (inc)->inc6_faddr.s6_addr32[3] ^ \
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)
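
/*
 * The bucket hash above folds a boot-time random secret (hash_secret,
 * seeded from arc4random() in syncache_init() below) into the foreign
 * address and the port pair before masking down to the table size, so
 * a remote attacker who cannot know the secret cannot deliberately aim
 * a flood of SYNs at a single bucket to trip the per-bucket limit.
 */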

#define ENDPTS_EQ(a, b) ( \
	(a)->ie_fport == (b)->ie_fport && \
	(a)->ie_lport == (b)->ie_lport && \
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

#define SYNCACHE_TIMEOUT(sc, slot) do {				\
	sc->sc_rxtslot = slot;					\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[slot]; \
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[slot], sc, sc_timerq); \
	if (!callout_active(&tcp_syncache.tt_timerq[slot]))	\
		callout_reset(&tcp_syncache.tt_timerq[slot],	\
		    TCPTV_RTOBASE * tcp_backoff[slot],		\
		    syncache_timer, (void *)((intptr_t)slot));	\
} while (0)
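
/*
 * Each entry thus sits on the timer queue for its current retransmit
 * slot, and every retransmission moves it to the next slot with an
 * exponentially larger timeout (TCPTV_RTOBASE scaled by tcp_backoff[]).
 * Assuming the usual doubling backoff table, slot 0 fires after one base
 * interval, slot 1 after two, slot 2 after four, and so on, which is
 * where the 1 + 2 + 4 + 8 arithmetic above comes from.
 */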

static void
syncache_free(struct syncache *sc)
{
	struct rtentry *rt;

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6)
		rt = sc->sc_route6.ro_rt;
	else
#endif
		rt = sc->sc_route.ro_rt;
	if (rt != NULL) {
		/*
		 * If this is the only reference to a protocol cloned
		 * route, remove it immediately.
		 */
		if (rt->rt_flags & RTF_WASCLONED &&
		    (sc->sc_flags & SCF_KEEPROUTE) == 0 &&
		    rt->rt_refcnt == 1)
			rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt),
			    rt->rt_flags, NULL);
		RTFREE(rt);
	}
	zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.next_reseed = 0;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i]);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.cache_limit -= 1;
	tcp_syncache.zone = zinit("syncache", sizeof(struct syncache),
	    tcp_syncache.cache_limit, ZONE_INTERRUPT, 0);
}
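
/*
 * The TUNABLE_INT_FETCH() calls above mean the table geometry can be
 * set at boot time, for example (illustrative values, not defaults):
 *
 *	# /boot/loader.conf
 *	net.inet.tcp.syncache.hashsize="1024"	# must be a power of 2
 *	net.inet.tcp.syncache.bucketlimit="60"
 *
 * Unless overridden, cachelimit defaults to hashsize * bucketlimit as
 * computed above.
 */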

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int s, i;

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	s = splnet();
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
	splx(s);
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	int s;

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	s = splnet();

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);
	splx(s);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;
	int s;

	s = splnet();
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		splx(s);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache
		 * to modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
		(void) syncache_respond(sc, NULL);
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	splx(s);
}
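
/*
 * Note that the slot's callout is re-armed above only while entries
 * remain queued; once a slot's queue drains (entries either expire or
 * move to the next slot), the callout goes idle until SYNCACHE_TIMEOUT()
 * arms it again for a new entry.
 */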

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;
	int s;

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		s = splnet();
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) {
				splx(s);
				return (sc);
			}
		}
		splx(s);
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		s = splnet();
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) {
				splx(s);
				return (sc);
			}
		}
		splx(s);
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso)
	struct syncache *sc;
	struct socket *lso;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort;
	}

	inp = sotoinpcb(so);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 *sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
		inp->in6p_route = sc->sc_route6;
		sc->sc_route6.ro_rt = NULL;

		MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
		    M_SONAME, M_NOWAIT | M_ZERO);
		if (sin6 == NULL)
			goto abort;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_addr = sc->sc_inc.inc6_faddr;
		sin6->sin6_port = sc->sc_inc.inc_fport;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)sin6, &proc0)) {
			inp->in6p_laddr = laddr6;
			FREE(sin6, M_SONAME);
			goto abort;
		}
		FREE(sin6, M_SONAME);
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in *sin;

		inp->inp_options = ip_srcroute();
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}
		inp->inp_route = sc->sc_route;
		sc->sc_route.ro_rt = NULL;

		MALLOC(sin, struct sockaddr_in *, sizeof *sin,
		    M_SONAME, M_NOWAIT | M_ZERO);
		if (sin == NULL)
			goto abort;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = sc->sc_inc.inc_faddr;
		sin->sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)sin, &thread0)) {
			inp->inp_laddr = laddr;
			FREE(sin, M_SONAME);
			goto abort;
		}
		FREE(sin, M_SONAME);
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_CC) {
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
		tp->cc_send = sc->sc_cc_send;
		tp->cc_recv = sc->sc_cc_recv;
	}

	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);		/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else {
		sc->sc_flags |= SCF_KEEPROUTE;
		tcpstat.tcps_sc_completed++;
	}
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}
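
/*
 * Note that a cookie-built entry (sch == NULL above) was never inserted
 * into the hash table or timer queues, so it is released directly with
 * syncache_free() rather than unlinked via syncache_drop().
 */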

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	struct rmxp_tao *taop;
	int i, s, win;

	so = *sop;
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute();

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			s = splnet();
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			splx(s);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	sc = zalloc(tcp_syncache.zone);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		s = splnet();
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc != NULL)
				break;
		}
		sc->sc_tp->ts_recent = ticks;
		syncache_drop(sc, NULL);
		splx(s);
		tcpstat.tcps_sc_zonefail++;
		sc = zalloc(tcp_syncache.zone);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			return (0);
		}
	}

	/*
	 * Fill in the syncache values.
	 */
	bzero(sc, sizeof(*sc));
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = arc4random();

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead falling back to a normal 3WHS via the
	 * syncache.  This allows us to apply synflood protection to
	 * TAO-qualifying SYNs as well.  However, there should be a
	 * heuristic to determine when to do this, and one is not present
	 * at the moment.
	 */

	/*
	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
	 * - compare SEG.CC against cached CC from the same host, if any.
	 * - if SEG.CC > cached value, SYN must be new and is accepted
	 *	immediately: save new CC in the cache, mark the socket
	 *	connected, enter ESTABLISHED state, turn on flag to
	 *	send a SYN in the next segment.
	 *	A virtual advertised window is set in rcv_adv to
	 *	initialize SWS prevention.  Then enter normal segment
	 *	processing: drop SYN, process data and FIN.
	 * - otherwise do a normal 3-way handshake.
	 */
	taop = tcp_gettaocache(&sc->sc_inc);
	if ((to->to_flags & TOF_CC) != 0) {
		if (((tp->t_flags & TF_NOPUSH) != 0) &&
		    sc->sc_flags & SCF_CC &&
		    taop != NULL && taop->tao_cc != 0 &&
		    CC_GT(to->to_cc, taop->tao_cc)) {
			sc->sc_rxtslot = 0;
			so = syncache_socket(sc, *sop);
			if (so != NULL) {
				sc->sc_flags |= SCF_KEEPROUTE;
				taop->tao_cc = to->to_cc;
				*sop = so;
			}
			syncache_free(sc);
			return (so != NULL);
		}
	} else {
		/*
		 * No CC option, but maybe CC.NEW: invalidate cached value.
		 */
		if (taop != NULL)
			taop->tao_cc = 0;
	}
	/*
	 * TAO test failed or there was no CC option,
	 * do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}

static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct rtentry *rt;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		rt = tcp_rtlookup6(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
		else
			mssopt = tcp_v6mssdflt;
		hlen = sizeof(struct ip6_hdr);
	} else
#endif
	{
		rt = tcp_rtlookup(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip) + sizeof(struct tcphdr));
		else
			mssopt = tcp_mssdflt;
		hlen = sizeof(struct ip);
	}

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery
		    && ((sc->sc_flags & SCF_UNREACH) == 0)) {
			ip->ip_off |= IP_DF;
		}

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen == 0)
		goto no_options;
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send CC and CC.echo if we received CC from our peer.
	 */
	if (sc->sc_flags & SCF_CC) {
		u_int32_t *lp = (u_int32_t *)(optp);

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp++ = htonl(sc->sc_cc_send);
		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
		*lp   = htonl(sc->sc_cc_recv);
		optp += TCPOLEN_CC_APPA * 2;
	}
no_options:

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct route_in6 *ro6 = &sc->sc_route6;

		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL,
		    ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route, 0, NULL,
		    sc->sc_tp->t_inpcb);
	}
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
	(hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
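
/*
 * Worked out with the values above: SYNCOOKIE_WNDMASK is 0x1f, so 32
 * secrets are kept, and SYNCOOKIE_TIMEOUT is (hz * 32) / 2, i.e. 16
 * seconds' worth of ticks regardless of hz; this is the "roughly a 16
 * second lifetime" mentioned above.  SYNCOOKIE_DATAMASK is 0x7f: the
 * five window-index bits plus the two bits of the peer mss index (A)
 * shown in the cookie diagram.
 */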

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif
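
/*
 * The CTASSERT above pins the layout: 2 * 4 + 4 * 4 + 2 * 2 = 28 bytes
 * with no compiler padding, so MD5Add(add) in syncookie_generate() and
 * syncookie_lookup() hashes exactly the same byte sequence on both
 * sides.  If padding crept in, generated and validated digests could
 * disagree and every returning cookie would be rejected.
 */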

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;			/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}
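
/*
 * Validation works in reverse: syncookie_lookup() below takes the
 * returning (ACK - 1) as the candidate cookie, XORs out the peer ISS
 * layer (SEQ - 1), recomputes the same MD5 digest from the connection
 * endpoints and the indexed secret, and rejects the segment unless
 * every bit outside SYNCOOKIE_DATAMASK cancels to zero.  Only the mss
 * index and the secret window index survive; everything else about the
 * original SYN (e.g. window scaling) has been forgotten.
 */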

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = zalloc(tcp_syncache.zone);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}