tcp/sack: Implement RFC3517bis
[dragonfly.git] / sys / netinet / tcp_input.c
/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/in_cksum.h>

#include <sys/socketvar2.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>

u_char tcp_saveipgen[40];	/* the size must be of max ip header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netproto/key/key.h>
#endif

MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

#ifdef TCP_DROP_SYNFIN
static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif

static int tcp_do_limitedtransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
    &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_early_retransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
    &tcp_do_early_retransmit, 0, "Early retransmit");

int tcp_aggregate_acks = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
    &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");

static int tcp_do_eifel_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
    &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");

static int tcp_do_abc = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
    &tcp_do_abc, 0,
    "TCP Appropriate Byte Counting (RFC 3465)");

/*
 * Defined as a tunable for easy testing with SACK on and off.
 * Warning: do not change the setting in the middle of an existing active TCP
 * flow, else strange things might happen to that flow.
 */
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable SACK Algorithms");

int tcp_do_smartsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
    &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");

int tcp_do_rescuesack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW,
    &tcp_do_rescuesack, 0, "Rescue retransmission for SACK");

int tcp_aggressive_rescuesack = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW,
    &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK");

int tcp_do_rfc3517bis = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis, CTLFLAG_RW,
    &tcp_do_rfc3517bis, 0, "Enable RFC3517 update");
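
/*
 * Editorial example (not part of the original file): each SYSCTL_INT
 * above exports a runtime knob under net.inet.tcp, so the RFC3517bis
 * update this commit adds can be toggled on a live system with, e.g.
 *
 *	sysctl net.inet.tcp.rfc3517bis=1
 *
 * Per the warning on tcp_do_sack above, such knobs should not be
 * flipped while active TCP flows are in progress.
 */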

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

int tcp_sosend_agglim = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW,
    &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit");

int tcp_sosend_async = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW,
    &tcp_sosend_async, 0, "TCP asynchronized pru_send");

static int tcp_ignore_redun_dsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW,
    &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK");

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t,
		    tcp_seq);
static void	 tcp_pulloutofband(struct socket *,
		    struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		    struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int, tcp_seq);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
static void	 tcp_sack_rexmt(struct tcpcb *, struct tcphdr *);
static boolean_t tcp_sack_limitedxmit(struct tcpcb *);
static int	 tcp_rmx_msl(const struct tcpcb *);
static void	 tcp_established(struct tcpcb *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
	    (tp)->t_inpcb->in6p_route.ro_rt) \
		nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- delayed acks are enabled and
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 */
#define DELAY_ACK(tp) \
	(tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
	!(tp->t_flags & TF_RXWIN0SENT))

#define acceptable_window_update(tp, th, tiwin) \
	(SEQ_LT(tp->snd_wl1, th->th_seq) || \
	 (tp->snd_wl1 == th->th_seq && \
	  (SEQ_LT(tp->snd_wl2, th->th_ack) || \
	   (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))

#define iceildiv(n, d)		(((n)+(d)-1) / (d))
#define need_early_retransmit(tp, ownd) \
	(tcp_do_early_retransmit && \
	 (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \
	 ownd < (4 * tp->t_maxseg) && \
	 tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \
	 (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \
	  tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg)))
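
/*
 * Editorial note (not in the original source): a worked example of the
 * threshold above.  With ownd equal to three full-sized segments,
 * iceildiv(ownd, t_maxseg) is 3, so early retransmit is considered once
 * t_dupacks reaches 2, i.e. when duplicate ACKs account for all
 * outstanding data except the one presumably lost segment.  With SACK
 * in use, tcp_sack_has_sacked() must additionally confirm that all but
 * one segment's worth of the outstanding window has been SACKed.
 */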

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *te;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with th == NULL after the connection becomes established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Always let through the missing segment that caused this queue to
	 * build up, and always keep one global queue entry spare so that
	 * missing segment can be processed.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}

	/* Allocate a new queue entry. */
	te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}
	atomic_add_int(&tcp_reass_qsize, 1);

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		tcp_seq_diff_t i;

		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {		/* overlaps preceding segment */
			tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
			/* enclosing block starts w/ preceding segment */
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			if (i >= *tlenp) {
				/* preceding encloses incoming segment */
				tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
				    p->tqe_th->th_seq + p->tqe_len,
				    p->tqe_th->th_flags);
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				kfree(te, M_TSEGQ);
				atomic_add_int(&tcp_reass_qsize, -1);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
			/* incoming segment end is enclosing block end */
			tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
			    th->th_seq + *tlenp, th->th_flags);
			/* trim end of reported D-SACK block */
			tp->reportblk.rblk_end = th->th_seq;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
		tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags);
		struct tseg_qent *nq;

		if (i <= 0)
			break;
		if (!(tp->t_flags & TF_DUPSEG)) {	/* first time through */
			tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
			tp->encloseblk = tp->reportblk;
			/* report trailing duplicate D-SACK segment */
			tp->reportblk.rblk_start = q->tqe_th->th_seq;
		}
		if ((tp->t_flags & TF_ENCLOSESEG) &&
		    SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) {
			/* extend enclosing block if one exists */
			tp->encloseblk.rblk_end = qend_sack;
		}
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	/* check if we can coalesce with the following segment */
	if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
		tcp_seq tend = te->tqe_th->th_seq + te->tqe_len;
		tcp_seq tend_sack = TCP_SACK_BLKEND(tend, te->tqe_th->th_flags);

		te->tqe_len += q->tqe_len;
		if (q->tqe_th->th_flags & TH_FIN)
			te->tqe_th->th_flags |= TH_FIN;
		m_cat(te->tqe_m, q->tqe_m);
		tp->encloseblk.rblk_end = tend_sack;
		/*
		 * When not reporting a duplicate segment, use
		 * the larger enclosing block as the SACK block.
		 */
		if (!(tp->t_flags & TF_DUPSEG))
			tp->reportblk.rblk_end = tend_sack;
		LIST_REMOVE(q, tqe_q);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		/* check if we can coalesce with the preceding segment */
		if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
			p->tqe_len += te->tqe_len;
			m_cat(p->tqe_m, te->tqe_m);
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			/*
			 * When not reporting a duplicate segment, use
			 * the larger enclosing block as the SACK block.
			 */
			if (!(tp->t_flags & TF_DUPSEG))
				tp->reportblk.rblk_start = p->tqe_th->th_seq;
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
		} else {
			LIST_INSERT_AFTER(p, te, tqe_q);
		}
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	tp->rcv_nxt += q->tqe_len;
	if (!(tp->t_flags & TF_DUPSEG)) {
		/* no SACK block to report since ACK advanced */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	}
	/* no enclosing block to report since ACK advanced */
	tp->t_flags &= ~TF_ENCLOSESEG;
	flags = q->tqe_th->th_flags & TH_FIN;
	LIST_REMOVE(q, tqe_q);
	KASSERT(LIST_EMPTY(&tp->t_segq) ||
	    LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt,
	    ("segment not coalesced"));
	if (so->so_state & SS_CANTRCVMORE) {
		m_freem(q->tqe_m);
	} else {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		ssb_appendstream(&so->so_rcv, q->tqe_m);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}
	kfree(q, M_TSEGQ);
	atomic_add_int(&tcp_reass_qsize, -1);
	ND6_HINT(tp);
	sorwakeup(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    offsetof(struct ip6_hdr, ip6_dst));
		return (IPPROTO_DONE);
	}

	tcp_input(mp, offp, proto);
	return (IPPROTO_DONE);
}
#endif

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	int off0;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int tlen, off;
	int len = 0;
	int drop_hdrlen;
	struct tcpcb *tp = NULL;
	int thflags;
	struct socket *so = NULL;
	int todrop, acked;
	boolean_t ourfinisacked, needoutput = FALSE;
	u_long tiwin;
	int recvwin;
	struct tcpopt to;		/* options in this segment */
	struct sockaddr_in *next_hop = NULL;
	int rstreason;			/* For badport_bandlim accounting purposes */
	int cpu;
	struct ip6_hdr *ip6 = NULL;
	struct mbuf *m;
#ifdef INET6
	boolean_t isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
#ifdef TCPDEBUG
	short ostate = 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;

	tcpstat.tcps_rcvtotal++;

	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		struct m_tag *mtag;

		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE;
#endif

	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 addresses in the
		 * source.  As we use all-zero to indicate an
		 * unbound/unconnected pcb, an unspecified IPv6 address
		 * can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof(struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		/* already checked and pulled up in ip_demux() */
		KASSERT(m->m_len >= sizeof(struct tcpiphdr),
		    ("TCP header not in one mbuf: m->m_len %d", m->m_len));
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len +
					IPPROTO_TCP));
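			/*
			 * Editorial note: csum_data, with the pseudo
			 * header folded in above when the hardware did
			 * not already cover it, is a 16-bit one's
			 * complement sum.  XORing with 0xffff completes
			 * the complement, so a packet with a good
			 * checksum leaves th_sum == 0; the software
			 * in_cksum() path below establishes the same
			 * invariant before the th_sum test.
			 */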
			th->th_sum ^= 0xffff;
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof(struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof ipov->ih_x1);
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
#ifdef INET6
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
#endif
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	/* already checked and pulled up in ip_demux() */
	KASSERT(off >= sizeof(struct tcphdr) && off <= tlen,
	    ("bad TCP data offset %d (tlen %d)", off, tlen));
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof(struct tcphdr)) {
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		} else {
			/* already pulled up in ip_demux() */
			KASSERT(m->m_len >= sizeof(struct ip) + off,
			    ("TCP header and options not in one mbuf: "
			     "m_len %d, off %d", m->m_len, off));
		}
		optlen = off - sizeof(struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

#ifdef TCP_DROP_SYNFIN
	/*
	 * If the drop_synfin option is enabled, drop all packets with
	 * both the SYN and FIN bits set.  This prevents e.g. nmap from
	 * identifying the TCP/IP stack.
	 *
	 * This is a violation of the TCP specification.
	 */
	if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN))
		goto drop;
#endif

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
	 * until after ip6_savecontrol() is called and before other functions
	 * which don't want those proto headers.
	 * Because ip6_savecontrol() is going to parse the mbuf to
	 * search for data to be passed up to user-land, it wants mbuf
	 * parameters to be unchanged.
	 * XXX: the call of ip6_savecontrol() has been obsoleted based on
	 * latest version of the advanced API (20020110).
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
findpcb:
	/* IPFIREWALL_FORWARD section */
	if (next_hop != NULL && !isipv6) {  /* IPv6 support is not there yet */
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		cpu = mycpu->gd_cpuid;
		inp = in_pcblookup_hash(&tcbinfo[cpu],
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 */

			/*
			 * The rest of the ipfw code stores the port in
			 * host order.  XXX
			 * (The IP address is still in network order.)
			 */
			in_port_t dport = next_hop->sin_port ?
			    htons(next_hop->sin_port) :
			    th->th_dport;

			cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport,
			    next_hop->sin_addr.s_addr, dport);
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr, dport,
			    1, m->m_pkthdr.rcvif);
		}
	} else {
		if (isipv6) {
			inp = in6_pcblookup_hash(&tcbinfo[0],
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		} else {
			cpu = mycpu->gd_cpuid;
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		}
	}

	/*
	 * If the state is CLOSED (i.e., TCB does not exist) then
	 * all data in the incoming segment is discarded.
	 * If the TCB exists but is in CLOSED state, it is embryonic,
	 * but should either do a listen or a connect soon.
	 */
	if (inp == NULL) {
		if (log_in_vain) {
#ifdef INET6
			char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
#else
			char dbuf[sizeof "aaa.bbb.ccc.ddd"];
			char sbuf[sizeof "aaa.bbb.ccc.ddd"];
#endif
			if (isipv6) {
				strcpy(dbuf, "[");
				strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
				strcat(dbuf, "]");
				strcpy(sbuf, "[");
				strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
				strcat(sbuf, "]");
			} else {
				strcpy(dbuf, inet_ntoa(ip->ip_dst));
				strcpy(sbuf, inet_ntoa(ip->ip_src));
			}
			switch (log_in_vain) {
			case 1:
				if (!(thflags & TH_SYN))
					break;
				/* FALLTHROUGH */
			case 2:
				log(LOG_INFO,
				    "Connection attempt to TCP %s:%d "
				    "from %s:%d flags:0x%02x\n",
				    dbuf, ntohs(th->th_dport), sbuf,
				    ntohs(th->th_sport), thflags);
				break;
			default:
				break;
			}
		}
		if (blackhole) {
			switch (blackhole) {
			case 1:
				if (thflags & TH_SYN)
					goto drop;
				break;
			case 2:
				goto drop;
			default:
				goto drop;
			}
		}
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef IPSEC
	if (isipv6) {
		if (ipsec6_in_reject_so(m, inp->inp_socket)) {
			ipsec6stat.in_polvio++;
			goto drop;
		}
	} else {
		if (ipsec4_in_reject_so(m, inp->inp_socket)) {
			ipsecstat.in_polvio++;
			goto drop;
		}
	}
#endif
#ifdef FAST_IPSEC
	if (isipv6) {
		if (ipsec6_in_reject(m, inp))
			goto drop;
	} else {
		if (ipsec4_in_reject(m, inp))
			goto drop;
	}
#endif
	/* Check the minimum TTL for socket. */
#ifdef INET6
	if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl)
		goto drop;
#endif

	tp = intotcpcb(inp);
	if (tp == NULL) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	if (tp->t_state <= TCPS_CLOSED)
		goto drop;

	so = inp->inp_socket;

#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy(ip6, tcp_saveipgen, sizeof(*ip6));
		else
			bcopy(ip, tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif

	bzero(&to, sizeof to);

	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

#ifdef INET6
		inc.inc_isipv6 = (isipv6 == TRUE);
#endif
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
			inc.inc6_route.ro_rt = NULL;		/* XXX */
		} else {
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
			inc.inc_route.ro_rt = NULL;		/* XXX */
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * If the state is LISTEN then ignore segment if it contains
		 * a RST.  If the segment contains an ACK then it is bad and
		 * send a RST.  If it does not contain a SYN then it is not
		 * interesting; drop it.
		 *
		 * If the state is SYN_RECEIVED (syncache) and seg contains
		 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
		 * contains a RST, check the sequence number to see if it
		 * is a valid reset segment.
		 */
		if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
			if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
				if (!syncache_expand(&inc, th, &so, m)) {
					/*
					 * No syncache entry, or ACK was not
					 * for our SYN/ACK.  Send a RST.
					 */
					tcpstat.tcps_badsyn++;
					rstreason = BANDLIM_RST_OPENPORT;
					goto dropwithreset;
				}

				/*
				 * Could not complete 3-way handshake,
				 * connection is being closed down, and
				 * syncache will free mbuf.
				 */
				if (so == NULL)
					return(IPPROTO_DONE);

				/*
				 * We must be in the correct protocol thread
				 * for this connection.
				 */
				KKASSERT(so->so_port == &curthread->td_msgport);

				/*
				 * Socket is created in state SYN_RECEIVED.
				 * Continue processing segment.
				 */
				inp = so->so_pcb;
				tp = intotcpcb(inp);
				/*
				 * This is what would have happened in
				 * tcp_output() when the SYN,ACK was sent.
				 */
				tp->snd_up = tp->snd_una;
				tp->snd_max = tp->snd_nxt = tp->iss + 1;
				tp->last_ack_sent = tp->rcv_nxt;

				goto after_listen;
			}
			if (thflags & TH_RST) {
				syncache_chkrst(&inc, th);
				goto drop;
			}
			if (thflags & TH_ACK) {
				syncache_badack(&inc);
				tcpstat.tcps_badsyn++;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			goto drop;
		}

		/*
		 * Segment's flags are (SYN) or (SYN | FIN).
		 */
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden,
		 * we do not accept SYN to a deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with the deprecated source address (instead of dropping
		 * it).  We compromise here, as it is much better for the
		 * peer to send a RST, and the RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into the (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				tp = NULL;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * If it is from this socket, drop it, it must be forged.
		 * Don't bother responding if the destination was a broadcast.
		 */
		if (th->th_dport == th->th_sport) {
			if (isipv6) {
				if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
				    &ip6->ip6_src))
					goto drop;
			} else {
				if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
					goto drop;
			}
		}
		/*
		 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
		 *
		 * Note that it is quite possible to receive unicast
		 * link-layer packets with a broadcast IP address.  Use
		 * in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST | M_MCAST))
			goto drop;
		if (isipv6) {
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
				goto drop;
		} else {
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto drop;
		}
		/*
		 * SYN appears to be valid; create compressed TCP state
		 * for syncache, or perform t/tcp connection.
		 */
		if (so->so_qlen <= so->so_qlimit) {
			tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack);
			if (!syncache_add(&inc, &to, th, so, m))
				goto drop;

			/*
			 * Entry added to syncache, mbuf used to
			 * send SYN,ACK packet.
			 */
			return(IPPROTO_DONE);
		}
		goto drop;
	}

after_listen:
	/*
	 * Should not happen - syncache should pick up these connections.
	 *
	 * Once we are past handling listen sockets we must be in the
	 * correct protocol processing thread.
	 */
	KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state"));
	KKASSERT(so->so_port == &curthread->td_msgport);

	/* Unscale the window into a 32-bit value. */
	if (!(thflags & TH_SYN))
		tiwin = th->th_win << tp->snd_scale;
	else
		tiwin = th->th_win;

	/*
	 * This is the second part of the MSS DoS prevention code (after
	 * minmss on the sending side) and it deals with too many too small
	 * tcp packets in a too short timeframe (1 second).
	 *
	 * XXX Removed.  This code was crap.  It does not scale to network
	 *     speed, and default values break NFS.  Gone.
	 */
	/* REMOVED */

	/*
	 * Segment received on connection.
	 *
	 * Reset idle time and keep-alive timer.  Don't waste time if less
	 * than a second has elapsed.
	 */
	if ((int)(ticks - tp->t_rcvtime) > hz)
		tcp_timer_keep_activity(tp, thflags);

	/*
	 * Process options.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack);
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_requested_s_scale;
		}

		/*
		 * Initial send window; will be updated upon next ACK
		 */
		tp->snd_wnd = th->th_win;

		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (!(to.to_flags & TOF_MSS))
			to.to_mss = 0;
		tcp_mss(tp, to.to_mss);
		/*
		 * Only set the TF_SACK_PERMITTED per-connection flag
		 * if we got a SACK_PERMITTED option from the other side
		 * and the global tcp_do_sack variable is true.
		 */
		if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED))
			tp->t_flags |= TF_SACK_PERMITTED;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) &&
	    (!(to.to_flags & TOF_TS) ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    !IN_FASTRECOVERY(tp)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery
				 *
				 * If Eifel detection applies, then
				 * it is deterministic, so use it
				 * unconditionally over the old heuristic.
				 * Otherwise, fall back to the old heuristic.
				 */
				if (tcp_do_eifel_detect &&
				    (to.to_flags & TOF_TS) && to.to_tsecr &&
				    (tp->t_flags & TF_FIRSTACCACK)) {
					/* Eifel detection applicable. */
					if (to.to_tsecr < tp->t_rexmtTS) {
						tcp_revert_congestion_state(tp);
						++tcpstat.tcps_eifeldetected;
						if (tp->t_rxtshift != 1 ||
						    ticks >= tp->t_badrxtwin)
							++tcpstat.tcps_rttcantdetect;
					}
				} else if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					tcp_revert_congestion_state(tp);
					++tcpstat.tcps_rttdetected;
				}
				tp->t_flags &= ~(TF_FIRSTACCACK |
				    TF_FASTREXMT | TF_EARLYREXMT);
				/*
				 * Recalculate the retransmit timer / rtt.
				 *
				 * Some machines (certain windows boxes)
				 * send broken timestamp replies during the
				 * SYN+ACK phase, ignore timestamps of 0.
				 */
				if ((to.to_flags & TOF_TS) && to.to_tsecr) {
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1,
					    th->th_ack);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime,
					    th->th_ack);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd.sb, acked);
				tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				tp->t_dupacks = 0;
				/*
				 * Update window information.
				 */
				if (tiwin != tp->snd_wnd &&
				    acceptable_window_update(tp, th, tiwin)) {
					/* keep track of pure window updates */
					if (tp->snd_wl2 == th->th_ack &&
					    tiwin > tp->snd_wnd)
						tcpstat.tcps_rcvwinupd++;
					tp->snd_wnd = tiwin;
					tp->snd_wl1 = th->th_seq;
					tp->snd_wl2 = th->th_ack;
					if (tp->snd_wnd > tp->max_sndwnd)
						tp->max_sndwnd = tp->snd_wnd;
				}
				m_freem(m);
				ND6_HINT(tp);	/* some progress has been done */
				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max) {
					tcp_callout_stop(tp, tp->tt_rexmt);
				} else if (!tcp_callout_active(tp,
				    tp->tt_persist)) {
					tcp_callout_reset(tp, tp->tt_rexmt,
					    tp->t_rxtcur, tcp_timer_rexmt);
				}
				sowwakeup(so);
				if (so->so_snd.ssb_cc > 0)
					tcp_output(tp);
				return(IPPROTO_DONE);
			}
		} else if (tiwin == tp->snd_wnd &&
		    th->th_ack == tp->snd_una &&
		    LIST_EMPTY(&tp->t_segq) &&
		    tlen <= ssb_space(&so->so_rcv)) {
			u_long newsize = 0;	/* automatic sockbuf scaling */
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* some progress has been done */
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (eg. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighth of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
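			/*
			 * Editorial example (not from the original): with
			 * the defaults defined above, a receiver that sees
			 * more than 7/8 of the current ssb_hiwat arrive
			 * within one reflected-timestamp RTT grows the
			 * buffer by tcp_autorcvbuf_inc (16KB), at most once
			 * per RTT, until tcp_autorcvbuf_max (2MB) or the
			 * window-scale limit is reached.
			 */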
1319 if (tcp_do_autorcvbuf &&
1320 to.to_tsecr &&
1321 (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) {
1322 if (to.to_tsecr > tp->rfbuf_ts &&
1323 to.to_tsecr - tp->rfbuf_ts < hz) {
1324 if (tp->rfbuf_cnt >
1325 (so->so_rcv.ssb_hiwat / 8 * 7) &&
1326 so->so_rcv.ssb_hiwat <
1327 tcp_autorcvbuf_max) {
1328 newsize =
1329 ulmin(so->so_rcv.ssb_hiwat +
1330 tcp_autorcvbuf_inc,
1331 tcp_autorcvbuf_max);
1332 }
1333 /* Start over with next RTT. */
1334 tp->rfbuf_ts = 0;
1335 tp->rfbuf_cnt = 0;
1336 } else
1337 tp->rfbuf_cnt += tlen; /* add up */
1338 }
1339 /*
1340 * Add data to socket buffer.
1341 */
1342 if (so->so_state & SS_CANTRCVMORE) {
1343 m_freem(m);
1344 } else {
1345 /*
1346 * Set new socket buffer size, give up when
1347 * limit is reached.
1348 *
1349 * Adjusting the size can mess up ACK
1350 * sequencing when pure window updates are
1351 * being avoided (which is the default),
1352 * so force an ack.
1353 */
1354 lwkt_gettoken(&so->so_rcv.ssb_token);
1355 if (newsize) {
1356 tp->t_flags |= TF_RXRESIZED;
1357 if (!ssb_reserve(&so->so_rcv, newsize,
1358 so, NULL)) {
1359 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
1360 }
1361 if (newsize >=
1362 (TCP_MAXWIN << tp->rcv_scale)) {
1363 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
1364 }
1365 }
1366 m_adj(m, drop_hdrlen); /* delayed header drop */
1367 ssb_appendstream(&so->so_rcv, m);
1368 lwkt_reltoken(&so->so_rcv.ssb_token);
1369 }
1370 sorwakeup(so);
1371 /*
1372 * This code is responsible for most of the ACKs
1373 * the TCP stack sends back after receiving a data
1374 * packet. Note that the DELAY_ACK check fails if
1375 * the delack timer is already running, which results
1376 * in an ack being sent every other packet (which is
1377 * what we want).
1378 *
1379 * We then further aggregate acks by not actually
1380 * sending one until the protocol thread has completed
1381 * processing the current backlog of packets. This
1382 * does not delay the ack any further, but allows us
1383 * to take advantage of the packet aggregation that
1384 * high speed NICs do (usually blocks of 8-10 packets)
1385 * to send a single ack rather then four or five acks,
1386 * greatly reducing the ack rate, the return channel
1387 * bandwidth, and the protocol overhead on both ends.
1388 *
1389 * Since this also has the effect of slowing down
1390 * the exponential slow-start ramp-up, systems with
1391 * very large bandwidth-delay products might want
1392 * to turn the feature off.
1393 */
1394 if (DELAY_ACK(tp)) {
1395 tcp_callout_reset(tp, tp->tt_delack,
1396 tcp_delacktime, tcp_timer_delack);
1397 } else if (tcp_aggregate_acks) {
1398 tp->t_flags |= TF_ACKNOW;
1399 if (!(tp->t_flags & TF_ONOUTPUTQ)) {
1400 tp->t_flags |= TF_ONOUTPUTQ;
1401 tp->tt_cpu = mycpu->gd_cpuid;
1402 TAILQ_INSERT_TAIL(
1403 &tcpcbackq[tp->tt_cpu],
1404 tp, t_outputq);
1405 }
1406 } else {
1407 tp->t_flags |= TF_ACKNOW;
1408 tcp_output(tp);
1409 }
1410 return(IPPROTO_DONE);
1411 }
1412 }
1413
1414 /*
1415 * Calculate amount of space in receive window,
1416 * and then do TCP input processing.
1417 * Receive window is amount of space in rcv queue,
1418 * but not less than advertised window.
1419 */
1420 recvwin = ssb_space(&so->so_rcv);
1421 if (recvwin < 0)
1422 recvwin = 0;
1423 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt));
1424
1425 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1426 tp->rfbuf_ts = 0;
1427 tp->rfbuf_cnt = 0;
1428
1429 switch (tp->t_state) {
1430 /*
1431 * If the state is SYN_RECEIVED:
1432 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1433 */
1434 case TCPS_SYN_RECEIVED:
1435 if ((thflags & TH_ACK) &&
1436 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1437 SEQ_GT(th->th_ack, tp->snd_max))) {
1438 rstreason = BANDLIM_RST_OPENPORT;
1439 goto dropwithreset;
1440 }
1441 break;
1442
1443 /*
1444 * If the state is SYN_SENT:
1445 * if seg contains an ACK, but not for our SYN, drop the input.
1446 * if seg contains a RST, then drop the connection.
1447 * if seg does not contain SYN, then drop it.
1448 * Otherwise this is an acceptable SYN segment
1449 * initialize tp->rcv_nxt and tp->irs
1450 * if seg contains ack then advance tp->snd_una
1451 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1452 * arrange for segment to be acked (eventually)
1453 * continue processing rest of data/controls, beginning with URG
1454 */
1455 case TCPS_SYN_SENT:
1456 if ((thflags & TH_ACK) &&
1457 (SEQ_LEQ(th->th_ack, tp->iss) ||
1458 SEQ_GT(th->th_ack, tp->snd_max))) {
1459 rstreason = BANDLIM_UNLIMITED;
1460 goto dropwithreset;
1461 }
1462 if (thflags & TH_RST) {
1463 if (thflags & TH_ACK)
1464 tp = tcp_drop(tp, ECONNREFUSED);
1465 goto drop;
1466 }
1467 if (!(thflags & TH_SYN))
1468 goto drop;
1469
1470 tp->irs = th->th_seq;
1471 tcp_rcvseqinit(tp);
1472 if (thflags & TH_ACK) {
1473 /* Our SYN was acked. */
1474 tcpstat.tcps_connects++;
1475 soisconnected(so);
1476 /* Do window scaling on this connection? */
1477 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
1478 (TF_RCVD_SCALE | TF_REQ_SCALE))
1479 tp->rcv_scale = tp->request_r_scale;
1480 tp->rcv_adv += tp->rcv_wnd;
1481 tp->snd_una++; /* SYN is acked */
1482 tcp_callout_stop(tp, tp->tt_rexmt);
1483 /*
1484 * If there's data, delay ACK; if there's also a FIN
1485 * ACKNOW will be turned on later.
1486 */
1487 if (DELAY_ACK(tp) && tlen != 0) {
1488 tcp_callout_reset(tp, tp->tt_delack,
1489 tcp_delacktime, tcp_timer_delack);
1490 } else {
1491 tp->t_flags |= TF_ACKNOW;
1492 }
1493 /*
1494 * Received <SYN,ACK> in SYN_SENT[*] state.
1495 * Transitions:
1496 * SYN_SENT --> ESTABLISHED
1497 * SYN_SENT* --> FIN_WAIT_1
1498 */
1499 tp->t_starttime = ticks;
1500 if (tp->t_flags & TF_NEEDFIN) {
1501 tp->t_state = TCPS_FIN_WAIT_1;
1502 tp->t_flags &= ~TF_NEEDFIN;
1503 thflags &= ~TH_SYN;
1504 } else {
1505 tcp_established(tp);
1506 }
1507 } else {
1508 /*
1509 * Received initial SYN in SYN-SENT[*] state =>
1510 * simultaneous open.
1511 * Do 3-way handshake:
1512 * SYN-SENT -> SYN-RECEIVED
1513 * SYN-SENT* -> SYN-RECEIVED*
1514 */
1515 tp->t_flags |= TF_ACKNOW;
1516 tcp_callout_stop(tp, tp->tt_rexmt);
1517 tp->t_state = TCPS_SYN_RECEIVED;
1518 }
1519
1520 /*
1521 * Advance th->th_seq to correspond to first data byte.
1522 * If data, trim to stay within window,
1523 * dropping FIN if necessary.
1524 */
1525 th->th_seq++;
1526 if (tlen > tp->rcv_wnd) {
1527 todrop = tlen - tp->rcv_wnd;
1528 m_adj(m, -todrop);
1529 tlen = tp->rcv_wnd;
1530 thflags &= ~TH_FIN;
1531 tcpstat.tcps_rcvpackafterwin++;
1532 tcpstat.tcps_rcvbyteafterwin += todrop;
1533 }
1534 tp->snd_wl1 = th->th_seq - 1;
1535 tp->rcv_up = th->th_seq;
1536 /*
1537 * Client side of transaction: already sent SYN and data.
1538 * If the remote host used T/TCP to validate the SYN,
1539 * our data will be ACK'd; if so, enter normal data segment
1540 * processing in the middle of step 5, ack processing.
1541 * Otherwise, goto step 6.
1542 */
1543 if (thflags & TH_ACK)
1544 goto process_ACK;
1545
1546 goto step6;
1547
1548 /*
1549 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1550 * do normal processing (we no longer bother with T/TCP).
1551 */
1552 case TCPS_LAST_ACK:
1553 case TCPS_CLOSING:
1554 case TCPS_TIME_WAIT:
1555 break; /* continue normal processing */
1556 }
1557
1558 /*
1559 * States other than LISTEN or SYN_SENT.
1560 * First check the RST flag and sequence number since reset segments
1561 * are exempt from the timestamp and connection count tests. This
1562 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1563 * below which allowed reset segments in half the sequence space
1564 * to fall though and be processed (which gives forged reset
1565 * segments with a random sequence number a 50 percent chance of
1566 * killing a connection).
1567 * Then check timestamp, if present.
1568 * Then check the connection count, if present.
1569 * Then check that at least some bytes of segment are within
1570 * receive window. If segment begins before rcv_nxt,
1571 * drop leading data (and SYN); if nothing left, just ack.
1572 *
1573 *
1574 * If the RST bit is set, check the sequence number to see
1575 * if this is a valid reset segment.
1576 * RFC 793 page 37:
1577 * In all states except SYN-SENT, all reset (RST) segments
1578 * are validated by checking their SEQ-fields. A reset is
1579 * valid if its sequence number is in the window.
1580 * Note: this does not take into account delayed ACKs, so
1581 * we should test against last_ack_sent instead of rcv_nxt.
1582 * The sequence number in the reset segment is normally an
1583 * echo of our outgoing acknowledgement numbers, but some hosts
1584 * send a reset with the sequence number at the rightmost edge
1585 * of our receive window, and we have to handle this case.
1586 * If we have multiple segments in flight, the intial reset
1587 * segment sequence numbers will be to the left of last_ack_sent,
1588 * but they will eventually catch up.
1589 * In any case, it never made sense to trim reset segments to
1590 * fit the receive window since RFC 1122 says:
1591 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1592 *
1593 * A TCP SHOULD allow a received RST segment to include data.
1594 *
1595 * DISCUSSION
1596 * It has been suggested that a RST segment could contain
1597 * ASCII text that encoded and explained the cause of the
1598 * RST. No standard has yet been established for such
1599 * data.
1600 *
1601 * If the reset segment passes the sequence number test examine
1602 * the state:
1603 * SYN_RECEIVED STATE:
1604 * If passive open, return to LISTEN state.
1605 * If active open, inform user that connection was refused.
1606 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1607 * Inform user that connection was reset, and close tcb.
1608 * CLOSING, LAST_ACK STATES:
1609 * Close the tcb.
1610 * TIME_WAIT STATE:
1611 * Drop the segment - see Stevens, vol. 2, p. 964 and
1612 * RFC 1337.
1613 */
1614 if (thflags & TH_RST) {
1615 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1616 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1617 switch (tp->t_state) {
1618
1619 case TCPS_SYN_RECEIVED:
1620 so->so_error = ECONNREFUSED;
1621 goto close;
1622
1623 case TCPS_ESTABLISHED:
1624 case TCPS_FIN_WAIT_1:
1625 case TCPS_FIN_WAIT_2:
1626 case TCPS_CLOSE_WAIT:
1627 so->so_error = ECONNRESET;
1628 close:
1629 tp->t_state = TCPS_CLOSED;
1630 tcpstat.tcps_drops++;
1631 tp = tcp_close(tp);
1632 break;
1633
1634 case TCPS_CLOSING:
1635 case TCPS_LAST_ACK:
1636 tp = tcp_close(tp);
1637 break;
1638
1639 case TCPS_TIME_WAIT:
1640 break;
1641 }
1642 }
1643 goto drop;
1644 }
1645
1646 /*
1647 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1648 * and it's less than ts_recent, drop it.
1649 */
1650 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 &&
1651 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1652
1653 /* Check to see if ts_recent is over 24 days old. */
1654 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1655 /*
1656 * Invalidate ts_recent. If this segment updates
1657 * ts_recent, the age will be reset later and ts_recent
1658 * will get a valid value. If it does not, setting
1659 * ts_recent to zero will at least satisfy the
1660 * requirement that zero be placed in the timestamp
1661 * echo reply when ts_recent isn't valid. The
1662 * age isn't reset until we get a valid ts_recent
1663 * because we don't want out-of-order segments to be
1664 * dropped when ts_recent is old.
1665 */
1666 tp->ts_recent = 0;
1667 } else {
1668 tcpstat.tcps_rcvduppack++;
1669 tcpstat.tcps_rcvdupbyte += tlen;
1670 tcpstat.tcps_pawsdrop++;
1671 if (tlen)
1672 goto dropafterack;
1673 goto drop;
1674 }
1675 }
1676
1677 /*
1678 * In the SYN-RECEIVED state, validate that the packet belongs to
1679 * this connection before trimming the data to fit the receive
1680 * window. Check the sequence number versus IRS since we know
1681 * the sequence numbers haven't wrapped. This is a partial fix
1682 * for the "LAND" DoS attack.
1683 */
1684 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1685 rstreason = BANDLIM_RST_OPENPORT;
1686 goto dropwithreset;
1687 }
1688
1689 todrop = tp->rcv_nxt - th->th_seq;
1690 if (todrop > 0) {
1691 if (TCP_DO_SACK(tp)) {
1692 /* Report duplicate segment at head of packet. */
1693 tp->reportblk.rblk_start = th->th_seq;
1694 tp->reportblk.rblk_end = TCP_SACK_BLKEND(
1695 th->th_seq + tlen, thflags);
1696 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt))
1697 tp->reportblk.rblk_end = tp->rcv_nxt;
1698 tp->t_flags |= (TF_DUPSEG | TF_SACKLEFT | TF_ACKNOW);
1699 }
1700 if (thflags & TH_SYN) {
1701 thflags &= ~TH_SYN;
1702 th->th_seq++;
1703 if (th->th_urp > 1)
1704 th->th_urp--;
1705 else
1706 thflags &= ~TH_URG;
1707 todrop--;
1708 }
1709 /*
1710 * Following if statement from Stevens, vol. 2, p. 960.
1711 */
1712 if (todrop > tlen ||
1713 (todrop == tlen && !(thflags & TH_FIN))) {
1714 /*
1715 * Any valid FIN must be to the left of the window.
1716 * At this point the FIN must be a duplicate or out
1717 * of sequence; drop it.
1718 */
1719 thflags &= ~TH_FIN;
1720
1721 /*
1722 * Send an ACK to resynchronize and drop any data.
1723 * But keep on processing for RST or ACK.
1724 */
1725 tp->t_flags |= TF_ACKNOW;
1726 todrop = tlen;
1727 tcpstat.tcps_rcvduppack++;
1728 tcpstat.tcps_rcvdupbyte += todrop;
1729 } else {
1730 tcpstat.tcps_rcvpartduppack++;
1731 tcpstat.tcps_rcvpartdupbyte += todrop;
1732 }
1733 drop_hdrlen += todrop; /* drop from the top afterwards */
1734 th->th_seq += todrop;
1735 tlen -= todrop;
1736 if (th->th_urp > todrop)
1737 th->th_urp -= todrop;
1738 else {
1739 thflags &= ~TH_URG;
1740 th->th_urp = 0;
1741 }
1742 }
1743
1744 /*
1745 * If new data are received on a connection after the
1746 * user processes are gone, then RST the other end.
1747 */
1748 if ((so->so_state & SS_NOFDREF) &&
1749 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1750 tp = tcp_close(tp);
1751 tcpstat.tcps_rcvafterclose++;
1752 rstreason = BANDLIM_UNLIMITED;
1753 goto dropwithreset;
1754 }
1755
1756 /*
1757 * If segment ends after window, drop trailing data
1758 * (and PUSH and FIN); if nothing left, just ACK.
1759 */
1760 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1761 if (todrop > 0) {
1762 tcpstat.tcps_rcvpackafterwin++;
1763 if (todrop >= tlen) {
1764 tcpstat.tcps_rcvbyteafterwin += tlen;
1765 /*
1766 * If a new connection request is received
1767 * while in TIME_WAIT, drop the old connection
1768 * and start over if the sequence numbers
1769 * are above the previous ones.
1770 */
1771 if (thflags & TH_SYN &&
1772 tp->t_state == TCPS_TIME_WAIT &&
1773 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1774 tp = tcp_close(tp);
1775 goto findpcb;
1776 }
1777 /*
1778 * If the window is closed, we can only take segments
1779 * at the window edge, and have to drop data and PUSH
1780 * from incoming segments. Continue processing, but
1781 * remember to ack. Otherwise, drop the segment and
1782 * ack.
1783 */
1784 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1785 tp->t_flags |= TF_ACKNOW;
1786 tcpstat.tcps_rcvwinprobe++;
1787 } else
1788 goto dropafterack;
1789 } else
1790 tcpstat.tcps_rcvbyteafterwin += todrop;
1791 m_adj(m, -todrop);
1792 tlen -= todrop;
1793 thflags &= ~(TH_PUSH | TH_FIN);
1794 }
1795
1796 /*
1797 * If last ACK falls within this segment's sequence numbers,
1798 * record its timestamp.
1799 * NOTE:
1800 * 1) That the test incorporates suggestions from the latest
1801 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1802 * 2) That updating only on newer timestamps interferes with
1803 * our earlier PAWS tests, so this check should be solely
1804 * predicated on the sequence space of this segment.
1805 * 3) That we modify the segment boundary check to be
1806 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN
1807 * instead of RFC1323's
1808 * Last.ACK.Sent < SEG.SEQ + SEG.LEN.
1809 * This modified check allows us to overcome RFC1323's
1810 * limitations as described in Stevens TCP/IP Illustrated
1811 * Vol. 2 p.869. In such cases, we can still calculate the
1812 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1813 */
1814 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
1815 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen
1816 + ((thflags & TH_SYN) != 0)
1817 + ((thflags & TH_FIN) != 0)))) {
1818 tp->ts_recent_age = ticks;
1819 tp->ts_recent = to.to_tsval;
1820 }
1821
1822 /*
1823 * If a SYN is in the window, then this is an
1824 * error and we send an RST and drop the connection.
1825 */
1826 if (thflags & TH_SYN) {
1827 tp = tcp_drop(tp, ECONNRESET);
1828 rstreason = BANDLIM_UNLIMITED;
1829 goto dropwithreset;
1830 }
1831
1832 /*
1833 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1834 * flag is on (half-synchronized state), then queue data for
1835 * later processing; else drop segment and return.
1836 */
1837 if (!(thflags & TH_ACK)) {
1838 if (tp->t_state == TCPS_SYN_RECEIVED ||
1839 (tp->t_flags & TF_NEEDSYN))
1840 goto step6;
1841 else
1842 goto drop;
1843 }
1844
1845 /*
1846 * Ack processing.
1847 */
1848 switch (tp->t_state) {
1849 /*
1850 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
1851 * ESTABLISHED state and continue processing.
1852 * The ACK was checked above.
1853 */
1854 case TCPS_SYN_RECEIVED:
1855
1856 tcpstat.tcps_connects++;
1857 soisconnected(so);
1858 /* Do window scaling? */
1859 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
1860 (TF_RCVD_SCALE | TF_REQ_SCALE))
1861 tp->rcv_scale = tp->request_r_scale;
1862 /*
1863 * Make transitions:
1864 * SYN-RECEIVED -> ESTABLISHED
1865 * SYN-RECEIVED* -> FIN-WAIT-1
1866 */
1867 tp->t_starttime = ticks;
1868 if (tp->t_flags & TF_NEEDFIN) {
1869 tp->t_state = TCPS_FIN_WAIT_1;
1870 tp->t_flags &= ~TF_NEEDFIN;
1871 } else {
1872 tcp_established(tp);
1873 }
1874 /*
1875 * If segment contains data or FIN, tcp_reass() will be called
1876 * later; if not, do so now to pass queued data to the user.
1877 */
1878 if (tlen == 0 && !(thflags & TH_FIN))
1879 tcp_reass(tp, NULL, NULL, NULL);
1880 /* fall into ... */
1881
1882 /*
1883 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
1884 * ACKs. If the ack is in the range
1885 * tp->snd_una < th->th_ack <= tp->snd_max
1886 * then advance tp->snd_una to th->th_ack and drop
1887 * data from the retransmission queue. If this ACK reflects
1888 * more up-to-date window information, we update our window information.
1889 */
1890 case TCPS_ESTABLISHED:
1891 case TCPS_FIN_WAIT_1:
1892 case TCPS_FIN_WAIT_2:
1893 case TCPS_CLOSE_WAIT:
1894 case TCPS_CLOSING:
1895 case TCPS_LAST_ACK:
1896 case TCPS_TIME_WAIT:
1897
1898 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1899 if (TCP_DO_SACK(tp))
1900 tcp_sack_update_scoreboard(tp, &to);
1901 if (!tcp_callout_active(tp, tp->tt_rexmt) ||
1902 th->th_ack != tp->snd_una) {
1903 tcpstat.tcps_rcvdupack++;
1904 tp->t_dupacks = 0;
1905 break;
1906 }
1907 if (tlen != 0 || tiwin != tp->snd_wnd) {
1908 if (!tcp_do_rfc3517bis ||
1909 !TCP_DO_SACK(tp) ||
1910 (to.to_flags &
1911 (TOF_SACK | TOF_SACK_REDUNDANT))
1912 != TOF_SACK) {
1913 tp->t_dupacks = 0;
1914 break;
1915 }
1916 /*
1917 * Update window information.
1918 */
1919 if (tiwin != tp->snd_wnd &&
1920 acceptable_window_update(tp, th, tiwin)) {
1921 /* keep track of pure window updates */
1922 if (tlen == 0 &&
1923 tp->snd_wl2 == th->th_ack &&
1924 tiwin > tp->snd_wnd)
1925 tcpstat.tcps_rcvwinupd++;
1926 tp->snd_wnd = tiwin;
1927 tp->snd_wl1 = th->th_seq;
1928 tp->snd_wl2 = th->th_ack;
1929 if (tp->snd_wnd > tp->max_sndwnd)
1930 tp->max_sndwnd = tp->snd_wnd;
1931 }
1932 }
1933 tcpstat.tcps_rcvdupack++;
1934
1935 /*
1936 * We have outstanding data (other than
1937 * a window probe); this is a completely
1938 * duplicate ack (i.e., the window info didn't
1939 * change), it is the biggest we've
1940 * seen, and we've seen exactly our rexmt
1941 * threshold of them, so assume a packet
1942 * has been dropped and retransmit it.
1943 * Kludge snd_nxt & the congestion
1944 * window so we send only this one
1945 * packet.
1946 */
1947 if (IN_FASTRECOVERY(tp)) {
1948 if (TCP_DO_SACK(tp)) {
1949 /* No artificial cwnd inflation. */
1950 tcp_sack_rexmt(tp, th);
1951 } else {
1952 /*
1953 * Dup acks mean that packets
1954 * have left the network
1955 * (they're now cached at the
1956 * receiver) so bump cwnd by
1957 * the amount in the receiver
1958 * to keep a constant cwnd
1959 * packets in the network.
1960 */
1961 tp->snd_cwnd += tp->t_maxseg;
1962 tcp_output(tp);
1963 }
1964 } else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
1965 tp->t_dupacks = 0;
1966 break;
1967 } else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) &&
1968 (to.to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) ==
1969 (TOF_DSACK | TOF_SACK_REDUNDANT)) {
1970 /*
1971 * If the ACK carries DSACK and other
1972 * SACK blocks carry information that
1973 * we have already known, don't count
1974 * this ACK as duplicate ACK. This
1975 * prevents spurious early retransmit
1976 * and fast retransmit. This also
1977 * meets the requirement of RFC3042
1978 * that new segments should not be sent
1979 * if the SACK blocks do not contain
1980 * new information (XXX we actually
1981 * loosen the requirement that only DSACK
1982 * is checked here).
1983 *
1984 * ACKs of this kind are usually sent
1985 * after spurious retransmit.
1986 */
1987 /* Do nothing; don't change t_dupacks */
1988 } else if (++tp->t_dupacks == tp->t_rxtthresh) {
1989 tcp_seq old_snd_nxt;
1990 u_int win;
1991
1992fastretransmit:
1993 if (tcp_do_eifel_detect &&
1994 (tp->t_flags & TF_RCVD_TSTMP)) {
1995 tcp_save_congestion_state(tp);
1996 tp->t_flags |= TF_FASTREXMT;
1997 }
1998 /*
1999 * We know we're losing at the current
2000 * window size, so do congestion avoidance:
2001 * set ssthresh to half the current window
2002 * and pull our congestion window back to the
2003 * new ssthresh.
2004 */
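/*
 * E.g. (assumed values): snd_wnd == 65535, snd_cwnd == 32768 and
 * t_maxseg == 1460 give win = 32768 / 2 / 1460 == 11 segments, so
 * ssthresh becomes 11 * 1460 == 16060 bytes; cwnd is then pinned to
 * one segment for the retransmission below and restored to ssthresh
 * right after.
 */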
2005 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 /
2006 tp->t_maxseg;
2007 if (win < 2)
2008 win = 2;
2009 tp->snd_ssthresh = win * tp->t_maxseg;
2010 ENTER_FASTRECOVERY(tp);
2011 tp->snd_recover = tp->snd_max;
2012 tcp_callout_stop(tp, tp->tt_rexmt);
2013 tp->t_rtttime = 0;
2014 old_snd_nxt = tp->snd_nxt;
2015 tp->snd_nxt = th->th_ack;
2016 tp->snd_cwnd = tp->t_maxseg;
2017 tcp_output(tp);
2018 ++tcpstat.tcps_sndfastrexmit;
2019 tp->snd_cwnd = tp->snd_ssthresh;
2020 tp->rexmt_high = tp->snd_nxt;
2021 tp->t_flags &= ~TF_SACKRESCUED;
2022 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
2023 tp->snd_nxt = old_snd_nxt;
2024 KASSERT(tp->snd_limited <= 2,
2025 ("tp->snd_limited too big"));
2026 if (TCP_DO_SACK(tp))
2027 tcp_sack_rexmt(tp, th);
2028 else
2029 tp->snd_cwnd += tp->t_maxseg *
2030 (tp->t_dupacks - tp->snd_limited);
2031 } else if (tcp_do_rfc3517bis && TCP_DO_SACK(tp)) {
2032 if (tcp_sack_islost(&tp->scb, tp->snd_una))
2033 goto fastretransmit;
2034 if (tcp_do_limitedtransmit) {
2035 /* outstanding data */
2036 uint32_t ownd =
2037 tp->snd_max - tp->snd_una;
2038
2039 if (!tcp_sack_limitedxmit(tp) &&
2040 need_early_retransmit(tp, ownd)) {
2041 ++tcpstat.tcps_sndearlyrexmit;
2042 tp->t_flags |= TF_EARLYREXMT;
2043 goto fastretransmit;
2044 }
2045 }
2046 } else if (tcp_do_limitedtransmit) {
2047 u_long oldcwnd = tp->snd_cwnd;
2048 tcp_seq oldsndmax = tp->snd_max;
2049 tcp_seq oldsndnxt = tp->snd_nxt;
2050 /* outstanding data */
2051 uint32_t ownd = tp->snd_max - tp->snd_una;
2052 u_int sent;
2053
2054 KASSERT(tp->t_dupacks == 1 ||
2055 tp->t_dupacks == 2,
2056 ("dupacks not 1 or 2"));
2057 if (tp->t_dupacks == 1)
2058 tp->snd_limited = 0;
2059 tp->snd_nxt = tp->snd_max;
2060 tp->snd_cwnd = ownd +
2061 (tp->t_dupacks - tp->snd_limited) *
2062 tp->t_maxseg;
2063 tcp_output(tp);
2064
2065 if (SEQ_LT(oldsndnxt, oldsndmax)) {
2066 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
2067 ("snd_una moved in other threads"));
2068 tp->snd_nxt = oldsndnxt;
2069 }
2070 tp->snd_cwnd = oldcwnd;
2071 sent = tp->snd_max - oldsndmax;
2072 if (sent > tp->t_maxseg) {
2073 KASSERT((tp->t_dupacks == 2 &&
2074 tp->snd_limited == 0) ||
2075 (sent == tp->t_maxseg + 1 &&
2076 tp->t_flags & TF_SENTFIN),
2077 ("sent too much"));
2078 KASSERT(sent <= tp->t_maxseg * 2,
2079 ("sent too many segments"));
2080 tp->snd_limited = 2;
2081 tcpstat.tcps_sndlimited += 2;
2082 } else if (sent > 0) {
2083 ++tp->snd_limited;
2084 ++tcpstat.tcps_sndlimited;
2085 } else if (need_early_retransmit(tp, ownd)) {
2086 ++tcpstat.tcps_sndearlyrexmit;
2087 tp->t_flags |= TF_EARLYREXMT;
2088 goto fastretransmit;
2089 }
2090 }
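/*
 * Net effect of the limited-transmit block above (RFC3042): on the
 * first and second duplicate ACK, cwnd is briefly inflated to
 * ownd + (t_dupacks - snd_limited) * t_maxseg so that up to that many
 * previously-unsent segments can go out, then restored; snd_limited
 * records how many limited-transmit segments are already outstanding
 * so the allowance is never exceeded.
 */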
2091 if (tlen != 0)
2092 break;
2093 else
2094 goto drop;
2095 }
2096
2097 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
2098 tp->t_dupacks = 0;
2099 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2100 /*
2101 * Detected optimistic ACK attack.
2102 * Force slow-start to de-synchronize attack.
2103 */
2104 tp->snd_cwnd = tp->t_maxseg;
2105 tp->snd_wacked = 0;
2106
2107 tcpstat.tcps_rcvacktoomuch++;
2108 goto dropafterack;
2109 }
2110 /*
2111 * If we reach this point, ACK is not a duplicate,
2112 * i.e., it ACKs something we sent.
2113 */
2114 if (tp->t_flags & TF_NEEDSYN) {
2115 /*
2116 * T/TCP: Connection was half-synchronized, and our
2117 * SYN has been ACK'd (so connection is now fully
2118 * synchronized). Go to non-starred state,
2119 * increment snd_una for ACK of SYN, and check if
2120 * we can do window scaling.
2121 */
2122 tp->t_flags &= ~TF_NEEDSYN;
2123 tp->snd_una++;
2124 /* Do window scaling? */
2125 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
2126 (TF_RCVD_SCALE | TF_REQ_SCALE))
2127 tp->rcv_scale = tp->request_r_scale;
2128 }
2129
2130process_ACK:
2131 acked = th->th_ack - tp->snd_una;
2132 tcpstat.tcps_rcvackpack++;
2133 tcpstat.tcps_rcvackbyte += acked;
2134
2135 if (tcp_do_eifel_detect && acked > 0 &&
2136 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
2137 (tp->t_flags & TF_FIRSTACCACK)) {
2138 /* Eifel detection applicable. */
2139 if (to.to_tsecr < tp->t_rexmtTS) {
2140 ++tcpstat.tcps_eifeldetected;
2141 tcp_revert_congestion_state(tp);
2142 if (tp->t_rxtshift != 1 ||
2143 ticks >= tp->t_badrxtwin)
2144 ++tcpstat.tcps_rttcantdetect;
2145 }
2146 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2147 /*
2148 * If we just performed our first retransmit,
2149 * and the ACK arrives within our recovery window,
2150 * then it was a mistake to do the retransmit
2151 * in the first place. Recover our original cwnd
2152 * and ssthresh, and proceed to transmit where we
2153 * left off.
2154 */
2155 tcp_revert_congestion_state(tp);
2156 ++tcpstat.tcps_rttdetected;
2157 }
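/*
 * The two branches above are alternative spurious-retransmit
 * detectors: Eifel compares the echoed timestamp against t_rexmtTS
 * (the timestamp value recorded when the retransmission was sent); an
 * echo older than that stamp means the ACK was generated by the
 * original transmission, so the retransmit was spurious.  The
 * t_badrxtwin test is the timestamp-less fallback, applicable to the
 * first retransmit only.
 */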
2158
2159 /*
2160 * If we have a timestamp reply, update smoothed
2161 * round trip time. If no timestamp is present but
2162 * transmit timer is running and timed sequence
2163 * number was acked, update smoothed round trip time.
2164 * Since we now have an rtt measurement, cancel the
2165 * timer backoff (cf., Phil Karn's retransmit alg.).
2166 * Recompute the initial retransmit timer.
2167 *
2168 * Some machines (certain Windows boxes) send broken
2169 * timestamp replies during the SYN+ACK phase; ignore
2170 * timestamps of 0.
2171 */
2172 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
2173 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack);
2174 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
2175 tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack);
2176 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2177
2178 /*
2179 * If no data (only SYN) was ACK'd,
2180 * skip rest of ACK processing.
2181 */
2182 if (acked == 0)
2183 goto step6;
2184
2185 /* Stop looking for an acceptable ACK since one was received. */
2186 tp->t_flags &= ~(TF_FIRSTACCACK | TF_FASTREXMT | TF_EARLYREXMT);
2187
2188 if (acked > so->so_snd.ssb_cc) {
2189 tp->snd_wnd -= so->so_snd.ssb_cc;
2190 sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
2191 ourfinisacked = TRUE;
2192 } else {
2193 sbdrop(&so->so_snd.sb, acked);
2194 tp->snd_wnd -= acked;
2195 ourfinisacked = FALSE;
2196 }
2197 sowwakeup(so);
2198
2199 /*
2200 * Update window information.
2201 */
2202 if (acceptable_window_update(tp, th, tiwin)) {
2203 /* keep track of pure window updates */
2204 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2205 tiwin > tp->snd_wnd)
2206 tcpstat.tcps_rcvwinupd++;
2207 tp->snd_wnd = tiwin;
2208 tp->snd_wl1 = th->th_seq;
2209 tp->snd_wl2 = th->th_ack;
2210 if (tp->snd_wnd > tp->max_sndwnd)
2211 tp->max_sndwnd = tp->snd_wnd;
2212 needoutput = TRUE;
2213 }
2214
2215 tp->snd_una = th->th_ack;
2216 if (TCP_DO_SACK(tp))
2217 tcp_sack_update_scoreboard(tp, &to);
2218 if (IN_FASTRECOVERY(tp)) {
2219 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2220 EXIT_FASTRECOVERY(tp);
2221 needoutput = TRUE;
2222 /*
2223 * If the congestion window was inflated
2224 * to account for the other side's
2225 * cached packets, retract it.
2226 */
2227 if (!TCP_DO_SACK(tp))
2228 tp->snd_cwnd = tp->snd_ssthresh;
2229
2230 /*
2231 * Window inflation should have left us
2232 * with approximately snd_ssthresh outstanding
2233 * data. But, in case we would be inclined
2234 * to send a burst, better do it using
2235 * slow start.
2236 */
2237 if (SEQ_GT(th->th_ack + tp->snd_cwnd,
2238 tp->snd_max + 2 * tp->t_maxseg))
2239 tp->snd_cwnd =
2240 (tp->snd_max - tp->snd_una) +
2241 2 * tp->t_maxseg;
2242
2243 tp->snd_wacked = 0;
2244 } else {
2245 if (TCP_DO_SACK(tp)) {
2246 tp->snd_max_rexmt = tp->snd_max;
2247 tcp_sack_rexmt(tp, th);
2248 } else {
2249 tcp_newreno_partial_ack(tp, th, acked);
2250 }
2251 needoutput = FALSE;
2252 }
2253 } else {
2254 /*
2255 * Open the congestion window. When in slow-start,
2256 * open exponentially: maxseg per packet. Otherwise,
2257 * open linearly: maxseg per window.
2258 */
2259 if (tp->snd_cwnd <= tp->snd_ssthresh) {
2260 u_int abc_sslimit =
2261 (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
2262 tp->t_maxseg : 2 * tp->t_maxseg);
2263
2264 /* slow-start */
2265 tp->snd_cwnd += tcp_do_abc ?
2266 min(acked, abc_sslimit) : tp->t_maxseg;
2267 } else {
2268 /* linear increase */
2269 tp->snd_wacked += tcp_do_abc ? acked :
2270 tp->t_maxseg;
2271 if (tp->snd_wacked >= tp->snd_cwnd) {
2272 tp->snd_wacked -= tp->snd_cwnd;
2273 tp->snd_cwnd += tp->t_maxseg;
2274 }
2275 }
2276 tp->snd_cwnd = min(tp->snd_cwnd,
2277 TCP_MAXWIN << tp->snd_scale);
2278 tp->snd_recover = th->th_ack - 1;
2279 }
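/*
 * Illustration of the growth rules above (assumed t_maxseg == 1460,
 * tcp_do_abc enabled): in slow-start an ACK covering 2920 bytes grows
 * cwnd by min(2920, abc_sslimit), i.e. at most two segments per ACK;
 * past ssthresh the same ACK merely adds 2920 to snd_wacked, and cwnd
 * gains one segment only when snd_wacked crosses a full cwnd --
 * roughly one segment per RTT.
 */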
2280 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2281 tp->snd_nxt = tp->snd_una;
2282
2283 /*
2284 * If all outstanding data is acked, stop retransmit
2285 * timer and remember to restart (more output or persist).
2286 * If there is more data to be acked, restart retransmit
2287 * timer, using current (possibly backed-off) value.
2288 */
2289 if (th->th_ack == tp->snd_max) {
2290 tcp_callout_stop(tp, tp->tt_rexmt);
2291 needoutput = TRUE;
2292 } else if (!tcp_callout_active(tp, tp->tt_persist)) {
2293 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
2294 tcp_timer_rexmt);
2295 }
2296
2297 switch (tp->t_state) {
2298 /*
2299 * In FIN_WAIT_1 STATE, in addition to the processing
2300 * for the ESTABLISHED state, if our FIN is now acknowledged
2301 * then enter FIN_WAIT_2.
2302 */
2303 case TCPS_FIN_WAIT_1:
2304 if (ourfinisacked) {
2305 /*
2306 * If we can't receive any more
2307 * data, then closing user can proceed.
2308 * Starting the timer is contrary to the
2309 * specification, but if we don't get a FIN
2310 * we'll hang forever.
2311 */
2312 if (so->so_state & SS_CANTRCVMORE) {
2313 soisdisconnected(so);
2314 tcp_callout_reset(tp, tp->tt_2msl,
2315 tp->t_maxidle, tcp_timer_2msl);
2316 }
2317 tp->t_state = TCPS_FIN_WAIT_2;
2318 }
2319 break;
2320
2321 /*
2322 * In CLOSING STATE, in addition to the processing for
2323 * the ESTABLISHED state, if the ACK acknowledges our FIN
2324 * then enter the TIME-WAIT state, otherwise ignore
2325 * the segment.
2326 */
2327 case TCPS_CLOSING:
2328 if (ourfinisacked) {
2329 tp->t_state = TCPS_TIME_WAIT;
2330 tcp_canceltimers(tp);
2331 tcp_callout_reset(tp, tp->tt_2msl,
2332 2 * tcp_rmx_msl(tp),
2333 tcp_timer_2msl);
2334 soisdisconnected(so);
2335 }
2336 break;
2337
2338 /*
2339 * In LAST_ACK, we may still be waiting for data to drain
2340 * and/or to be acked, as well as for the ack of our FIN.
2341 * If our FIN is now acknowledged, delete the TCB,
2342 * enter the closed state and return.
2343 */
2344 case TCPS_LAST_ACK:
2345 if (ourfinisacked) {
2346 tp = tcp_close(tp);
2347 goto drop;
2348 }
2349 break;
2350
2351 /*
2352 * In TIME_WAIT state the only thing that should arrive
2353 * is a retransmission of the remote FIN. Acknowledge
2354 * it and restart the finack timer.
2355 */
2356 case TCPS_TIME_WAIT:
2357 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2358 tcp_timer_2msl);
2359 goto dropafterack;
2360 }
2361 }
2362
2363step6:
2364 /*
2365 * Update window information.
2366 * Don't look at window if no ACK: TACs send garbage on first SYN.
2367 */
2368 if ((thflags & TH_ACK) &&
2369 acceptable_window_update(tp, th, tiwin)) {
2370 /* keep track of pure window updates */
2371 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2372 tiwin > tp->snd_wnd)
2373 tcpstat.tcps_rcvwinupd++;
2374 tp->snd_wnd = tiwin;
2375 tp->snd_wl1 = th->th_seq;
2376 tp->snd_wl2 = th->th_ack;
2377 if (tp->snd_wnd > tp->max_sndwnd)
2378 tp->max_sndwnd = tp->snd_wnd;
2379 needoutput = TRUE;
2380 }
2381
2382 /*
2383 * Process segments with URG.
2384 */
2385 if ((thflags & TH_URG) && th->th_urp &&
2386 !TCPS_HAVERCVDFIN(tp->t_state)) {
2387 /*
2388 * This is a kludge, but if we receive and accept
2389 * random urgent pointers, we'll crash in
2390 * soreceive. It's hard to imagine someone
2391 * actually wanting to send this much urgent data.
2392 */
2393 if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
2394 th->th_urp = 0; /* XXX */
2395 thflags &= ~TH_URG; /* XXX */
2396 goto dodata; /* XXX */
2397 }
2398 /*
2399 * If this segment advances the known urgent pointer,
2400 * then mark the data stream. This should not happen
2401 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2402 * a FIN has been received from the remote side.
2403 * In these states we ignore the URG.
2404 *
2405 * According to RFC961 (Assigned Protocols),
2406 * the urgent pointer points to the last octet
2407 * of urgent data. We continue, however,
2408 * to consider it to indicate the first octet
2409 * of data past the urgent section as the original
2410 * spec states (in one of two places).
2411 */
2412 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
2413 tp->rcv_up = th->th_seq + th->th_urp;
2414 so->so_oobmark = so->so_rcv.ssb_cc +
2415 (tp->rcv_up - tp->rcv_nxt) - 1;
2416 if (so->so_oobmark == 0)
2417 sosetstate(so, SS_RCVATMARK);
2418 sohasoutofband(so);
2419 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2420 }
2421 /*
2422 * Remove out-of-band data so it doesn't get presented to the user.
2423 * This can happen independent of advancing the URG pointer,
2424 * but if two URG's are pending at once, some out-of-band
2425 * data may creep in... ick.
2426 */
2427 if (th->th_urp <= (u_long)tlen &&
2428 !(so->so_options & SO_OOBINLINE)) {
2429 /* hdr drop is delayed */
2430 tcp_pulloutofband(so, th, m, drop_hdrlen);
2431 }
2432 } else {
2433 /*
2434 * If no out of band data is expected,
2435 * pull receive urgent pointer along
2436 * with the receive window.
2437 */
2438 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2439 tp->rcv_up = tp->rcv_nxt;
2440 }
2441
2442dodata: /* XXX */
2443 /*
2444 * Process the segment text, merging it into the TCP sequencing queue,
2445 * and arranging for acknowledgment of receipt if necessary.
2446 * This process logically involves adjusting tp->rcv_wnd as data
2447 * is presented to the user (this happens in tcp_usrreq.c,
2448 * case PRU_RCVD). If a FIN has already been received on this
2449 * connection then we just ignore the text.
2450 */
2451 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
2452 m_adj(m, drop_hdrlen); /* delayed header drop */
2453 /*
2454 * Insert segment which includes th into TCP reassembly queue
2455 * with control block tp. Set thflags to whether reassembly now
2456 * includes a segment with FIN. This handles the common case
2457 * inline (segment is the next to be received on an established
2458 * connection, and the queue is empty), avoiding linkage into
2459 * and removal from the queue and repetition of various
2460 * conversions.
2461 * Set DELACK for segments received in order, but ack
2462 * immediately when segments are out of order (so
2463 * fast retransmit can work).
2464 */
2465 if (th->th_seq == tp->rcv_nxt &&
2466 LIST_EMPTY(&tp->t_segq) &&
2467 TCPS_HAVEESTABLISHED(tp->t_state)) {
2468 if (DELAY_ACK(tp)) {
2469 tcp_callout_reset(tp, tp->tt_delack,
2470 tcp_delacktime, tcp_timer_delack);
2471 } else {
2472 tp->t_flags |= TF_ACKNOW;
2473 }
2474 tp->rcv_nxt += tlen;
2475 thflags = th->th_flags & TH_FIN;
2476 tcpstat.tcps_rcvpack++;
2477 tcpstat.tcps_rcvbyte += tlen;
2478 ND6_HINT(tp);
2479 if (so->so_state & SS_CANTRCVMORE) {
2480 m_freem(m);
2481 } else {
2482 lwkt_gettoken(&so->so_rcv.ssb_token);
2483 ssb_appendstream(&so->so_rcv, m);
2484 lwkt_reltoken(&so->so_rcv.ssb_token);
2485 }
2486 sorwakeup(so);
2487 } else {
2488 if (!(tp->t_flags & TF_DUPSEG)) {
2489 /* Initialize SACK report block. */
2490 tp->reportblk.rblk_start = th->th_seq;
2491 tp->reportblk.rblk_end = TCP_SACK_BLKEND(
2492 th->th_seq + tlen, thflags);
2493 }
2494 thflags = tcp_reass(tp, th, &tlen, m);
2495 tp->t_flags |= TF_ACKNOW;
2496 }
2497
2498 /*
2499 * Note the amount of data that peer has sent into
2500 * our window, in order to estimate the sender's
2501 * buffer size.
2502 */
2503 len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2504 } else {
2505 m_freem(m);
2506 thflags &= ~TH_FIN;
2507 }
2508
2509 /*
2510 * If FIN is received ACK the FIN and let the user know
2511 * that the connection is closing.
2512 */
2513 if (thflags & TH_FIN) {
2514 if (!TCPS_HAVERCVDFIN(tp->t_state)) {
2515 socantrcvmore(so);
2516 /*
2517 * If connection is half-synchronized
2518 * (ie NEEDSYN flag on) then delay ACK,
2519 * so it may be piggybacked when SYN is sent.
2520 * Otherwise, since we received a FIN then no
2521 * more input can be expected, send ACK now.
2522 */
2523 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
2524 tcp_callout_reset(tp, tp->tt_delack,
2525 tcp_delacktime, tcp_timer_delack);
2526 } else {
2527 tp->t_flags |= TF_ACKNOW;
2528 }
2529 tp->rcv_nxt++;
2530 }
2531
2532 switch (tp->t_state) {
2533 /*
2534 * In SYN_RECEIVED and ESTABLISHED STATES
2535 * enter the CLOSE_WAIT state.
2536 */
2537 case TCPS_SYN_RECEIVED:
2538 tp->t_starttime = ticks;
2539 /*FALLTHROUGH*/
2540 case TCPS_ESTABLISHED:
2541 tp->t_state = TCPS_CLOSE_WAIT;
2542 break;
2543
2544 /*
2545 * If still in FIN_WAIT_1 STATE, our FIN has not been acked, so
2546 * enter the CLOSING state.
2547 */
2548 case TCPS_FIN_WAIT_1:
2549 tp->t_state = TCPS_CLOSING;
2550 break;
2551
2552 /*
2553 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2554 * starting the time-wait timer, turning off the other
2555 * standard timers.
2556 */
2557 case TCPS_FIN_WAIT_2:
2558 tp->t_state = TCPS_TIME_WAIT;
2559 tcp_canceltimers(tp);
2560 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2561 tcp_timer_2msl);
2562 soisdisconnected(so);
2563 break;
2564
2565 /*
2566 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2567 */
2568 case TCPS_TIME_WAIT:
2569 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2570 tcp_timer_2msl);
2571 break;
2572 }
2573 }
2574
2575#ifdef TCPDEBUG
2576 if (so->so_options & SO_DEBUG)
2577 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2578#endif
2579
2580 /*
2581 * Return any desired output.
2582 */
2583 if (needoutput || (tp->t_flags & TF_ACKNOW))
2584 tcp_output(tp);
2585 tcp_sack_report_cleanup(tp);
2586 return(IPPROTO_DONE);
2587
2588dropafterack:
2589 /*
2590 * Generate an ACK dropping incoming segment if it occupies
2591 * sequence space, where the ACK reflects our state.
2592 *
2593 * We can now skip the test for the RST flag since all
2594 * paths to this code happen after packets containing
2595 * RST have been dropped.
2596 *
2597 * In the SYN-RECEIVED state, don't send an ACK unless the
2598 * segment we received passes the SYN-RECEIVED ACK test.
2599 * If it fails send a RST. This breaks the loop in the
2600 * "LAND" DoS attack, and also prevents an ACK storm
2601 * between two listening ports that have been sent forged
2602 * SYN segments, each with the source address of the other.
2603 */
2604 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2605 (SEQ_GT(tp->snd_una, th->th_ack) ||
2606 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2607 rstreason = BANDLIM_RST_OPENPORT;
2608 goto dropwithreset;
2609 }
2610#ifdef TCPDEBUG
2611 if (so->so_options & SO_DEBUG)
2612 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2613#endif
2614 m_freem(m);
2615 tp->t_flags |= TF_ACKNOW;
2616 tcp_output(tp);
2617 tcp_sack_report_cleanup(tp);
2618 return(IPPROTO_DONE);
2619
2620dropwithreset:
2621 /*
2622 * Generate a RST, dropping incoming segment.
2623 * Make ACK acceptable to originator of segment.
2624 * Don't bother to respond if destination was broadcast/multicast.
2625 */
2626 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
2627 goto drop;
2628 if (isipv6) {
2629 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2630 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2631 goto drop;
2632 } else {
2633 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2634 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2635 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2636 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2637 goto drop;
2638 }
2639 /* IPv6 anycast check is done at tcp6_input() */
2640
2641 /*
2642 * Perform bandwidth limiting.
2643 */
2644#ifdef ICMP_BANDLIM
2645 if (badport_bandlim(rstreason) < 0)
2646 goto drop;
2647#endif
2648
2649#ifdef TCPDEBUG
2650 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2651 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2652#endif
2653 if (thflags & TH_ACK)
2654 /* mtod() below is safe as long as hdr dropping is delayed */
2655 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
2656 TH_RST);
2657 else {
2658 if (thflags & TH_SYN)
2659 tlen++;
2660 /* mtod() below is safe as long as hdr dropping is delayed */
2661 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
2662 (tcp_seq)0, TH_RST | TH_ACK);
2663 }
2664 if (tp != NULL)
2665 tcp_sack_report_cleanup(tp);
2666 return(IPPROTO_DONE);
2667
2668drop:
2669 /*
2670 * Drop space held by incoming segment and return.
2671 */
2672#ifdef TCPDEBUG
2673 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2674 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2675#endif
2676 m_freem(m);
2677 if (tp != NULL)
2678 tcp_sack_report_cleanup(tp);
2679 return(IPPROTO_DONE);
2680}
2681
2682/*
2683 * Parse TCP options and place in tcpopt.
2684 */
2685static void
2686tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn,
2687 tcp_seq ack)
2688{
2689 int opt, optlen, i;
2690
2691 to->to_flags = 0;
2692 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2693 opt = cp[0];
2694 if (opt == TCPOPT_EOL)
2695 break;
2696 if (opt == TCPOPT_NOP)
2697 optlen = 1;
2698 else {
2699 if (cnt < 2)
2700 break;
2701 optlen = cp[1];
2702 if (optlen < 2 || optlen > cnt)
2703 break;
2704 }
2705 switch (opt) {
2706 case TCPOPT_MAXSEG:
2707 if (optlen != TCPOLEN_MAXSEG)
2708 continue;
2709 if (!is_syn)
2710 continue;
2711 to->to_flags |= TOF_MSS;
2712 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
2713 to->to_mss = ntohs(to->to_mss);
2714 break;
2715 case TCPOPT_WINDOW:
2716 if (optlen != TCPOLEN_WINDOW)
2717 continue;
2718 if (!is_syn)
2719 continue;
2720 to->to_flags |= TOF_SCALE;
2721 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2722 break;
2723 case TCPOPT_TIMESTAMP:
2724 if (optlen != TCPOLEN_TIMESTAMP)
2725 continue;
2726 to->to_flags |= TOF_TS;
2727 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
2728 to->to_tsval = ntohl(to->to_tsval);
2729 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
2730 to->to_tsecr = ntohl(to->to_tsecr);
2731 /*
2732 * If echoed timestamp is later than the current time,
2733 * fall back to non-RFC1323 RTT calculation.
2734 */
2735 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
2736 to->to_tsecr = 0;
2737 break;
2738 case TCPOPT_SACK_PERMITTED:
2739 if (optlen != TCPOLEN_SACK_PERMITTED)
2740 continue;
2741 if (!is_syn)
2742 continue;
2743 to->to_flags |= TOF_SACK_PERMITTED;
2744 break;
2745 case TCPOPT_SACK:
2746 if ((optlen - 2) & 0x07) /* not multiple of 8 */
2747 continue;
2748 to->to_nsackblocks = (optlen - 2) / 8;
2749 to->to_sackblocks = (struct raw_sackblock *) (cp + 2);
2750 to->to_flags |= TOF_SACK;
2751 for (i = 0; i < to->to_nsackblocks; i++) {
2752 struct raw_sackblock *r = &to->to_sackblocks[i];
2753
2754 r->rblk_start = ntohl(r->rblk_start);
2755 r->rblk_end = ntohl(r->rblk_end);
2756
2757 if (SEQ_LEQ(r->rblk_end, r->rblk_start)) {
2758 /*
2759 * Invalid SACK block; discard all
2760 * SACK blocks
2761 */
2762 tcpstat.tcps_rcvbadsackopt++;
2763 to->to_nsackblocks = 0;
2764 to->to_sackblocks = NULL;
2765 to->to_flags &= ~TOF_SACK;
2766 break;
2767 }
2768 }
2769 if ((to->to_flags & TOF_SACK) &&
2770 tcp_sack_ndsack_blocks(to->to_sackblocks,
2771 to->to_nsackblocks, ack))
2772 to->to_flags |= TOF_DSACK;
2773 break;
2774#ifdef TCP_SIGNATURE
2775 /*
2776 * XXX In order to reply to a host which has set the
2777 * TCP_SIGNATURE option in its initial SYN, we have to
2778 * record the fact that the option was observed here
2779 * for the syncache code to perform the correct response.
2780 */
2781 case TCPOPT_SIGNATURE:
2782 if (optlen != TCPOLEN_SIGNATURE)
2783 continue;
2784 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2785 break;
2786#endif /* TCP_SIGNATURE */
2787 default:
2788 continue;
2789 }
2790 }
2791}
2792
2793/*
2794 * Pull out of band byte out of a segment so
2795 * it doesn't appear in the user's data queue.
2796 * It is still reflected in the segment length for
2797 * sequencing purposes.
2798 * "off" is the delayed to be dropped hdrlen.
2799 */
2800static void
2801tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
2802{
2803 int cnt = off + th->th_urp - 1;
2804
2805 while (cnt >= 0) {
2806 if (m->m_len > cnt) {
2807 char *cp = mtod(m, caddr_t) + cnt;
2808 struct tcpcb *tp = sototcpcb(so);
2809
2810 tp->t_iobc = *cp;
2811 tp->t_oobflags |= TCPOOB_HAVEDATA;
2812 bcopy(cp + 1, cp, m->m_len - cnt - 1);
2813 m->m_len--;
2814 if (m->m_flags & M_PKTHDR)
2815 m->m_pkthdr.len--;
2816 return;
2817 }
2818 cnt -= m->m_len;
2819 m = m->m_next;
2820 if (m == NULL)
2821 break;
2822 }
2823 panic("tcp_pulloutofband");
2824}
2825
2826/*
2827 * Collect new round-trip time estimate
2828 * and update averages and current timeout.
2829 */
2830static void
2831tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack)
2832{
2833 int rebaserto = 0;
2834
2835 tcpstat.tcps_rttupdated++;
2836 tp->t_rttupdated++;
2837 if ((tp->t_flags & TF_REBASERTO) && SEQ_GT(ack, tp->snd_max_prev)) {
2838#ifdef DEBUG_EIFEL_RESPONSE
2839 kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ",
2840 tp->t_srtt_prev, tp->t_rttvar_prev,
2841 tp->t_srtt, tp->t_rttvar);
2842#endif
2843
2844 tcpstat.tcps_eifelresponse++;
2845 rebaserto = 1;
2846 tp->t_flags &= ~TF_REBASERTO;
2847 tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT));
2848 tp->t_rttvar = max(tp->t_rttvar_prev,
2849 (rtt << (TCP_RTTVAR_SHIFT - 1)));
2850 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2851 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2852
2853#ifdef DEBUG_EIFEL_RESPONSE
2854 kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar);
2855#endif
2856 } else if (tp->t_srtt != 0) {
2857 int delta;
2858
2859 /*
2860 * srtt is stored as fixed point with 5 bits after the
2861 * binary point (i.e., scaled by 32). The following magic
2862 * is equivalent to the smoothing algorithm in rfc793 with
2863 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2864 * point). Adjust rtt to origin 0.
2865 */
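/*
 * Worked example (assuming the stock TCP_RTT_SHIFT == 5 and
 * TCP_DELTA_SHIFT == 2): with srtt == 256 (8 ticks scaled by 32) and
 * a measured rtt of 13 ticks,
 *	delta = ((13 - 1) << 2) - (256 >> 3) = 48 - 32 = 16,
 * so srtt becomes 272, i.e. 8.5 ticks: the old estimate plus 1/8 of
 * the 4-tick error, matching the alpha of .875 above.
 */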
2866 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2867 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2868
2869 if ((tp->t_srtt += delta) <= 0)
2870 tp->t_srtt = 1;
2871
2872 /*
2873 * We accumulate a smoothed rtt variance (actually, a
2874 * smoothed mean difference), then set the retransmit
2875 * timer to smoothed rtt + 4 times the smoothed variance.
2876 * rttvar is stored as fixed point with 4 bits after the
2877 * binary point (scaled by 16). The following is
2878 * equivalent to rfc793 smoothing with an alpha of .75
2879 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2880 * rfc793's wired-in beta.
2881 */
2882 if (delta < 0)
2883 delta = -delta;
2884 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2885 if ((tp->t_rttvar += delta) <= 0)
2886 tp->t_rttvar = 1;
2887 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2888 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2889 } else {
2890 /*
2891 * No rtt measurement yet - use the unsmoothed rtt.
2892 * Set the variance to half the rtt (so our first
2893 * retransmit happens at 3*rtt).
2894 */
2895 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2896 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2897 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2898 }
2899 tp->t_rtttime = 0;
2900 tp->t_rxtshift = 0;
2901
2902#ifdef DEBUG_EIFEL_RESPONSE
2903 if (rebaserto) {
2904 kprintf("| rxtcur prev %d, old %d, ",
2905 tp->t_rxtcur_prev, tp->t_rxtcur);
2906 }
2907#endif
2908
2909 /*
2910 * The retransmit should happen at rtt + 4 * rttvar.
2911 * Because of the way we do the smoothing, srtt and rttvar
2912 * will each average +1/2 tick of bias. When we compute
2913 * the retransmit timer, we want 1/2 tick of rounding and
2914 * 1 extra tick because of +-1/2 tick uncertainty in the
2915 * firing of the timer. The bias will give us exactly the
2916 * 1.5 tick we need. But, because the bias is
2917 * statistical, we have to test that we don't drop below
2918 * the minimum feasible timer (which is 2 ticks).
2919 */
2920 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2921 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
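/*
 * Continuing the assumed numbers above: srtt == 272 (8.5 ticks) and
 * rttvar == 32 (2 ticks scaled by 16) make TCP_REXMTVAL() come out
 * near srtt + 4*rttvar == 16.5 ticks, which the TCPT_RANGESET() above
 * then clamps into [max(t_rttmin, rtt + 2), TCPTV_REXMTMAX].
 */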
2922
2923 if (rebaserto) {
2924 if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) {
2925 /*
2926 * RFC4015 requires that the new RTO be at least
2927 * 2*G (tcp_eifel_rtoinc) greater than the RTO
2928 * (t_rxtcur_prev) in effect when the spurious
2929 * retransmit timeout happened.
2930 *
2931 * The above condition could be true if the SRTT
2932 * and RTTVAR used to calculate t_rxtcur_prev
2933 * resulted in a value less than t_rttmin. So
2934 * simply increasing SRTT by tcp_eifel_rtoinc when
2935 * preparing for the Eifel response in
2936 * tcp_save_congestion_state() cannot ensure
2937 * that the new RTO will be tcp_eifel_rtoinc greater
2938 * than t_rxtcur_prev.
2939 */
2940 tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc;
2941 }
2942#ifdef DEBUG_EIFEL_RESPONSE
2943 kprintf("new %d\n", tp->t_rxtcur);
2944#endif
2945 }
2946
2947 /*
2948 * We received an ack for a packet that wasn't retransmitted;
2949 * it is probably safe to discard any error indications we've
2950 * received recently. This isn't quite right, but close enough
2951 * for now (a route might have failed after we sent a segment,
2952 * and the return path might not be symmetrical).
2953 */
2954 tp->t_softerror = 0;
2955}
2956
2957/*
2958 * Determine a reasonable value for maxseg size.
2959 * If the route is known, check route for mtu.
2960 * If none, use an mss that can be handled on the outgoing
2961 * interface without forcing IP to fragment; if bigger than
2962 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2963 * to utilize large mbufs. If no route is found, route has no mtu,
2964 * or the destination isn't local, use a default, hopefully conservative
2965 * size (usually 512 or the default IP max size, but no more than the mtu
2966 * of the interface), as we can't discover anything about intervening
2967 * gateways or networks. We also initialize the congestion/slow start
2968 * window to be a single segment if the destination isn't local.
2969 * While looking at the routing entry, we also initialize other path-dependent
2970 * parameters from pre-set or cached values in the routing entry.
2971 *
2972 * Also take into account the space needed for options that we
2973 * send regularly. Make maxseg shorter by that amount to assure
2974 * that we can send maxseg amount of data even when the options
2975 * are present. Store the upper limit of the length of options plus
2976 * data in maxopd.
2977 *
2978 * NOTE that this routine is only called when we process an incoming
2979 * segment; for outgoing segments only tcp_mssopt is called.
2980 */
2981void
2982tcp_mss(struct tcpcb *tp, int offer)
2983{
2984 struct rtentry *rt;
2985 struct ifnet *ifp;
2986 int rtt, mss;
2987 u_long bufsize;
2988 struct inpcb *inp = tp->t_inpcb;
2989 struct socket *so;
2990#ifdef INET6
2991 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
2992 size_t min_protoh = isipv6 ?
2993 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
2994 sizeof(struct tcpiphdr);
2995#else
2996 const boolean_t isipv6 = FALSE;
2997 const size_t min_protoh = sizeof(struct tcpiphdr);
2998#endif
2999
3000 if (isipv6)
3001 rt = tcp_rtlookup6(&inp->inp_inc);
3002 else
3003 rt = tcp_rtlookup(&inp->inp_inc);
3004 if (rt == NULL) {
3005 tp->t_maxopd = tp->t_maxseg =
3006 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3007 return;
3008 }
3009 ifp = rt->rt_ifp;
3010 so = inp->inp_socket;
3011
3012 /*
3013 * Offer == 0 means that there was no MSS on the SYN segment;
3014 * in this case we use either the interface mtu or tcp_mssdflt.
3015 *
3016 * An offer which is too large will be cut down later.
3017 */
3018 if (offer == 0) {
3019 if (isipv6) {
3020 if (in6_localaddr(&inp->in6p_faddr)) {
3021 offer = ND_IFINFO(rt->rt_ifp)->linkmtu -
3022 min_protoh;
3023 } else {
3024 offer = tcp_v6mssdflt;
3025 }
3026 } else {
3027 if (in_localaddr(inp->inp_faddr))
3028 offer = ifp->if_mtu - min_protoh;
3029 else
3030 offer = tcp_mssdflt;
3031 }
3032 }
3033
3034 /*
3035 * Prevent DoS attack with too small MSS. Round up
3036 * to at least minmss.
3037 *
3038 * Sanity check: make sure that maxopd will be large
3039 * enough to allow some data on segments even if all
3040 * the option space is used (40 bytes). Otherwise
3041 * funny things may happen in tcp_output.
3042 */
3043 offer = max(offer, tcp_minmss);
3044 offer = max(offer, 64);
3045
3046 rt->rt_rmx.rmx_mssopt = offer;
3047
3048 /*
3049 * While we're here, check if there's an initial rtt
3050 * or rttvar. Convert from the route-table units
3051 * to scaled multiples of the slow timeout timer.
3052 */
3053 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
3054 /*
3055 * XXX the lock bit for RTT indicates that the value
3056 * is also a minimum value; this is subject to time.
3057 */
3058 if (rt->rt_rmx.rmx_locks & RTV_RTT)
3059 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
3060 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
3061 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3062 tcpstat.tcps_usedrtt++;
3063 if (rt->rt_rmx.rmx_rttvar) {
3064 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
3065 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
3066 tcpstat.tcps_usedrttvar++;
3067 } else {
3068 /* default variation is +- 1 rtt */
3069 tp->t_rttvar =
3070 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3071 }
3072 TCPT_RANGESET(tp->t_rxtcur,
3073 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3074 tp->t_rttmin, TCPTV_REXMTMAX);
3075 }
3076
3077 /*
3078 * If there's an mtu associated with the route, use it;
3079 * else, use the link mtu. Take the smaller of mss or offer
3080 * as our final mss.
3081 */
3082 if (rt->rt_rmx.rmx_mtu) {
3083 mss = rt->rt_rmx.rmx_mtu - min_protoh;
3084 } else {
3085 if (isipv6)
3086 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
3087 else
3088 mss = ifp->if_mtu - min_protoh;
3089 }
3090 mss = min(mss, offer);
3091
3092 /*
3093 * maxopd stores the maximum length of data AND options
3094 * in a segment; maxseg is the amount of data in a normal
3095 * segment. We need to store this value (maxopd) apart
3096 * from maxseg, because now every segment carries options
3097 * and thus we normally have somewhat less data in segments.
3098 */
3099 tp->t_maxopd = mss;
3100
3101 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3102 ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3103 mss -= TCPOLEN_TSTAMP_APPA;
3104
3105#if (MCLBYTES & (MCLBYTES - 1)) == 0
3106 if (mss > MCLBYTES)
3107 mss &= ~(MCLBYTES-1);
3108#else
3109 if (mss > MCLBYTES)
3110 mss = mss / MCLBYTES * MCLBYTES;
3111#endif
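/*
 * E.g. assuming MCLBYTES == 2048 and a 9000-byte jumbo-frame MTU:
 * mss arrives here as roughly 9000 - min_protoh (8960 for IPv4, less
 * TCPOLEN_TSTAMP_APPA if timestamps are in use) and is rounded down
 * to 8192, so full-sized segments pack neatly into mbuf clusters.
 */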
3112 /*
3113 * If there's a pipesize, change the socket buffer
3114 * to that size. Make the socket buffers an integral
3115 * number of mss units; if the mss is larger than
3116 * the socket buffer, decrease the mss.
3117 */
3118#ifdef RTV_SPIPE
3119 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
3120#endif
3121 bufsize = so->so_snd.ssb_hiwat;
3122 if (bufsize < mss)
3123 mss = bufsize;
3124 else {
3125 bufsize = roundup(bufsize, mss);
3126 if (bufsize > sb_max)
3127 bufsize = sb_max;
3128 if (bufsize > so->so_snd.ssb_hiwat)
3129 ssb_reserve(&so->so_snd, bufsize, so, NULL);
3130 }
3131 tp->t_maxseg = mss;
3132
3133#ifdef RTV_RPIPE
3134 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
3135#endif
3136 bufsize = so->so_rcv.ssb_hiwat;
3137 if (bufsize > mss) {
3138 bufsize = roundup(bufsize, mss);
3139 if (bufsize > sb_max)
3140 bufsize = sb_max;
3141 if (bufsize > so->so_rcv.ssb_hiwat) {
3142 lwkt_gettoken(&so->so_rcv.ssb_token);
3143 ssb_reserve(&so->so_rcv, bufsize, so, NULL);
3144 lwkt_reltoken(&so->so_rcv.ssb_token);
3145 }
3146 }
3147
3148 /*
3149 * Set the slow-start flight size
3150 *
3151 * NOTE: t_maxseg must have been configured!
3152 */
3153 tp->snd_cwnd = tcp_initial_window(tp);
3154
3155 if (rt->rt_rmx.rmx_ssthresh) {
3156 /*
3157 * There's some sort of gateway or interface
3158 * buffer limit on the path. Use this to set
3159 * the slow-start threshold, but set the
3160 * threshold to no less than 2*mss.
3161 */
3162 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
3163 tcpstat.tcps_usedssthresh++;
3164 }
3165}
3166
3167/*
3168 * Determine the MSS option to send on an outgoing SYN.
3169 */
3170int
3171tcp_mssopt(struct tcpcb *tp)
3172{
3173 struct rtentry *rt;
3174#ifdef INET6
3175 boolean_t isipv6 =
3176 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3177 int min_protoh = isipv6 ?
3178 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
3179 sizeof(struct tcpiphdr);
3180#else
3181 const boolean_t isipv6 = FALSE;
3182 const size_t min_protoh = sizeof(struct tcpiphdr);
3183#endif
3184
3185 if (isipv6)
3186 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
3187 else
3188 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
3189 if (rt == NULL)
3190 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3191
3192 return (rt->rt_ifp->if_mtu - min_protoh);
3193}
3194
3195/*
3196 * When a partial ack arrives, force the retransmission of the
3197 * next unacknowledged segment. Do not exit Fast Recovery.
3198 *
3199 * Implement the Slow-but-Steady variant of NewReno by restarting the
3200 * retransmission timer. Turn it off here so it can be restarted
3201 * later in tcp_output().
3202 */
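/*
 * E.g. (assumed numbers): with ten 1448-byte segments outstanding and
 * a partial ACK covering the first three, snd_nxt is pulled back to
 * th_ack and cwnd pinned to one segment, so exactly the next unacked
 * segment is retransmitted; cwnd is then deflated to
 * ocwnd - acked + t_maxseg, keeping roughly the same amount of data
 * in flight instead of injecting a burst.
 */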
3203static void
3204tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
3205{
3206 tcp_seq old_snd_nxt = tp->snd_nxt;
3207 u_long ocwnd = tp->snd_cwnd;
3208
3209 tcp_callout_stop(tp, tp->tt_rexmt);
3210 tp->t_rtttime = 0;
3211 tp->snd_nxt = th->th_ack;
3212 /* Set snd_cwnd to one segment beyond acknowledged offset. */
3213 tp->snd_cwnd = tp->t_maxseg;
3214 tp->t_flags |= TF_ACKNOW;
3215 tcp_output(tp);
3216 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3217 tp->snd_nxt = old_snd_nxt;
3218 /* partial window deflation */
3219 if (ocwnd > acked)
3220 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
3221 else
3222 tp->snd_cwnd = tp->t_maxseg;
3223}
3224
3225/*
3226 * In contrast to the Slow-but-Steady NewReno variant,
3227 * we do not reset the retransmission timer for SACK retransmissions,
3228 * except when retransmitting snd_una.
3229 */
3230static void
3231tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th)
3232{
3233 tcp_seq old_snd_nxt = tp->snd_nxt;
3234 u_long ocwnd = tp->snd_cwnd;
3235 uint32_t pipe;
3236 int nseg = 0; /* consecutive new segments */
3237 int nseg_rexmt = 0; /* retransmitted segments */
3238#define MAXBURST 4 /* limit burst of new packets on partial ack */
3239
3240 tp->t_rtttime = 0;
3241 pipe = tcp_sack_compute_pipe(tp);
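/*
 * "pipe" estimates the bytes currently in flight from the SACK
 * scoreboard; the loop below keeps (re)transmitting while at least
 * one full segment of the saved cwnd budget (ocwnd) remains unused.
 * The tcp_seq_diff_t casts make the test go false once pipe exceeds
 * ocwnd instead of wrapping.
 */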
3242 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg &&
3243 (!tcp_do_smartsack || nseg < MAXBURST)) {
3244 tcp_seq old_snd_max, old_rexmt_high, nextrexmt;
3245 uint32_t sent, seglen;
3246 boolean_t rescue;
3247 int error;
3248
3249 old_rexmt_high = tp->rexmt_high;
3250 if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) {
3251 tp->rexmt_high = old_rexmt_high;
3252 break;
3253 }
3254
3255 /*
3256 * If the next transmission is a rescue retransmission,
3257 * we check whether we have already sent some data
3258 * (either new segments or retransmitted segments)
3259 * into the network. Since the idea of rescue
3260 * retransmission is to sustain ACK clock, as long as
3261 * some segments are in the network, ACK clock will be
3262 * kept ticking.
3263 */
3264 if (rescue && (nseg_rexmt > 0 || nseg > 0)) {
3265 tp->rexmt_high = old_rexmt_high;
3266 break;
3267 }
3268
3269 if (nextrexmt == tp->snd_max)
3270 ++nseg;
3271 else
3272 ++nseg_rexmt;
3273 tp->snd_nxt = nextrexmt;
3274 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
3275 old_snd_max = tp->snd_max;
3276 if (nextrexmt == tp->snd_una)
3277 tcp_callout_stop(tp, tp->tt_rexmt);
3278 error = tcp_output(tp);
3279 if (error != 0) {
3280 tp->rexmt_high = old_rexmt_high;
3281 break;
3282 }
3283 sent = tp->snd_nxt - nextrexmt;
3284 if (sent <= 0) {
3285 tp->rexmt_high = old_rexmt_high;
3286 break;
3287 }
3288 pipe += sent;
3289 tcpstat.tcps_sndsackpack++;
3290 tcpstat.tcps_sndsackbyte += sent;
3291
3292 if (rescue) {
3293 tcpstat.tcps_sackrescue++;
3294 tp->rexmt_rescue = tp->snd_nxt;
3295 tp->t_flags |= TF_SACKRESCUED;
3296 break;
3297 }
3298 if (SEQ_LT(nextrexmt, old_snd_max) &&
3299 SEQ_LT(tp->rexmt_high, tp->snd_nxt)) {
3300 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
3301 if (tcp_aggressive_rescuesack &&
3302 (tp->t_flags & TF_SACKRESCUED) &&
3303 SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
3304 /* Drag RescueRxt along with HighRxt */
3305 tp->rexmt_rescue = tp->rexmt_high;
3306 }
3307 }
3308 }
3309 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3310 tp->snd_nxt = old_snd_nxt;
3311 tp->snd_cwnd = ocwnd;
3312}
3313
3314static boolean_t
3315tcp_sack_limitedxmit(struct tcpcb *tp)
3316{
3317 tcp_seq oldsndnxt = tp->snd_nxt;
3318 tcp_seq oldsndmax = tp->snd_max;
3319 u_long ocwnd = tp->snd_cwnd;
3320 uint32_t pipe;
3321 boolean_t ret = FALSE;
3322
3323 tp->rexmt_high = tp->snd_una - 1;
3324 pipe = tcp_sack_compute_pipe(tp);
3325 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg) {
3326 uint32_t sent;
3327 tcp_seq next;
3328 int error;
3329
3330 next = tp->snd_nxt = tp->snd_max;
3331 tp->snd_cwnd = tp->snd_nxt - tp->snd_una + tp->t_maxseg;
3332
3333 error = tcp_output(tp);
3334 if (error)
3335 break;
3336
3337 sent = tp->snd_nxt - next;
3338 if (sent <= 0)
3339 break;
3340 pipe += sent;
3341 ++tcpstat.tcps_sndlimited;
3342 ret = TRUE;
3343 }
3344
3345 if (SEQ_LT(oldsndnxt, oldsndmax)) {
3346 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
3347 ("snd_una moved in other threads"));
3348 tp->snd_nxt = oldsndnxt;
3349 }
3350 tp->snd_cwnd = ocwnd;
3351
3352 return ret;
3353}
3354
3355/*
3356 * Reset idle time and keep-alive timer, typically called when a valid
3357 * tcp packet is received but may also be called when FASTKEEP is set
3358 * to prevent the previous long timeout from resulting in a drop.
3359 *
3360 * Only update t_rcvtime for non-SYN packets.
3361 *
3362 * Handle the case where one side thinks the connection is established
3363 * but the other side has, say, rebooted without cleaning out the
3364 * connection. The SYNs could be construed as an attack and wind
3365 * up ignored, but in case it isn't an attack we can validate the
3366 * connection by forcing a keepalive.
3367 */
3368void
3369tcp_timer_keep_activity(struct tcpcb *tp, int thflags)
3370{
3371 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3372 if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) {
3373 tp->t_flags |= TF_KEEPALIVE;
3374 tcp_callout_reset(tp, tp->tt_keep, hz / 2,
3375 tcp_timer_keep);
3376 } else {
3377 tp->t_rcvtime = ticks;
3378 tp->t_flags &= ~TF_KEEPALIVE;
3379 tcp_callout_reset(tp, tp->tt_keep,
3380 tp->t_keepidle,
3381 tcp_timer_keep);
3382 }
3383 }
3384}
3385
3386static int
3387tcp_rmx_msl(const struct tcpcb *tp)
3388{
3389 struct rtentry *rt;
3390 struct inpcb *inp = tp->t_inpcb;
3391 int msl;
3392#ifdef INET6
3393 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3394#else
3395 const boolean_t isipv6 = FALSE;
3396#endif
3397
3398 if (isipv6)
3399 rt = tcp_rtlookup6(&inp->inp_inc);
3400 else
3401 rt = tcp_rtlookup(&inp->inp_inc);
3402 if (rt == NULL || rt->rt_rmx.rmx_msl == 0)
3403 return tcp_msl;
3404
3405 msl = (rt->rt_rmx.rmx_msl * hz) / 1000;
3406 if (msl == 0)
3407 msl = 1;
3408
3409 return msl;
3410}
3411
3412static void
3413tcp_established(struct tcpcb *tp)
3414{
3415 tp->t_state = TCPS_ESTABLISHED;
3416 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep);
3417
3418 if (tp->t_rxtsyn > 0) {
3419 /*
3420 * RFC6298:
3421 * "If the timer expires awaiting the ACK of a SYN segment
3422 * and the TCP implementation is using an RTO less than 3
3423 * seconds, the RTO MUST be re-initialized to 3 seconds
3424 * when data transmission begins"
3425 */
3426 if (tp->t_rxtcur < TCPTV_RTOBASE3)
3427 tp->t_rxtcur = TCPTV_RTOBASE3;
3428 }
3429}