/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 */
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>		/* assumed: required for the mbuf routines used below */
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/in_cksum.h>

#include <sys/socketvar2.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
#include <machine/stdarg.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>

u_char tcp_saveipgen[40];	/* the size must be of the max IP header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netproto/key/key.h>
#endif
MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");
#ifdef TCP_DROP_SYNFIN
static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif

static int tcp_do_limitedtransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
    &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_early_retransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
    &tcp_do_early_retransmit, 0, "Early retransmit");

int tcp_aggregate_acks = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
    &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");

static int tcp_do_eifel_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
    &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");

static int tcp_do_abc = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
    &tcp_do_abc, 0,
    "TCP Appropriate Byte Counting (RFC 3465)");

/*
 * The following value actually takes the range [25ms, 250ms],
 * given that most modern systems use 1ms ~ 10ms as the unit
 * of the timestamp option.
 */
static u_int tcp_paws_tolerance = 25;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, paws_tolerance, CTLFLAG_RW,
    &tcp_paws_tolerance, 0, "RFC1323 PAWS tolerance");
/*
 * Define as tunable for easy testing with SACK on and off.
 * Warning: do not change the setting in the middle of an existing active TCP
 * flow, else strange things might happen to that flow.
 */
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable SACK Algorithms");

int tcp_do_smartsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
    &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");

int tcp_do_rescuesack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW,
    &tcp_do_rescuesack, 0, "Rescue retransmission for SACK");

int tcp_aggressive_rescuesack = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW,
    &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK");

static int tcp_force_sackrxt = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, force_sackrxt, CTLFLAG_RW,
    &tcp_force_sackrxt, 0, "Allowed forced SACK retransmit burst");

int tcp_do_rfc6675 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675, CTLFLAG_RW,
    &tcp_do_rfc6675, 0, "Enable RFC 6675");

int tcp_rfc6675_rxt = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_rxt, CTLFLAG_RW,
    &tcp_rfc6675_rxt, 0, "Enable RFC 6675 retransmit");
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Increment step size of automatic receive buffer");

int tcp_autorcvbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

int tcp_sosend_agglim = 3;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW,
    &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit");

int tcp_sosend_async = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW,
    &tcp_sosend_async, 0, "TCP asynchronized pru_send");

static int tcp_ignore_redun_dsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW,
    &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK");
static void	 tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t,
		     tcp_seq);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int, tcp_seq);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
static void	 tcp_sack_rexmt(struct tcpcb *, boolean_t);
static boolean_t tcp_sack_limitedxmit(struct tcpcb *);
static int	 tcp_rmx_msl(const struct tcpcb *);
static void	 tcp_established(struct tcpcb *);
static boolean_t tcp_recv_dupack(struct tcpcb *, tcp_seq, u_int);
/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
	    (tp)->t_inpcb->in6p_route.ro_rt) \
		nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif
/*
 * Indicate whether this ACK should be delayed.  We can delay the ACK if
 *	- delayed ACKs are enabled and
 *	- there is no delayed ACK timer in progress and
 *	- our last ACK wasn't a 0-sized window.  We never want to delay
 *	  the ACK that opens up a 0-sized window.
 */
#define DELAY_ACK(tp) \
	(tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
	!(tp->t_flags & TF_RXWIN0SENT))
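
/*
 * Note that when the delack timer is already pending, DELAY_ACK() is FALSE
 * and the segment is acknowledged immediately; this is what produces the
 * classic ack-every-other-segment behavior described in the aggregation
 * comment inside tcp_input() below.
 */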
#define acceptable_window_update(tp, th, tiwin) \
	(SEQ_LT(tp->snd_wl1, th->th_seq) || \
	    (tp->snd_wl1 == th->th_seq && \
	     (SEQ_LT(tp->snd_wl2, th->th_ack) || \
	      (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))
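
/*
 * This is the RFC 793 window-update acceptance test: snd_wl1/snd_wl2 hold
 * the seq/ack of the segment last used to update snd_wnd, so only a segment
 * that is strictly newer (larger seq, or equal seq with a larger ack, or
 * equal seq/ack with a bigger window) may update the send window.  This
 * keeps old, reordered segments from rewinding the window.
 */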
#define iceildiv(n, d)	(((n)+(d)-1) / (d))

#define need_early_retransmit(tp, ownd) \
	(tcp_do_early_retransmit && \
	 (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \
	 ownd < ((tp->t_rxtthresh + 1) * tp->t_maxseg) && \
	 tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \
	 (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \
	  tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg)))
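
/*
 * iceildiv() is an integer ceiling division, e.g. iceildiv(3000, 1448) == 3.
 * Worked example for the early-retransmit test (illustrative numbers): with
 * t_maxseg = 1448, t_rxtthresh = 3 and two segments outstanding
 * (ownd = 2896), we have ownd < 4 * t_maxseg and iceildiv(2896, 1448) == 2,
 * so a single duplicate ACK (t_dupacks + 1 == 2) already triggers an early
 * retransmit instead of waiting for the full duplicate ACK threshold.
 */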
/*
 * Returns TRUE, if this segment can be merged with the last
 * pending segment in the reassembly queue and this segment
 * does not overlap with the pending segment immediately
 * preceding the last pending segment.
 */
static __inline boolean_t
tcp_paws_canreasslast(const struct tcpcb *tp, const struct tcphdr *th, int tlen)
{
	const struct tseg_qent *last, *prev;

	last = TAILQ_LAST(&tp->t_segq, tsegqe_head);
	if (last == NULL)
		return FALSE;

	/* This segment comes immediately after the last pending segment */
	if (last->tqe_th->th_seq + last->tqe_len == th->th_seq) {
		if (last->tqe_th->th_flags & TH_FIN) {
			/* No segments should follow segment w/ FIN */
			return FALSE;
		}
		return TRUE;
	}

	if (th->th_seq + tlen != last->tqe_th->th_seq)
		return FALSE;
	/* This segment comes immediately before the last pending segment */

	prev = TAILQ_PREV(last, tsegqe_head, tqe_q);
	if (prev == NULL) {
		/*
		 * No pending preceding segment, we assume this segment
		 * could be reassembled.
		 */
		return TRUE;
	}

	/* This segment does not overlap with the preceding segment */
	if (SEQ_GEQ(th->th_seq, prev->tqe_th->th_seq + prev->tqe_len))
		return TRUE;

	return FALSE;
}
static void
tcp_ncr_update_rxtthresh(struct tcpcb *tp)
{
	int old_rxtthresh = tp->t_rxtthresh;
	uint32_t ownd = tp->snd_max - tp->snd_una;

	tp->t_rxtthresh = max(tcprexmtthresh, ((ownd / tp->t_maxseg) >> 1));
	if (tp->t_rxtthresh != old_rxtthresh) {
		tcp_sack_update_lostseq(&tp->scb, tp->snd_una,
		    tp->t_maxseg, tp->t_rxtthresh);
	}
}
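
/*
 * Worked example (illustrative numbers): with 64 segments outstanding
 * (ownd == 64 * t_maxseg), the adjusted threshold becomes
 * max(tcprexmtthresh, 64 >> 1) == 32 duplicate ACKs - roughly half the
 * flight size, in the spirit of Non-Congestion Robustness (RFC 4653) -
 * instead of the classic threshold of 3.
 */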
static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *te;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with th == NULL after becoming established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let the missing segment through which caused this
	 * queue.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		goto present;
	}

	/* Allocate a new queue entry. */
	te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		goto present;
	}
	atomic_add_int(&tcp_reass_qsize, 1);

	if (th->th_flags & TH_FIN)
		tp->t_flags |= TF_QUEDFIN;

	/*
	 * Find a segment which begins after this one does.
	 */
	TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		tcp_seq_diff_t i;

		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {		/* overlaps preceding segment */
			tp->sack_flags |=
			    (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG);
			/* enclosing block starts w/ preceding segment */
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			if (i >= *tlenp) {
				if (th->th_flags & TH_FIN)
					p->tqe_th->th_flags |= TH_FIN;

				/* preceding encloses incoming segment */
				tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
				    p->tqe_th->th_seq + p->tqe_len,
				    p->tqe_th->th_flags);
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				kfree(te, M_TSEGQ);
				atomic_add_int(&tcp_reass_qsize, -1);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
			/* incoming segment end is enclosing block end */
			tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
			    th->th_seq + *tlenp, th->th_flags);
			/* trim end of reported D-SACK block */
			tp->reportblk.rblk_end = th->th_seq;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;
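
	/*
	 * From here on the segment is known to be out of order, so the
	 * D-SACK bookkeeping (RFC 2883) above and below maintains two
	 * blocks: reportblk, the duplicate block to be reported back to
	 * the peer, and encloseblk, the larger block enclosing all of the
	 * overlapping segments.
	 */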
	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
		tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags);
		struct tseg_qent *nq;

		if (i <= 0)
			break;
		if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
			/* first time through */
			tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG);
			tp->encloseblk = tp->reportblk;
			/* report trailing duplicate D-SACK segment */
			tp->reportblk.rblk_start = q->tqe_th->th_seq;
		}
		if ((tp->sack_flags & TSACK_F_ENCLOSESEG) &&
		    SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) {
			/* extend enclosing block if one exists */
			tp->encloseblk.rblk_end = qend_sack;
		}
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		if (q->tqe_th->th_flags & TH_FIN)
			th->th_flags |= TH_FIN;

		nq = TAILQ_NEXT(q, tqe_q);
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	/* check if can coalesce with following segment */
	if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
		tcp_seq tend_sack;

		te->tqe_len += q->tqe_len;
		if (q->tqe_th->th_flags & TH_FIN)
			te->tqe_th->th_flags |= TH_FIN;
		tend_sack = TCP_SACK_BLKEND(te->tqe_th->th_seq + te->tqe_len,
		    te->tqe_th->th_flags);

		m_cat(te->tqe_m, q->tqe_m);
		tp->encloseblk.rblk_end = tend_sack;
		/*
		 * When not reporting a duplicate segment, use
		 * the larger enclosing block as the SACK block.
		 */
		if (!(tp->sack_flags & TSACK_F_DUPSEG))
			tp->reportblk.rblk_end = tend_sack;
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}

	if (p == NULL) {
		TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		/* check if can coalesce with preceding segment */
		if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
			if (te->tqe_th->th_flags & TH_FIN)
				p->tqe_th->th_flags |= TH_FIN;
			p->tqe_len += te->tqe_len;
			m_cat(p->tqe_m, te->tqe_m);
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			/*
			 * When not reporting a duplicate segment, use
			 * the larger enclosing block as the SACK block.
			 */
			if (!(tp->sack_flags & TSACK_F_DUPSEG))
				tp->reportblk.rblk_start = p->tqe_th->th_seq;
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
		} else {
			TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q);
		}
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = TAILQ_FIRST(&tp->t_segq);
	if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	tp->rcv_nxt += q->tqe_len;
	if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
		/* no SACK block to report since ACK advanced */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	}
	/* no enclosing block to report since ACK advanced */
	tp->sack_flags &= ~TSACK_F_ENCLOSESEG;
	flags = q->tqe_th->th_flags & TH_FIN;
	TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
	KASSERT(TAILQ_EMPTY(&tp->t_segq) ||
	    TAILQ_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt,
	    ("segment not coalesced"));
	if (so->so_state & SS_CANTRCVMORE) {
		m_freem(q->tqe_m);
	} else {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		ssb_appendstream(&so->so_rcv, q->tqe_m);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}
	kfree(q, M_TSEGQ);
	atomic_add_int(&tcp_reass_qsize, -1);
	sorwakeup(so);
	return (flags);
}
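
/*
 * Note: the "present:" section above only delivers data once the queue head
 * lines up with rcv_nxt, and the return value carries TH_FIN so the caller
 * can process a FIN the moment it becomes in-sequence.
 */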
/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * is there a better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    offsetof(struct ip6_hdr, ip6_dst));
		return (IPPROTO_DONE);
	}

	tcp_input(mp, offp, proto);
	return (IPPROTO_DONE);
}
#endif
int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	int off0 = *offp;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	struct tcpcb *tp = NULL;
	int thflags;
	struct socket *so = NULL;
	int todrop, acked;
	boolean_t ourfinisacked, needoutput = FALSE, delayed_dupack = FALSE;
	tcp_seq th_dupack = 0; /* XXX gcc warning */
	u_int to_flags = 0; /* XXX gcc warning */
	u_long tiwin;
	int recvwin;
	struct tcpopt to;		/* options in this segment */
	struct sockaddr_in *next_hop = NULL;
	int rstreason;			/* For badport_bandlim accounting purposes */
	int cpu;
	struct ip6_hdr *ip6 = NULL;
#ifdef INET6
	boolean_t isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
#ifdef TCPDEBUG
	short ostate = 0;
#endif

	tcpstat.tcps_rcvtotal++;

	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		struct m_tag *mtag;

		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}
#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE;
#endif

	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof(struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		/* already checked and pulled up in ip_demux() */
		KASSERT(m->m_len >= sizeof(struct tcpiphdr),
		    ("TCP header not in one mbuf: m->m_len %d", m->m_len));
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len + IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof(struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof ipov->ih_x1);
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
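			/*
			 * in_cksum() covers the pseudo-header as well as the
			 * TCP header and data: the ipovly overlay above
			 * zeroes ih_x1 and stores the TCP length, making the
			 * IP header double as the RFC 793 pseudo-header.  A
			 * result of zero means the checksum is good.
			 */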
		}
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
#ifdef INET6
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
#endif
	}
	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	/* already checked and pulled up in ip_demux() */
	KASSERT(off >= sizeof(struct tcphdr) && off <= tlen,
	    ("bad TCP data offset %d (tlen %d)", off, tlen));
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof(struct tcphdr)) {
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		} else {
			/* already pulled up in ip_demux() */
			KASSERT(m->m_len >= sizeof(struct ip) + off,
			    ("TCP header and options not in one mbuf: "
			     "m_len %d, off %d", m->m_len, off));
		}
		optlen = off - sizeof(struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;
#ifdef TCP_DROP_SYNFIN
	/*
	 * If the drop_synfin option is enabled, drop all packets with
	 * both the SYN and FIN bits set.  This prevents e.g. nmap from
	 * identifying the TCP/IP stack.
	 *
	 * This is a violation of the TCP specification.
	 */
	if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN))
		goto drop;
#endif

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);
	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
	 * until after ip6_savecontrol() is called and before other functions
	 * which don't want those proto headers.
	 * Because ip6_savecontrol() is going to parse the mbuf to
	 * search for data to be passed up to user-land, it wants mbuf
	 * parameters to be unchanged.
	 * XXX: the call of ip6_savecontrol() has been obsoleted based on
	 * the latest version of the advanced API (20020110).
	 */
	drop_hdrlen = off0 + off;
	/*
	 * Locate pcb for segment.
	 */
findpcb:
	/* IPFIREWALL_FORWARD section */
	if (next_hop != NULL && !isipv6) {  /* IPv6 support is not there yet */
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		cpu = mycpu->gd_cpuid;
		inp = in_pcblookup_hash(&tcbinfo[cpu],
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 */

			/*
			 * The rest of the ipfw code stores the port in
			 * host order.
			 * (The IP address is still in network order.)
			 */
			in_port_t dport = next_hop->sin_port ?
			    htons(next_hop->sin_port) :
			    th->th_dport;

			cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport,
			    next_hop->sin_addr.s_addr, dport);
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr, dport,
			    1, m->m_pkthdr.rcvif);
		}
	} else {
		if (isipv6) {
			inp = in6_pcblookup_hash(&tcbinfo[0],
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		} else {
			cpu = mycpu->gd_cpuid;
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		}
	}
	/*
	 * If the state is CLOSED (i.e., TCB does not exist) then
	 * all data in the incoming segment is discarded.
	 * If the TCB exists but is in CLOSED state, it is embryonic,
	 * but should either do a listen or a connect soon.
	 */
	if (inp == NULL) {
		if (log_in_vain) {
#ifdef INET6
			char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
#else
			char dbuf[sizeof "aaa.bbb.ccc.ddd"];
			char sbuf[sizeof "aaa.bbb.ccc.ddd"];
#endif
			if (isipv6) {
				strcpy(dbuf, "[");
				strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
				strcat(dbuf, "]");
				strcpy(sbuf, "[");
				strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
				strcat(sbuf, "]");
			} else {
				strcpy(dbuf, inet_ntoa(ip->ip_dst));
				strcpy(sbuf, inet_ntoa(ip->ip_src));
			}
			switch (log_in_vain) {
			case 1:
				if (!(thflags & TH_SYN))
					break;
				/* FALLTHROUGH */
			case 2:
				log(LOG_INFO,
				    "Connection attempt to TCP %s:%d "
				    "from %s:%d flags:0x%02x\n",
				    dbuf, ntohs(th->th_dport), sbuf,
				    ntohs(th->th_sport), thflags);
				break;
			default:
				break;
			}
		}
		if (blackhole) {
			switch (blackhole) {
			case 1:
				if (thflags & TH_SYN)
					goto drop;
				break;
			case 2:
				goto drop;
			default:
				goto drop;
			}
		}
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
#ifdef IPSEC
	if (isipv6) {
		if (ipsec6_in_reject_so(m, inp->inp_socket)) {
			ipsec6stat.in_polvio++;
			goto drop;
		}
	} else {
		if (ipsec4_in_reject_so(m, inp->inp_socket)) {
			ipsecstat.in_polvio++;
			goto drop;
		}
	}
#endif
#ifdef FAST_IPSEC
	if (isipv6) {
		if (ipsec6_in_reject(m, inp))
			goto drop;
	} else {
		if (ipsec4_in_reject(m, inp))
			goto drop;
	}
#endif

	/* Check the minimum TTL for socket. */
	if (inp->inp_ip_minttl != 0) {
		if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl)
			goto drop;
	}

	tp = intotcpcb(inp);
	if (tp == NULL) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	if (tp->t_state <= TCPS_CLOSED)
		goto drop;

	so = inp->inp_socket;

#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy(ip6, tcp_saveipgen, sizeof(*ip6));
		else
			bcopy(ip, tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	bzero(&to, sizeof to);

	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

#ifdef INET6
		inc.inc_isipv6 = (isipv6 == TRUE);
#endif
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
			inc.inc6_route.ro_rt = NULL;		/* XXX */
		} else {
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
			inc.inc_route.ro_rt = NULL;		/* XXX */
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		/*
		 * If the state is LISTEN then ignore segment if it contains
		 * a RST.  If the segment contains an ACK then it is bad and
		 * send a RST.  If it does not contain a SYN then it is not
		 * interesting; drop it.
		 *
		 * If the state is SYN_RECEIVED (syncache) and seg contains
		 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
		 * contains a RST, check the sequence number to see if it
		 * is a valid reset segment.
		 */
		if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
			if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
				if (!syncache_expand(&inc, th, &so, m)) {
					/*
					 * No syncache entry, or ACK was not
					 * for our SYN/ACK.  Send a RST.
					 */
					tcpstat.tcps_badsyn++;
					rstreason = BANDLIM_RST_OPENPORT;
					goto dropwithreset;
				}

				/*
				 * Could not complete 3-way handshake,
				 * connection is being closed down, and
				 * syncache will free mbuf.
				 */
				if (so == NULL)
					return(IPPROTO_DONE);

				/*
				 * We must be in the correct protocol thread
				 * for this connection.
				 */
				KKASSERT(so->so_port == &curthread->td_msgport);

				/*
				 * Socket is created in state SYN_RECEIVED.
				 * Continue processing segment.
				 */
				inp = sotoinpcb(so);
				tp = intotcpcb(inp);
				/*
				 * This is what would have happened in
				 * tcp_output() when the SYN,ACK was sent.
				 */
				tp->snd_up = tp->snd_una;
				tp->snd_max = tp->snd_nxt = tp->iss + 1;
				tp->last_ack_sent = tp->rcv_nxt;

				goto after_listen;
			}
			if (thflags & TH_RST) {
				syncache_chkrst(&inc, th);
				goto drop;
			}
			if (thflags & TH_ACK) {
				syncache_badack(&inc);
				tcpstat.tcps_badsyn++;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			goto drop;
		}
		/*
		 * Segment's flags are (SYN) or (SYN | FIN).
		 */
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description texts for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				tp = NULL;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * If it is from this socket, drop it, it must be forged.
		 * Don't bother responding if the destination was a broadcast.
		 */
		if (th->th_dport == th->th_sport) {
			if (isipv6) {
				if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
						       &ip6->ip6_src))
					goto drop;
			} else {
				if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
					goto drop;
			}
		}
		/*
		 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
		 *
		 * Note that it is quite possible to receive unicast
		 * link-layer packets with a broadcast IP address.  Use
		 * in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST | M_MCAST))
			goto drop;
		if (isipv6) {
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
				goto drop;
		} else {
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto drop;
		}
		/*
		 * SYN appears to be valid; create compressed TCP state
		 * for syncache, or perform t/tcp connection.
		 */
		if (so->so_qlen <= so->so_qlimit) {
			tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack);
			if (!syncache_add(&inc, &to, th, so, m))
				goto drop;

			/*
			 * Entry added to syncache, mbuf used to
			 * send SYN,ACK packet.
			 */
			return(IPPROTO_DONE);
		}
		goto drop;
	}

after_listen:
	/*
	 * Should not happen - syncache should pick up these connections.
	 *
	 * Once we are past handling listen sockets we must be in the
	 * correct protocol processing thread.
	 */
	KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state"));
	KKASSERT(so->so_port == &curthread->td_msgport);
	/* Unscale the window into a 32-bit value. */
	if (!(thflags & TH_SYN))
		tiwin = th->th_win << tp->snd_scale;
	else
		tiwin = th->th_win;

	/*
	 * This is the second part of the MSS DoS prevention code (after
	 * minmss on the sending side) and it deals with too many too small
	 * tcp packets in a too short timeframe (1 second).
	 *
	 * XXX Removed.  This code was crap.  It does not scale to network
	 *     speed, and default values break NFS.  Gone.
	 */

	/*
	 * Segment received on connection.
	 *
	 * Reset idle time and keep-alive timer.  Don't waste time if less
	 * than a second has elapsed.
	 */
	if ((int)(ticks - tp->t_rcvtime) > hz)
		tcp_timer_keep_activity(tp, thflags);
	/*
	 * Process options.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack);
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_requested_s_scale;
		}

		/*
		 * Initial send window; will be updated upon next ACK
		 */
		tp->snd_wnd = th->th_win;

		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (!(to.to_flags & TOF_MSS))
			to.to_mss = 0;
		tcp_mss(tp, to.to_mss);

		/*
		 * Only set the TF_SACK_PERMITTED per-connection flag
		 * if we got a SACK_PERMITTED option from the other side
		 * and the global tcp_do_sack variable is true.
		 */
		if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED))
			tp->t_flags |= TF_SACK_PERMITTED;
	}
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) &&
	    (!(to.to_flags & TOF_TS) ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    tp->snd_nxt == tp->snd_max) {
		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    !IN_FASTRECOVERY(tp)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery
				 *
				 * If Eifel detection applies, then
				 * it is deterministic, so use it
				 * unconditionally over the old heuristic.
				 * Otherwise, fall back to the old heuristic.
				 */
				if (tcp_do_eifel_detect &&
				    (to.to_flags & TOF_TS) && to.to_tsecr &&
				    (tp->rxt_flags & TRXT_F_FIRSTACCACK)) {
					/* Eifel detection applicable. */
					if (to.to_tsecr < tp->t_rexmtTS) {
						tcp_revert_congestion_state(tp);
						++tcpstat.tcps_eifeldetected;
						if (tp->t_rxtshift != 1 ||
						    ticks >= tp->t_badrxtwin)
							++tcpstat.tcps_rttcantdetect;
					}
				} else if (tp->t_rxtshift == 1 &&
					   ticks < tp->t_badrxtwin) {
					tcp_revert_congestion_state(tp);
					++tcpstat.tcps_rttdetected;
				}
				tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK |
				    TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT);
				/*
				 * Recalculate the retransmit timer / rtt.
				 *
				 * Some machines (certain Windows boxes)
				 * send broken timestamp replies during the
				 * SYN+ACK phase, ignore timestamps of 0.
				 */
				if ((to.to_flags & TOF_TS) && to.to_tsecr) {
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1,
					    th->th_ack);
				} else if (tp->t_rtttime &&
					   SEQ_GT(th->th_ack, tp->t_rtseq)) {
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime,
					    th->th_ack);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd.sb, acked);
				tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				tp->t_dupacks = 0;
				/*
				 * Update window information.
				 */
				if (tiwin != tp->snd_wnd &&
				    acceptable_window_update(tp, th, tiwin)) {
					/* keep track of pure window updates */
					if (tp->snd_wl2 == th->th_ack &&
					    tiwin > tp->snd_wnd)
						tcpstat.tcps_rcvwinupd++;
					tp->snd_wnd = tiwin;
					tp->snd_wl1 = th->th_seq;
					tp->snd_wl2 = th->th_ack;
					if (tp->snd_wnd > tp->max_sndwnd)
						tp->max_sndwnd = tp->snd_wnd;
				}
				m_freem(m);
				ND6_HINT(tp); /* some progress has been done */
				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max) {
					tcp_callout_stop(tp, tp->tt_rexmt);
				} else if (!tcp_callout_active(tp,
				    tp->tt_rexmt)) {
					tcp_callout_reset(tp, tp->tt_rexmt,
					    tp->t_rxtcur, tcp_timer_rexmt);
				}
				sowwakeup(so);
				if (so->so_snd.ssb_cc > 0)
					tcp_output(tp);
				return(IPPROTO_DONE);
			}
		} else if (tiwin == tp->snd_wnd &&
		    th->th_ack == tp->snd_una &&
		    TAILQ_EMPTY(&tp->t_segq) &&
		    tlen <= ssb_space(&so->so_rcv)) {
			u_long newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* some progress has been done */

			/*
			 * Automatic sizing of receive socket buffer.  Often
			 * the send buffer size is not optimally adjusted to
			 * the actual network conditions at hand (delay
			 * bandwidth product).  Setting the buffer size too
			 * small limits throughput on links with high
			 * bandwidth and high delay (eg. trans-continental/
			 * oceanic links).
			 *
			 * On the receive side the socket buffer memory is
			 * only rarely used to any significant extent.  This
			 * allows us to be much more aggressive in scaling
			 * the receive socket buffer.  For the case that the
			 * buffer space is actually used to a large extent
			 * and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just
			 * retransmit it later.  Setting the buffer size too
			 * big may only consume too much kernel memory if the
			 * application doesn't read() from the socket or
			 * packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one
			 * notch are:
			 *  1. the number of bytes received during the time
			 *     it takes one timestamp to be reflected back
			 *     to us (the RTT);
			 *  2. received bytes per RTT is within seven eighths
			 *     of the current socket buffer size;
			 *  3. receive buffer size has not hit maximal
			 *     automatic size;
			 *
			 * This algorithm does one step per RTT at most and
			 * only if we receive a bulk stream w/o packet losses
			 * or reorderings.  Shrinking the buffer during idle
			 * times is not necessary as it doesn't consume any
			 * memory when idle.
			 *
			 * TODO: Only step up if the application is actually
			 * serving the buffer to better manage the socket
			 * buffer resources.
			 */
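			/*
			 * Worked example with the defaults above
			 * (illustrative): if ssb_hiwat is 64KB and at least
			 * 56KB (7/8 of it) arrived within one reflected-
			 * timestamp RTT, the buffer is bumped by recvbuf_inc
			 * (16KB default) to 80KB, and so on once per RTT up
			 * to recvbuf_max (2MB default).
			 */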
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.ssb_hiwat / 8 * 7) &&
					    so->so_rcv.ssb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    ulmin(so->so_rcv.ssb_hiwat +
							  tcp_autorcvbuf_inc,
							  tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else {
					tp->rfbuf_cnt += tlen;	/* add up */
				}
			}
			/*
			 * Add data to socket buffer.
			 */
			if (so->so_state & SS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size, give up when
				 * limit is reached.
				 *
				 * Adjusting the size can mess up ACK
				 * sequencing when pure window updates are
				 * being avoided (which is the default),
				 * so force an ack.
				 */
				lwkt_gettoken(&so->so_rcv.ssb_token);
				if (newsize) {
					tp->t_flags |= TF_RXRESIZED;
					if (!ssb_reserve(&so->so_rcv, newsize,
					    so, NULL)) {
						atomic_clear_int(&so->so_rcv.ssb_flags,
						    SSB_AUTOSIZE);
					}
					if (newsize >=
					    (TCP_MAXWIN << tp->rcv_scale)) {
						atomic_clear_int(&so->so_rcv.ssb_flags,
						    SSB_AUTOSIZE);
					}
				}
				m_adj(m, drop_hdrlen); /* delayed header drop */
				ssb_appendstream(&so->so_rcv, m);
				lwkt_reltoken(&so->so_rcv.ssb_token);
			}
			sorwakeup(so);
			/*
			 * This code is responsible for most of the ACKs
			 * the TCP stack sends back after receiving a data
			 * packet.  Note that the DELAY_ACK check fails if
			 * the delack timer is already running, which results
			 * in an ack being sent every other packet (which is
			 * what we want).
			 *
			 * We then further aggregate acks by not actually
			 * sending one until the protocol thread has completed
			 * processing the current backlog of packets.  This
			 * does not delay the ack any further, but allows us
			 * to take advantage of the packet aggregation that
			 * high speed NICs do (usually blocks of 8-10 packets)
			 * to send a single ack rather than four or five acks,
			 * greatly reducing the ack rate, the return channel
			 * bandwidth, and the protocol overhead on both ends.
			 *
			 * Since this also has the effect of slowing down
			 * the exponential slow-start ramp-up, systems with
			 * very large bandwidth-delay products might want
			 * to turn the feature off.
			 */
			if (DELAY_ACK(tp)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else if (tcp_aggregate_acks) {
				tp->t_flags |= TF_ACKNOW;
				if (!(tp->t_flags & TF_ONOUTPUTQ)) {
					tp->t_flags |= TF_ONOUTPUTQ;
					tp->tt_cpu = mycpu->gd_cpuid;
					TAILQ_INSERT_TAIL(
					    &tcpcbackq[tp->tt_cpu],
					    tp, t_outputq);
				}
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			return(IPPROTO_DONE);
		}
	}
	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	recvwin = ssb_space(&so->so_rcv);
	if (recvwin < 0)
		recvwin = 0;
	tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt));
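
	/*
	 * (tp->rcv_adv - tp->rcv_nxt) is the window we have already
	 * advertised to the peer; taking the max of the two ensures we
	 * never offer the peer a smaller window than previously
	 * advertised, as RFC 793 requires.
	 */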
	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {
	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;
	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 *	Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if (thflags & TH_RST) {
			if (thflags & TH_ACK)
				tp = tcp_drop(tp, ECONNREFUSED);
			goto drop;
		}
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			/* Our SYN was acked. */
			tcpstat.tcps_connects++;
			soisconnected(so);
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE | TF_REQ_SCALE))
				tp->rcv_scale = tp->request_r_scale;
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			tcp_callout_stop(tp, tp->tt_rexmt);
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tcp_established(tp);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.
			 * Do 3-way handshake:
			 *	  SYN-SENT  -> SYN-RECEIVED
			 *	  SYN-SENT* -> SYN-RECEIVED*
			 */
			tp->t_flags |= TF_ACKNOW;
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_state = TCPS_SYN_RECEIVED;
		}
		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;
		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing (we no longer bother with T/TCP).
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
	case TCPS_TIME_WAIT:
		break;			/* continue normal processing */
	}
	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *     A TCP SHOULD allow a received RST segment to include data.
	 *
	 *     DISCUSSION
	 *          It has been suggested that a RST segment could contain
	 *          ASCII text that encoded and explained the cause of the
	 *          RST.  No standard has yet been established for such
	 *          data.
	 *
	 * If the reset segment passes the sequence number test, examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				tp = tcp_close(tp);
				break;

			case TCPS_TIME_WAIT:
				break;
			}
		}
		goto drop;
	}
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		/* Check to see if ts_recent is over 24 days old. */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else if (tcp_paws_tolerance && tlen != 0 &&
		    tp->t_state == TCPS_ESTABLISHED &&
		    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
		    !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) &&
		    th->th_ack == tp->snd_una &&
		    tiwin == tp->snd_wnd &&
		    TSTMP_GEQ(to.to_tsval + tcp_paws_tolerance, tp->ts_recent) &&
		    (th->th_seq == tp->rcv_nxt ||
		     (SEQ_GT(th->th_seq, tp->rcv_nxt) &&
		      tcp_paws_canreasslast(tp, th, tlen)))) {
			/*
			 * This tends to prevent valid new segments from being
			 * dropped by the reordered segments sent by the fast
			 * retransmission algorithm on the sending side, i.e.
			 * the fast retransmitted segment w/ larger timestamp
			 * arrives earlier than the previously sent new segments
			 * w/ smaller timestamp.
			 *
			 * If following conditions are met, the segment is
			 * accepted:
			 * - The segment contains data
			 * - The connection is established
			 * - The header does not contain important flags
			 * - SYN or FIN is not needed
			 * - It does not acknowledge new data
			 * - Receive window is not changed
			 * - The timestamp is within "acceptable" range
			 * - The new segment is what we are expecting or
			 *   the new segment could be merged w/ the last
			 *   pending segment on the reassembly queue
			 */
			tcpstat.tcps_pawsaccept++;
			tcpstat.tcps_pawsdrop++;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}
	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (TCP_DO_SACK(tp)) {
			/* Report duplicate segment at head of packet. */
			tp->reportblk.rblk_start = th->th_seq;
			tp->reportblk.rblk_end = TCP_SACK_BLKEND(
			    th->th_seq + tlen, thflags);
			if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt))
				tp->reportblk.rblk_end = tp->rcv_nxt;
			tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_SACKLEFT);
			tp->t_flags |= TF_ACKNOW;
		}
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen ||
		    (todrop == tlen && !(thflags & TH_FIN))) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;
			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}
	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			if (thflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(th->th_seq, tp->rcv_nxt)) {
				tp = tcp_close(tp);
				goto findpcb;
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else {
				goto dropafterack;
			}
		} else {
			tcpstat.tcps_rcvbyteafterwin += todrop;
			m_adj(m, -todrop);
			tlen -= todrop;
			thflags &= ~(TH_PUSH | TH_FIN);
		}
	}
	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *	  Last.ACK.Sent <= SEG.SEQ + SEG.LEN
	 *    instead of RFC1323's
	 *	  Last.ACK.Sent < SEG.SEQ + SEG.LEN,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen
					+ ((thflags & TH_SYN) != 0)
					+ ((thflags & TH_FIN) != 0)))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}
	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if (!(thflags & TH_ACK)) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else
			goto drop;
	}
	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {
	/*
	 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE | TF_REQ_SCALE))
			tp->rcv_scale = tp->request_r_scale;
		/*
		 * Make transitions:
		 *	SYN-RECEIVED  -> ESTABLISHED
		 *	SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tcp_established(tp);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && !(thflags & TH_FIN))
			tcp_reass(tp, NULL, NULL, NULL);
		/* FALLTHROUGH */
	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
			if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
				boolean_t maynotdup = FALSE;

				if (TCP_DO_SACK(tp))
					tcp_sack_update_scoreboard(tp, &to);

				if (tlen != 0 || tiwin != tp->snd_wnd ||
				    ((thflags & TH_FIN) && !(tp->t_flags & TF_SAWFIN)))
					maynotdup = TRUE;

				if (!tcp_callout_active(tp, tp->tt_rexmt) ||
				    th->th_ack != tp->snd_una) {
					tcpstat.tcps_rcvdupack++;

#define DELAY_DUPACK \
do { \
	delayed_dupack = TRUE; \
	th_dupack = th->th_ack; \
	to_flags = to.to_flags; \
} while (0)

				if (!tcp_do_rfc6675 ||
				    (to.to_flags &
				     (TOF_SACK | TOF_SACK_REDUNDANT))
				if ((thflags & TH_FIN) && !(tp->t_flags & TF_QUEDFIN)) {
					/*
					 * This could happen if the reassembly
					 * queue overflowed or was drained.  Don't
					 * drop this FIN here; defer the duplicate
					 * ACK processing until this FIN gets queued.
					 */
				}
				if (tcp_recv_dupack(tp, th->th_ack, to.to_flags))
					goto drop;
			}

			KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
			if (SEQ_GT(th->th_ack, tp->snd_max)) {
				/*
				 * Detected optimistic ACK attack.
				 * Force slow-start to de-synchronize attack.
				 */
				tp->snd_cwnd = tp->t_maxseg;

				tcpstat.tcps_rcvacktoomuch++;
				goto dropafterack;
			}
			/*
			 * If we reach this point, ACK is not a duplicate,
			 * i.e., it ACKs something we sent.
			 */
			if (tp->t_flags & TF_NEEDSYN) {
				/*
				 * T/TCP: Connection was half-synchronized, and our
				 * SYN has been ACK'd (so connection is now fully
				 * synchronized).  Go to non-starred state,
				 * increment snd_una for ACK of SYN, and check if
				 * we can do window scaling.
				 */
				tp->t_flags &= ~TF_NEEDSYN;
				tp->snd_una++;
				/* Do window scaling? */
				if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
				    (TF_RCVD_SCALE | TF_REQ_SCALE))
					tp->rcv_scale = tp->request_r_scale;
			}
			acked = th->th_ack - tp->snd_una;
			tcpstat.tcps_rcvackpack++;
			tcpstat.tcps_rcvackbyte += acked;

			if (tcp_do_eifel_detect && acked > 0 &&
			    (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
			    (tp->rxt_flags & TRXT_F_FIRSTACCACK)) {
				/* Eifel detection applicable. */
				if (to.to_tsecr < tp->t_rexmtTS) {
					++tcpstat.tcps_eifeldetected;
					tcp_revert_congestion_state(tp);
					if (tp->t_rxtshift != 1 ||
					    ticks >= tp->t_badrxtwin)
						++tcpstat.tcps_rttcantdetect;
				}
			} else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
				/*
				 * If we just performed our first retransmit,
				 * and the ACK arrives within our recovery window,
				 * then it was a mistake to do the retransmit
				 * in the first place.  Recover our original cwnd
				 * and ssthresh, and proceed to transmit where we
				 * left off.
				 */
				tcp_revert_congestion_state(tp);
				++tcpstat.tcps_rttdetected;
			}
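
/*
 * Illustrative sketch (not part of the original file): the two
 * spurious-retransmit detectors above, side by side.  The Eifel test
 * compares the echoed timestamp against the timestamp of the first
 * retransmission; the fallback heuristic only asks whether the ACK
 * returned within the "bad retransmit" window after a first
 * retransmit.  Parameter names are assumptions for this example.
 */
#if 0
static int
retransmit_was_spurious_sketch(int have_ts, u_long tsecr, u_long rexmt_ts,
    int rxtshift, u_long now, u_long badrxtwin)
{
	if (have_ts && tsecr != 0)
		return (tsecr < rexmt_ts);		/* Eifel detection */
	return (rxtshift == 1 && now < badrxtwin);	/* timing heuristic */
}
#endif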
			/*
			 * If we have a timestamp reply, update smoothed
			 * round trip time.  If no timestamp is present but
			 * transmit timer is running and timed sequence
			 * number was acked, update smoothed round trip time.
			 * Since we now have an rtt measurement, cancel the
			 * timer backoff (cf., Phil Karn's retransmit alg.).
			 * Recompute the initial retransmit timer.
			 *
			 * Some machines (certain Windows boxes) send broken
			 * timestamp replies during the SYN+ACK phase, ignore
			 * timestamps of 0.
			 */
			if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
				tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack);
			else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
				tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack);
			tcp_xmit_bandwidth_limit(tp, th->th_ack);
			/*
			 * If no data (only SYN) was ACK'd,
			 * skip rest of ACK processing.
			 */
			if (acked == 0)
				goto step6;

			/* Stop looking for an acceptable ACK since one was received. */
			tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK |
			    TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT);

			if (acked > so->so_snd.ssb_cc) {
				tp->snd_wnd -= so->so_snd.ssb_cc;
				sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
				ourfinisacked = TRUE;
			} else {
				sbdrop(&so->so_snd.sb, acked);
				tp->snd_wnd -= acked;
				ourfinisacked = FALSE;
			}
			/*
			 * Update window information.
			 */
			if (acceptable_window_update(tp, th, tiwin)) {
				/* keep track of pure window updates */
				if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
				    tiwin > tp->snd_wnd)
					tcpstat.tcps_rcvwinupd++;
				tp->snd_wnd = tiwin;
				tp->snd_wl1 = th->th_seq;
				tp->snd_wl2 = th->th_ack;
				if (tp->snd_wnd > tp->max_sndwnd)
					tp->max_sndwnd = tp->snd_wnd;
			}
			tp->snd_una = th->th_ack;
			if (TCP_DO_SACK(tp))
				tcp_sack_update_scoreboard(tp, &to);
			if (IN_FASTRECOVERY(tp)) {
				if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
					EXIT_FASTRECOVERY(tp);
					/*
					 * If the congestion window was inflated
					 * to account for the other side's
					 * cached packets, retract it.
					 */
					if (!TCP_DO_SACK(tp))
						tp->snd_cwnd = tp->snd_ssthresh;

					/*
					 * Window inflation should have left us
					 * with approximately snd_ssthresh outstanding
					 * data.  But, in case we would be inclined
					 * to send a burst, better do it using
					 * the slow start mechanism.
					 */
					if (SEQ_GT(th->th_ack + tp->snd_cwnd,
					    tp->snd_max + 2 * tp->t_maxseg))
						tp->snd_cwnd =
						    (tp->snd_max - tp->snd_una) +
						    2 * tp->t_maxseg;
				} else {
					if (TCP_DO_SACK(tp)) {
						tp->snd_max_rexmt = tp->snd_max;
						tcp_sack_rexmt(tp,
						    tp->snd_una == tp->rexmt_high);
					} else {
						tcp_newreno_partial_ack(tp, th, acked);
					}
				}
			}
			/*
			 * Open the congestion window.  When in slow-start,
			 * open exponentially: maxseg per packet.  Otherwise,
			 * open linearly: maxseg per window.
			 */
			if (tp->snd_cwnd <= tp->snd_ssthresh) {
				u_int abc_sslimit =
				    (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
				     tp->t_maxseg : 2 * tp->t_maxseg);

				/* slow-start */
				tp->snd_cwnd += tcp_do_abc ?
				    min(acked, abc_sslimit) : tp->t_maxseg;
			} else {
				/* linear increase */
				tp->snd_wacked += tcp_do_abc ? acked :
				    tp->t_maxseg;
				if (tp->snd_wacked >= tp->snd_cwnd) {
					tp->snd_wacked -= tp->snd_cwnd;
					tp->snd_cwnd += tp->t_maxseg;
				}
			}
			tp->snd_cwnd = min(tp->snd_cwnd,
			    TCP_MAXWIN << tp->snd_scale);
			tp->snd_recover = th->th_ack - 1;

			if (SEQ_LT(tp->snd_nxt, tp->snd_una))
				tp->snd_nxt = tp->snd_una;
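
/*
 * Illustrative sketch (not part of the original file): the window
 * growth above as a pure function.  In slow-start cwnd grows by one
 * maxseg per ACK (bounded by the ABC limit when tcp_do_abc is set);
 * in congestion avoidance snd_wacked accumulates ACKed bytes and cwnd
 * grows by one maxseg per cwnd's worth of data, i.e. roughly once per
 * RTT.  Structure and names are assumptions for this example.
 */
#if 0
struct cwnd_state_sketch {
	u_long cwnd;
	u_long wacked;
};

static void
cwnd_open_sketch(struct cwnd_state_sketch *cw, u_long ssthresh,
    u_long maxseg, u_long acked, int do_abc, u_long abc_sslimit)
{
	if (cw->cwnd <= ssthresh) {
		/* slow-start: exponential growth */
		cw->cwnd += do_abc ? min(acked, abc_sslimit) : maxseg;
	} else {
		/* congestion avoidance: linear growth */
		cw->wacked += do_abc ? acked : maxseg;
		if (cw->wacked >= cw->cwnd) {
			cw->wacked -= cw->cwnd;
			cw->cwnd += maxseg;
		}
	}
}
#endif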
			/*
			 * If all outstanding data is acked, stop retransmit
			 * timer and remember to restart (more output or persist).
			 * If there is more data to be acked, restart retransmit
			 * timer, using current (possibly backed-off) value.
			 */
			if (th->th_ack == tp->snd_max) {
				tcp_callout_stop(tp, tp->tt_rexmt);
				needoutput = TRUE;
			} else if (!tcp_callout_active(tp, tp->tt_persist)) {
				tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
				    tcp_timer_rexmt);
			}
			switch (tp->t_state) {
			/*
			 * In FIN_WAIT_1 state, in addition to the processing
			 * for the ESTABLISHED state, if our FIN is now
			 * acknowledged then enter FIN_WAIT_2.
			 */
			case TCPS_FIN_WAIT_1:
				if (ourfinisacked) {
					/*
					 * If we can't receive any more
					 * data, then closing user can proceed.
					 * Starting the timer is contrary to the
					 * specification, but if we don't get a FIN
					 * we'll hang forever.
					 */
					if (so->so_state & SS_CANTRCVMORE) {
						soisdisconnected(so);
						tcp_callout_reset(tp, tp->tt_2msl,
						    tp->t_maxidle, tcp_timer_2msl);
					}
					tp->t_state = TCPS_FIN_WAIT_2;
				}
				break;
			/*
			 * In CLOSING state, in addition to the processing for
			 * the ESTABLISHED state, if the ACK acknowledges our FIN
			 * then enter the TIME-WAIT state, otherwise ignore
			 * the segment.
			 */
			case TCPS_CLOSING:
				if (ourfinisacked) {
					tp->t_state = TCPS_TIME_WAIT;
					tcp_canceltimers(tp);
					tcp_callout_reset(tp, tp->tt_2msl,
					    2 * tcp_rmx_msl(tp),
					    tcp_timer_2msl);
					soisdisconnected(so);
				}
				break;

			/*
			 * In LAST_ACK, we may still be waiting for data to drain
			 * and/or to be acked, as well as for the ack of our FIN.
			 * If our FIN is now acknowledged, delete the TCB,
			 * enter the closed state and return.
			 */
			case TCPS_LAST_ACK:
				if (ourfinisacked) {
					tp = tcp_close(tp);
					goto drop;
				}
				break;

			/*
			 * In TIME_WAIT state the only thing that should arrive
			 * is a retransmission of the remote FIN.  Acknowledge
			 * it and restart the finack timer.
			 */
			case TCPS_TIME_WAIT:
				tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
				    tcp_timer_2msl);
				goto dropafterack;
			}
step6:
	/*
	 * Update window information.
	 * Don't look at window if no ACK: TACs send garbage on first SYN.
	 */
	if ((thflags & TH_ACK) &&
	    acceptable_window_update(tp, th, tiwin)) {
		/* keep track of pure window updates */
		if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
		    tiwin > tp->snd_wnd)
			tcpstat.tcps_rcvwinupd++;
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = th->th_seq;
		tp->snd_wl2 = th->th_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
		needoutput = TRUE;
	}
	/*
	 * Process segments with URG.
	 */
	if ((thflags & TH_URG) && th->th_urp &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * This is a kludge, but if we receive and accept
		 * random urgent pointers, we'll crash in
		 * soreceive.  It's hard to imagine someone
		 * actually wanting to send this much urgent data.
		 */
		if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
			th->th_urp = 0;			/* XXX */
			thflags &= ~TH_URG;		/* XXX */
			goto dodata;			/* XXX */
		}
		/*
		 * If this segment advances the known urgent pointer,
		 * then mark the data stream.  This should not happen
		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT states since
		 * a FIN has been received from the remote side.
		 * In these states we ignore the URG.
		 *
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section as the original
		 * spec states (in one of two places).
		 */
		if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
			tp->rcv_up = th->th_seq + th->th_urp;
			so->so_oobmark = so->so_rcv.ssb_cc +
			    (tp->rcv_up - tp->rcv_nxt) - 1;
			if (so->so_oobmark == 0)
				sosetstate(so, SS_RCVATMARK);
			sohasoutofband(so);
			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
		}
		/*
		 * Remove out of band data so it doesn't get presented
		 * to the user.  This can happen independent of advancing
		 * the URG pointer, but if two URG's are pending at once,
		 * some out-of-band data may creep in... ick.
		 */
		if (th->th_urp <= (u_long)tlen &&
		    !(so->so_options & SO_OOBINLINE)) {
			/* hdr drop is delayed */
			tcp_pulloutofband(so, th, m, drop_hdrlen);
		}
	} else {
		/*
		 * If no out of band data is expected,
		 * pull receive urgent pointer along
		 * with the receive window.
		 */
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
	}
dodata:							/* XXX */
	/*
	 * Process the segment text, merging it into the TCP sequencing queue,
	 * and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data
	 * is presented to the user (this happens in tcp_usrreq.c,
	 * case PRU_RCVD).  If a FIN has already been received on this
	 * connection then we just ignore the text.
	 */
	if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
		if (thflags & TH_FIN)
			tp->t_flags |= TF_SAWFIN;
		m_adj(m, drop_hdrlen);	/* delayed header drop */
		/*
		 * Insert segment which includes th into TCP reassembly queue
		 * with control block tp.  Set thflags to whether reassembly now
		 * includes a segment with FIN.  This handles the common case
		 * inline (segment is the next to be received on an established
		 * connection, and the queue is empty), avoiding linkage into
		 * and removal from the queue and repetition of various
		 * conversions.
		 * Set DELACK for segments received in order, but ack
		 * immediately when segments are out of order (so
		 * fast retransmit can work).
		 */
		if (th->th_seq == tp->rcv_nxt &&
		    TAILQ_EMPTY(&tp->t_segq) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			if (thflags & TH_FIN)
				tp->t_flags |= TF_QUEDFIN;
			if (DELAY_ACK(tp)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt += tlen;
			thflags = th->th_flags & TH_FIN;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			if (so->so_state & SS_CANTRCVMORE) {
				m_freem(m);
			} else {
				lwkt_gettoken(&so->so_rcv.ssb_token);
				ssb_appendstream(&so->so_rcv, m);
				lwkt_reltoken(&so->so_rcv.ssb_token);
			}
			sorwakeup(so);
		} else {
			if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
				/* Initialize SACK report block. */
				tp->reportblk.rblk_start = th->th_seq;
				tp->reportblk.rblk_end = TCP_SACK_BLKEND(
				    th->th_seq + tlen, thflags);
			}
			thflags = tcp_reass(tp, th, &tlen, m);
			tp->t_flags |= TF_ACKNOW;
		}
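
/*
 * Illustrative sketch (not part of the original file): the fast-path
 * test above.  A segment bypasses tcp_reass() only when it is exactly
 * the next expected sequence, nothing is already queued, and the
 * connection is established; everything else goes through the
 * reassembly queue and forces an immediate ACK so the peer's fast
 * retransmit can engage.  Names are assumptions for this example.
 */
#if 0
static int
in_order_fast_path_sketch(tcp_seq seg_seq, tcp_seq rcv_nxt,
    int reass_queue_empty, int established)
{
	return (seg_seq == rcv_nxt && reass_queue_empty && established);
}
#endif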
		/*
		 * Note the amount of data that peer has sent into
		 * our window, in order to estimate the sender's
		 * buffer size.
		 */
		len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
	} else {
		m_freem(m);
		thflags &= ~TH_FIN;
	}

	/*
	 * If FIN is received ACK the FIN and let the user know
	 * that the connection is closing.
	 */
	if (thflags & TH_FIN) {
		if (!TCPS_HAVERCVDFIN(tp->t_state)) {
			socantrcvmore(so);
			/*
			 * If connection is half-synchronized
			 * (i.e., NEEDSYN flag on) then delay ACK,
			 * so it may be piggybacked when SYN is sent.
			 * Otherwise, since we received a FIN then no
			 * more input can be expected, send ACK now.
			 */
			if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt++;
		}
		switch (tp->t_state) {
		/*
		 * In SYN_RECEIVED and ESTABLISHED states
		 * enter the CLOSE_WAIT state.
		 */
		case TCPS_SYN_RECEIVED:
			tp->t_starttime = ticks;
			/* FALLTHROUGH */
		case TCPS_ESTABLISHED:
			tp->t_state = TCPS_CLOSE_WAIT;
			break;

		/*
		 * If still in FIN_WAIT_1 state our FIN has not been acked,
		 * so enter the CLOSING state.
		 */
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			break;

		/*
		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
		 * starting the time-wait timer, turning off the other
		 * standard timers.
		 */
		case TCPS_FIN_WAIT_2:
			tp->t_state = TCPS_TIME_WAIT;
			tcp_canceltimers(tp);
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
			    tcp_timer_2msl);
			soisdisconnected(so);
			break;

		/*
		 * In TIME_WAIT state restart the 2 MSL time_wait timer.
		 */
		case TCPS_TIME_WAIT:
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
			    tcp_timer_2msl);
			break;
		}
	}
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);

	/*
	 * Delayed duplicate ACK processing.
	 */
	if (delayed_dupack && tcp_recv_dupack(tp, th_dupack, to_flags))
		needoutput = FALSE;

	/*
	 * Return any desired output.
	 */
	if (needoutput || (tp->t_flags & TF_ACKNOW))
		tcp_output(tp);
	tcp_sack_report_cleanup(tp);
	return(IPPROTO_DONE);
dropafterack:
	/*
	 * Generate an ACK dropping incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all
	 * paths to this code happen after packets containing
	 * RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the
	 * segment we received passes the SYN-RECEIVED ACK test.
	 * If it fails send a RST.  This breaks the loop in the
	 * "LAND" DoS attack, and also prevents an ACK storm
	 * between two listening ports that have been sent forged
	 * SYN segments, each with the source address of the other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	     SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
	m_freem(m);
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);
	tcp_sack_report_cleanup(tp);
	return(IPPROTO_DONE);
dropwithreset:
	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
		goto drop;
	if (isipv6) {
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
	} else {
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	/* IPv6 anycast check is done at tcp6_input() */

	/*
	 * Perform bandwidth limiting.
	 */
	if (badport_bandlim(rstreason) < 0)
		goto drop;

	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);

	if (thflags & TH_ACK) {
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
		    TH_RST);
	} else {
		if (thflags & TH_SYN)
			tlen++;
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
		    (tcp_seq)0, TH_RST | TH_ACK);
	}
	tcp_sack_report_cleanup(tp);
	return(IPPROTO_DONE);
drop:
	/*
	 * Drop space held by incoming segment and return.
	 */
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
	m_freem(m);
	tcp_sack_report_cleanup(tp);
	return(IPPROTO_DONE);
/*
 * Parse TCP options and place in tcpopt.
 */
static void
tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn,
    tcp_seq ack)
{
	int opt, optlen, i;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_MSS;
			bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
			to->to_mss = ntohs(to->to_mss);
			break;
		case TCPOPT_WINDOW:
			if (optlen != TCPOLEN_WINDOW)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SCALE;
			to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
			break;
		case TCPOPT_TIMESTAMP:
			if (optlen != TCPOLEN_TIMESTAMP)
				continue;
			to->to_flags |= TOF_TS;
			bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
			to->to_tsval = ntohl(to->to_tsval);
			bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
			to->to_tsecr = ntohl(to->to_tsecr);
			/*
			 * If echoed timestamp is later than the current time,
			 * fall back to non-RFC1323 RTT calculation.
			 */
			if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
				to->to_tsecr = 0;
			break;
		case TCPOPT_SACK_PERMITTED:
			if (optlen != TCPOLEN_SACK_PERMITTED)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SACK_PERMITTED;
			break;
		case TCPOPT_SACK:
			if ((optlen - 2) & 0x07)	/* not multiple of 8 */
				return;
			to->to_nsackblocks = (optlen - 2) / 8;
			to->to_sackblocks = (struct raw_sackblock *)(cp + 2);
			to->to_flags |= TOF_SACK;
			for (i = 0; i < to->to_nsackblocks; i++) {
				struct raw_sackblock *r = &to->to_sackblocks[i];

				r->rblk_start = ntohl(r->rblk_start);
				r->rblk_end = ntohl(r->rblk_end);

				if (SEQ_LEQ(r->rblk_end, r->rblk_start)) {
					/*
					 * Invalid SACK block; discard all
					 * SACK blocks
					 */
					tcpstat.tcps_rcvbadsackopt++;
					to->to_nsackblocks = 0;
					to->to_sackblocks = NULL;
					to->to_flags &= ~TOF_SACK;
					break;
				}
			}
			if ((to->to_flags & TOF_SACK) &&
			    tcp_sack_ndsack_blocks(to->to_sackblocks,
			    to->to_nsackblocks, ack))
				to->to_flags |= TOF_DSACK;
			break;
#ifdef TCP_SIGNATURE
		/*
		 * XXX In order to reply to a host which has set the
		 * TCP_SIGNATURE option in its initial SYN, we have to
		 * record the fact that the option was observed here
		 * for the syncache code to perform the correct response.
		 */
		case TCPOPT_SIGNATURE:
			if (optlen != TCPOLEN_SIGNATURE)
				continue;
			to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
			break;
#endif /* TCP_SIGNATURE */
		default:
			continue;
		}
	}
}
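
/*
 * Illustrative sketch (not part of the original file): the wire layout
 * the loop above walks.  An MSS option is kind (1 byte), length
 * (1 byte) and a 16-bit value in network order, so an MSS of 1460
 * appears as { 0x02, 0x04, 0x05, 0xb4 }.  The helper decodes just
 * that one option under the same length checks; its name is an
 * assumption for this example.
 */
#if 0
static int
parse_mss_option_sketch(const u_char *cp, int cnt, uint16_t *mss)
{
	uint16_t v;

	if (cnt < TCPOLEN_MAXSEG || cp[0] != TCPOPT_MAXSEG ||
	    cp[1] != TCPOLEN_MAXSEG)
		return (0);
	bcopy(cp + 2, &v, sizeof(v));
	*mss = ntohs(v);
	return (1);
}
#endif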
/*
 * Pull out of band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 * "off" is the delayed-to-be-dropped header length.
 */
static void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
{
	int cnt = off + th->th_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp + 1, cp, m->m_len - cnt - 1);
			m->m_len--;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	panic("tcp_pulloutofband");
}
/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 */
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack)
{
	int delta;

	tcpstat.tcps_rttupdated++;

	if ((tp->rxt_flags & TRXT_F_REBASERTO) &&
	    SEQ_GT(ack, tp->snd_max_prev)) {
#ifdef DEBUG_EIFEL_RESPONSE
		kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ",
		    tp->t_srtt_prev, tp->t_rttvar_prev,
		    tp->t_srtt, tp->t_rttvar);
#endif

		tcpstat.tcps_eifelresponse++;

		tp->rxt_flags &= ~TRXT_F_REBASERTO;
		tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT));
		tp->t_rttvar = max(tp->t_rttvar_prev,
		    (rtt << (TCP_RTTVAR_SHIFT - 1)));
		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;

#ifdef DEBUG_EIFEL_RESPONSE
		kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar);
#endif
	} else if (tp->t_srtt != 0) {
		/*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
		    - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;

		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	}
#ifdef DEBUG_EIFEL_RESPONSE
	if (tp->rxt_flags & TRXT_F_REBASERTO)
		kprintf("| rxtcur prev %d, old %d, ",
		    tp->t_rxtcur_prev, tp->t_rxtcur);
#endif

	/*
	 * The retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
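
/*
 * Illustrative sketch (not part of the original file): the fixed-point
 * smoothing above restated in floating point for readability.  srtt
 * follows srtt = 7/8*srtt + 1/8*rtt, rttvar follows
 * rttvar = 3/4*rttvar + 1/4*|rtt - srtt|, and the retransmit timeout
 * is taken as srtt + 4*rttvar, roughly what TCP_REXMTVAL() computes
 * in scaled integers.  Names are assumptions for this example.
 */
#if 0
static double
rto_update_sketch(double *srtt, double *rttvar, double rtt)
{
	double delta = rtt - *srtt;

	*srtt += delta / 8.0;
	if (delta < 0.0)
		delta = -delta;
	*rttvar += (delta - *rttvar) / 4.0;
	return (*srtt + 4.0 * *rttvar);
}
#endif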
	if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) {
		/*
		 * RFC4015 requires that the new RTO is at least
		 * 2*G (tcp_eifel_rtoinc) greater than the RTO
		 * (t_rxtcur_prev) in effect when the spurious
		 * retransmit was made.
		 *
		 * The above condition could be true if the SRTT
		 * and RTTVAR used to calculate t_rxtcur_prev
		 * resulted in a value less than t_rttmin.  So
		 * simply increasing SRTT by tcp_eifel_rtoinc when
		 * preparing for the Eifel response could not ensure
		 * that the new RTO will be tcp_eifel_rtoinc greater
		 * than t_rxtcur_prev.
		 */
		tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc;
	}
#ifdef DEBUG_EIFEL_RESPONSE
	kprintf("new %d\n", tp->t_rxtcur);
#endif

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}
/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing
 * interface without forcing IP to fragment; if bigger than
 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
 * to utilize large mbufs.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment; for outgoing segments only tcp_mssopt is called.
 */
void
tcp_mss(struct tcpcb *tp, int offer)
{
	struct rtentry *rt;
	struct ifnet *ifp;
	int rtt, mss;
	u_long bufsize;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	size_t min_protoh = isipv6 ?
	    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
	    sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL) {
		tp->t_maxopd = tp->t_maxseg =
		    (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
		return;
	}
	ifp = rt->rt_ifp;
	so = inp->inp_socket;

	/*
	 * Offer == 0 means that there was no MSS on the SYN segment;
	 * in this case we use either the interface mtu or tcp_mssdflt.
	 *
	 * An offer which is too large will be cut down later.
	 */
	if (offer == 0) {
		if (isipv6) {
			if (in6_localaddr(&inp->in6p_faddr)) {
				offer = ND_IFINFO(rt->rt_ifp)->linkmtu -
				    min_protoh;
			} else {
				offer = tcp_v6mssdflt;
			}
		} else {
			if (in_localaddr(inp->inp_faddr))
				offer = ifp->if_mtu - min_protoh;
			else
				offer = tcp_mssdflt;
		}
	}
	/*
	 * Prevent DoS attack with too small MSS.  Round up
	 * to at least minmss.
	 *
	 * Sanity check: make sure that maxopd will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 */
	offer = max(offer, tcp_minmss);
	offer = max(offer, 64);

	rt->rt_rmx.rmx_mssopt = offer;
	/*
	 * While we're here, check if there's an initial rtt
	 * or rttvar.  Convert from the route-table units
	 * to scaled multiples of the slow timeout timer.
	 */
	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX the lock bit for RTT indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
		tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		tcpstat.tcps_usedrtt++;
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			tcpstat.tcps_usedrttvar++;
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	/*
	 * If there's an mtu associated with the route, use it;
	 * else, use the link mtu.  Take the smaller of mss or offer
	 * as our final mss.
	 */
	if (rt->rt_rmx.rmx_mtu) {
		mss = rt->rt_rmx.rmx_mtu - min_protoh;
	} else {
		if (isipv6)
			mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
		else
			mss = ifp->if_mtu - min_protoh;
	}
	mss = min(mss, offer);

	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment.  We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

#if	(MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif
	/*
	 * If there's a pipesize, change the socket buffer
	 * to that size.  Make the socket buffers an integral
	 * number of mss units; if the mss is larger than
	 * the socket buffer, decrease the mss.
	 */
	if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
		bufsize = so->so_snd.ssb_hiwat;
	if (bufsize < mss) {
		mss = bufsize;
	} else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.ssb_hiwat)
			ssb_reserve(&so->so_snd, bufsize, so, NULL);
	}
	tp->t_maxseg = mss;

	if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
		bufsize = so->so_rcv.ssb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.ssb_hiwat) {
			lwkt_gettoken(&so->so_rcv.ssb_token);
			ssb_reserve(&so->so_rcv, bufsize, so, NULL);
			lwkt_reltoken(&so->so_rcv.ssb_token);
		}
	}
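
/*
 * Illustrative sketch (not part of the original file): the buffer
 * sizing rule above.  For example, with mss = 1460 and a 57344-byte
 * pipe hint, roundup(57344, 1460) yields 58400, which is then clipped
 * to sb_max before the socket buffer is grown.  The helper name and
 * parameters are assumptions for this example.
 */
#if 0
static u_long
size_sockbuf_sketch(u_long pipe_hint, u_long cur_hiwat, u_long mss,
    u_long sbmax)
{
	u_long bufsize = (pipe_hint != 0) ? pipe_hint : cur_hiwat;

	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);	/* whole mss units */
		if (bufsize > sbmax)
			bufsize = sbmax;
	}
	return (bufsize);
}
#endif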
	/*
	 * Set the slow-start flight size.
	 *
	 * NOTE: t_maxseg must have been configured!
	 */
	tp->snd_cwnd = tcp_initial_window(tp);

	if (rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
		tcpstat.tcps_usedssthresh++;
	}
}
/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct tcpcb *tp)
{
	struct rtentry *rt;
#ifdef INET6
	boolean_t isipv6 =
	    ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	int min_protoh = isipv6 ?
	    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
	    sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
	else
		rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
	if (rt == NULL)
		return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);

	return (rt->rt_ifp->if_mtu - min_protoh);
}
/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not exit Fast Recovery.
 *
 * Implement the Slow-but-Steady variant of NewReno by restarting
 * the retransmission timer.  Turn it off here so it can be
 * restarted later in tcp_output().
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
{
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;

	tcp_callout_stop(tp, tp->tt_rexmt);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/* Set snd_cwnd to one segment beyond acknowledged offset. */
	tp->snd_cwnd = tp->t_maxseg;
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);

	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	/* partial window deflation */
	if (ocwnd > acked)
		tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
	else
		tp->snd_cwnd = tp->t_maxseg;
}
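
/*
 * Illustrative sketch (not part of the original file): the partial
 * window deflation above.  With ocwnd = 10 segments and 3 segments
 * newly acked, the connection continues at 10 - 3 + 1 = 8 segments,
 * so the forced retransmit plus the deflated window keep the amount
 * of data in flight roughly constant instead of falling back to
 * slow-start.  The guard mirrors the code above; names are
 * assumptions for this example.
 */
#if 0
static u_long
partial_ack_deflate_sketch(u_long ocwnd, u_long acked, u_long maxseg)
{
	if (ocwnd > acked)
		return (ocwnd - acked + maxseg);
	return (maxseg);
}
#endif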
/*
 * In contrast to the Slow-but-Steady NewReno variant,
 * we do not reset the retransmission timer for SACK retransmissions,
 * except when retransmitting snd_una.
 */
static void
tcp_sack_rexmt(struct tcpcb *tp, boolean_t force)
{
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	uint32_t pipe;
	int nseg = 0;		/* consecutive new segments */
	int nseg_rexmt = 0;	/* retransmitted segments */
	int maxrexmt = 0;
#define MAXBURST 4		/* limit burst of new packets on partial ack */
	if (force) {
		uint32_t unsacked = tcp_sack_first_unsacked_len(tp);

		/*
		 * Try to fill the first hole in the receiver's
		 * reassembly queue.
		 */
		maxrexmt = howmany(unsacked, tp->t_maxseg);
		if (maxrexmt > tcp_force_sackrxt)
			maxrexmt = tcp_force_sackrxt;
	}
	pipe = tcp_sack_compute_pipe(tp);
	while (((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg
	    || (force && nseg_rexmt < maxrexmt && nseg == 0)) &&
	    (!tcp_do_smartsack || nseg < MAXBURST)) {
		tcp_seq old_snd_max, old_rexmt_high, nextrexmt;
		uint32_t sent, seglen;
		boolean_t rescue;
		int error;

		old_rexmt_high = tp->rexmt_high;
		if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) {
			tp->rexmt_high = old_rexmt_high;
			break;
		}

		/*
		 * If the next transmission is a rescue retransmission,
		 * we check whether we have already sent some data
		 * (either new segments or retransmitted segments)
		 * into the network or not.  Since the idea of rescue
		 * retransmission is to sustain ACK clock, as long as
		 * some segments are in the network, ACK clock will be
		 * kept alive and the rescue retransmission is not needed.
		 */
		if (rescue && (nseg_rexmt > 0 || nseg > 0)) {
			tp->rexmt_high = old_rexmt_high;
			break;
		}

		if (nextrexmt == tp->snd_max)
			++nseg;
		else
			++nseg_rexmt;
		tp->snd_nxt = nextrexmt;
		tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
		old_snd_max = tp->snd_max;
		if (nextrexmt == tp->snd_una)
			tcp_callout_stop(tp, tp->tt_rexmt);
		tp->t_flags |= TF_XMITNOW;
		error = tcp_output(tp);
		if (error) {
			tp->rexmt_high = old_rexmt_high;
			break;
		}
		sent = tp->snd_nxt - nextrexmt;
		if (sent == 0) {
			tp->rexmt_high = old_rexmt_high;
			break;
		}
		tcpstat.tcps_sndsackpack++;
		tcpstat.tcps_sndsackbyte += sent;

		if (rescue) {
			tcpstat.tcps_sackrescue++;
			tp->rexmt_rescue = tp->snd_nxt;
			tp->sack_flags |= TSACK_F_SACKRESCUED;
			break;
		}
		if (SEQ_LT(nextrexmt, old_snd_max) &&
		    SEQ_LT(tp->rexmt_high, tp->snd_nxt)) {
			tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
			if (tcp_aggressive_rescuesack &&
			    (tp->sack_flags & TSACK_F_SACKRESCUED) &&
			    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
				/* Drag RescueRxt along with HighRxt */
				tp->rexmt_rescue = tp->rexmt_high;
			}
		}
	}
	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	tp->snd_cwnd = ocwnd;
}
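
/*
 * Illustrative sketch (not part of the original file): the send gate
 * of the retransmit loop above.  A SACK-based retransmit may go out
 * while the estimated data in flight ("pipe") is at least one maxseg
 * below the original cwnd, or while a forced first-hole retransmit is
 * still owed and nothing new has been sent.  Names are assumptions
 * for this example.
 */
#if 0
static int
sack_may_send_sketch(u_long ocwnd, uint32_t pipe, u_long maxseg,
    int force, int nseg_rexmt, int maxrexmt, int nseg)
{
	if ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)maxseg)
		return (1);
	return (force && nseg_rexmt < maxrexmt && nseg == 0);
}
#endif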
/*
 * Return TRUE if some new segments were sent.
 */
static boolean_t
tcp_sack_limitedxmit(struct tcpcb *tp)
{
	tcp_seq oldsndnxt = tp->snd_nxt;
	tcp_seq oldsndmax = tp->snd_max;
	u_long ocwnd = tp->snd_cwnd;
	uint32_t pipe, sent;
	boolean_t ret = FALSE;
	tcp_seq_diff_t cwnd_left;
	tcp_seq next;

	tp->rexmt_high = tp->snd_una - 1;
	pipe = tcp_sack_compute_pipe(tp);
	cwnd_left = (tcp_seq_diff_t)(ocwnd - pipe);
	if (cwnd_left < (tcp_seq_diff_t)tp->t_maxseg)
		return FALSE;

	next = tp->snd_nxt = tp->snd_max;
	tp->snd_cwnd = tp->snd_nxt - tp->snd_una +
	    rounddown(cwnd_left, tp->t_maxseg);

	tp->t_flags |= TF_XMITNOW;
	tcp_output(tp);

	sent = tp->snd_nxt - next;
	if (sent > 0) {
		tcpstat.tcps_sndlimited += howmany(sent, tp->t_maxseg);
		ret = TRUE;
	}

	if (SEQ_LT(oldsndnxt, oldsndmax)) {
		KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
		    ("snd_una moved in other threads"));
		tp->snd_nxt = oldsndnxt;
	}
	tp->snd_cwnd = ocwnd;

	if (ret && TCP_DO_NCR(tp))
		tcp_ncr_update_rxtthresh(tp);

	return ret;
}
/*
 * Reset idle time and keep-alive timer, typically called when a valid
 * TCP packet is received but may also be called when FASTKEEP is set
 * to prevent the previous long-timeout from calculating to a drop.
 *
 * Only update t_rcvtime for non-SYN packets.
 *
 * Handle the case where one side thinks the connection is established
 * but the other side has, say, rebooted without cleaning out the
 * connection.  The SYNs could be construed as an attack and wind
 * up ignored, but in case it isn't an attack we can validate the
 * connection by forcing a keepalive.
 */
void
tcp_timer_keep_activity(struct tcpcb *tp, int thflags)
{
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) {
			tp->t_flags |= TF_KEEPALIVE;
			tcp_callout_reset(tp, tp->tt_keep, hz / 2,
			    tcp_timer_keep);
		} else {
			tp->t_rcvtime = ticks;
			tp->t_flags &= ~TF_KEEPALIVE;
			tcp_callout_reset(tp, tp->tt_keep,
			    tp->t_keepidle,
			    tcp_timer_keep);
		}
	}
}

static u_long
tcp_rmx_msl(const struct tcpcb *tp)
{
	struct rtentry *rt;
	struct inpcb *inp = tp->t_inpcb;
	u_long msl;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL || rt->rt_rmx.rmx_msl == 0)
		return tcp_msl;

	msl = (rt->rt_rmx.rmx_msl * hz) / 1000;
	if (msl == 0)
		msl = 1;

	return msl;
}

static void
tcp_established(struct tcpcb *tp)
{
	tp->t_state = TCPS_ESTABLISHED;
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep);

	if (tp->t_rxtsyn > 0) {
		/*
		 * RFC6298:
		 * "If the timer expires awaiting the ACK of a SYN segment
		 *  and the TCP implementation is using an RTO less than 3
		 *  seconds, the RTO MUST be re-initialized to 3 seconds
		 *  when data transmission begins"
		 */
		if (tp->t_rxtcur < TCPTV_RTOBASE3)
			tp->t_rxtcur = TCPTV_RTOBASE3;
	}
}
/*
 * Returns TRUE if the ACK should be dropped.
 */
static boolean_t
tcp_recv_dupack(struct tcpcb *tp, tcp_seq th_ack, u_int to_flags)
{
	boolean_t fast_sack_rexmt = TRUE;

	tcpstat.tcps_rcvdupack++;

	/*
	 * We have outstanding data (other than a window probe),
	 * this is a completely duplicate ack (i.e., window info
	 * didn't change), the ack is the biggest we've seen and
	 * we've seen exactly our rexmt threshold of them, so
	 * assume a packet has been dropped and retransmit it.
	 * Kludge snd_nxt & the congestion window so we send only
	 * this one packet.
	 */
	if (IN_FASTRECOVERY(tp)) {
		if (TCP_DO_SACK(tp)) {
			boolean_t force = FALSE;

			if (tp->snd_una == tp->rexmt_high &&
			    (to_flags & (TOF_SACK | TOF_SACK_REDUNDANT)) ==
			    TOF_SACK) {
				/*
				 * New segments got SACKed and
				 * no retransmit yet.
				 */
				force = TRUE;
			}

			/* No artificial cwnd inflation. */
			tcp_sack_rexmt(tp, force);
		} else {
			/*
			 * Dup acks mean that packets have left
			 * the network (they're now cached at the
			 * receiver) so bump cwnd by the amount in
			 * the receiver to keep a constant number
			 * of packets in the network.
			 */
			tp->snd_cwnd += tp->t_maxseg;
			tcp_output(tp);
		}
		return TRUE;
	} else if (SEQ_LT(th_ack, tp->snd_recover)) {
		tp->t_dupacks = 0;
		return FALSE;
	} else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) &&
	    (to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) ==
	    (TOF_DSACK | TOF_SACK_REDUNDANT)) {
		/*
		 * If the ACK carries DSACK and other SACK blocks
		 * carry information that we already know,
		 * don't count this ACK as a duplicate ACK.  This
		 * prevents spurious early retransmit and fast
		 * retransmit.  This also meets the requirement of
		 * RFC3042 that new segments should not be sent if
		 * the SACK blocks do not contain new information
		 * (XXX we actually loosen the requirement that only
		 * DSACK is checked here).
		 *
		 * This kind of ACK is usually sent after spurious
		 * retransmits.
		 */
		/* Do nothing; don't change t_dupacks */
		return TRUE;
	} else if (tp->t_dupacks == 0 && TCP_DO_NCR(tp)) {
		tcp_ncr_update_rxtthresh(tp);
	}
	if (++tp->t_dupacks == tp->t_rxtthresh) {
		tcp_seq old_snd_nxt;
		u_long win;

fastretransmit:
		if (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) {
			tcp_save_congestion_state(tp);
			tp->rxt_flags |= TRXT_F_FASTREXMT;
		}
		/*
		 * We know we're losing at the current window size,
		 * so do congestion avoidance: set ssthresh to half
		 * the current window and pull our congestion window
		 * back to the new ssthresh.
		 */
		win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_ssthresh = win * tp->t_maxseg;
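		/*
		 * Worked example (illustrative, not part of the original
		 * file): with snd_wnd = 65535 and snd_cwnd = 20 segments
		 * of 1460 bytes (29200 bytes), win = 29200 / 2 / 1460 = 10,
		 * so ssthresh becomes 10 segments and recovery proceeds
		 * from half the window that was in flight at loss.
		 */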
		ENTER_FASTRECOVERY(tp);
		tp->snd_recover = tp->snd_max;
		tcp_callout_stop(tp, tp->tt_rexmt);
		tp->t_rtttime = 0;
		old_snd_nxt = tp->snd_nxt;
		tp->snd_nxt = th_ack;
		if (TCP_DO_SACK(tp)) {
			uint32_t rxtlen;

			rxtlen = tcp_sack_first_unsacked_len(tp);
			if (rxtlen > tp->t_maxseg)
				rxtlen = tp->t_maxseg;
			tp->snd_cwnd = rxtlen;
		} else {
			tp->snd_cwnd = tp->t_maxseg;
		}
		tcp_output(tp);
		++tcpstat.tcps_sndfastrexmit;
		tp->snd_cwnd = tp->snd_ssthresh;
		tp->rexmt_high = tp->snd_nxt;
		tp->sack_flags &= ~TSACK_F_SACKRESCUED;
		if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
			tp->snd_nxt = old_snd_nxt;
		KASSERT(tp->snd_limited <= 2, ("tp->snd_limited too big"));
		if (TCP_DO_SACK(tp)) {
			if (fast_sack_rexmt)
				tcp_sack_rexmt(tp, FALSE);
		} else {
			tp->snd_cwnd += tp->t_maxseg *
			    (tp->t_dupacks - tp->snd_limited);
		}
	} else if ((tcp_do_rfc6675 && TCP_DO_SACK(tp)) || TCP_DO_NCR(tp)) {
		/*
		 * RFC6675 recommends reducing the byte threshold,
		 * and entering fast retransmit if IsLost(snd_una).
		 * However, if we use IsLost(snd_una) based fast
		 * retransmit here, segment reordering will cause
		 * spurious retransmits.  So we defer the
		 * IsLost(snd_una) based fast retransmit until the
		 * extended limited transmit can't send any segments
		 * and early retransmit can't be done.
		 */
		if (tcp_rfc6675_rxt && tcp_do_rfc6675 &&
		    tcp_sack_islost(&tp->scb, tp->snd_una))
			goto fastretransmit;
		if (tcp_do_limitedtransmit || TCP_DO_NCR(tp)) {
			if (!tcp_sack_limitedxmit(tp)) {
				/* outstanding data */
				uint32_t ownd = tp->snd_max - tp->snd_una;

				if (need_early_retransmit(tp, ownd)) {
					++tcpstat.tcps_sndearlyrexmit;
					tp->rxt_flags |= TRXT_F_EARLYREXMT;
					goto fastretransmit;
				} else if (tcp_do_rfc6675 &&
				    tcp_sack_islost(&tp->scb, tp->snd_una)) {
					fast_sack_rexmt = FALSE;
					goto fastretransmit;
				}
			}
		}
	} else if (tcp_do_limitedtransmit) {
		u_long oldcwnd = tp->snd_cwnd;
		tcp_seq oldsndmax = tp->snd_max;
		tcp_seq oldsndnxt = tp->snd_nxt;
		/* outstanding data */
		uint32_t ownd = tp->snd_max - tp->snd_una;
		u_int sent;

		KASSERT(tp->t_dupacks == 1 || tp->t_dupacks == 2,
		    ("dupacks not 1 or 2"));
		if (tp->t_dupacks == 1)
			tp->snd_limited = 0;
		tp->snd_nxt = tp->snd_max;
		tp->snd_cwnd = ownd +
		    (tp->t_dupacks - tp->snd_limited) * tp->t_maxseg;
		tp->t_flags |= TF_XMITNOW;
		tcp_output(tp);

		if (SEQ_LT(oldsndnxt, oldsndmax)) {
			KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
			    ("snd_una moved in other threads"));
			tp->snd_nxt = oldsndnxt;
		}
		tp->snd_cwnd = oldcwnd;
		sent = tp->snd_max - oldsndmax;
		if (sent > tp->t_maxseg) {
			KASSERT((tp->t_dupacks == 2 && tp->snd_limited == 0) ||
			    (sent == tp->t_maxseg + 1 &&
			     (tp->t_flags & TF_SENTFIN)),
			    ("sent too much"));
			KASSERT(sent <= tp->t_maxseg * 2,
			    ("sent too many segments"));
			tp->snd_limited = 2;
			tcpstat.tcps_sndlimited += 2;
		} else if (sent > 0) {
			++tp->snd_limited;
			++tcpstat.tcps_sndlimited;
		} else if (need_early_retransmit(tp, ownd)) {
			++tcpstat.tcps_sndearlyrexmit;
			tp->rxt_flags |= TRXT_F_EARLYREXMT;
			goto fastretransmit;
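
/*
 * Illustrative sketch (not part of the original file): the RFC 3042
 * limited-transmit bookkeeping above.  On the first two duplicate ACKs
 * the sender may emit at most (dupacks - snd_limited) new segments
 * beyond the outstanding window ownd, with snd_limited recording how
 * many such segments are already out.  Names are assumptions for this
 * example.
 */
#if 0
static u_long
limited_transmit_cwnd_sketch(u_long ownd, int dupacks, u_long snd_limited,
    u_long maxseg)
{
	if (dupacks != 1 && dupacks != 2)	/* only first two dupacks */
		return (ownd);
	return (ownd + (dupacks - snd_limited) * maxseg);
}
#endif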