/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/in_cksum.h>

#include <sys/socketvar2.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>

u_char tcp_saveipgen[40];	/* must hold the largest IP header, currently IPv6 */
struct tcphdr tcp_savetcp;
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netproto/key/key.h>
#endif

MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

#ifdef TCP_DROP_SYNFIN
static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif

static int tcp_do_limitedtransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
    &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_early_retransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
    &tcp_do_early_retransmit, 0, "Early retransmit");

int tcp_aggregate_acks = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
    &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");

static int tcp_do_eifel_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
    &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");

static int tcp_do_abc = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
    &tcp_do_abc, 0,
    "TCP Appropriate Byte Counting (RFC 3465)");

/*
 * Define as tunable for easy testing with SACK on and off.
 * Warning: do not change setting in the middle of an existing active TCP flow,
 * else strange things might happen to that flow.
 */
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable SACK Algorithms");

int tcp_do_smartsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
    &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");

int tcp_do_rescuesack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW,
    &tcp_do_rescuesack, 0, "Rescue retransmission for SACK");

int tcp_aggressive_rescuesack = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW,
    &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK");

int tcp_do_rfc3517bis = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis, CTLFLAG_RW,
    &tcp_do_rfc3517bis, 0, "Enable RFC3517 update");

int tcp_rfc3517bis_rxt = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis_rxt, CTLFLAG_RW,
    &tcp_rfc3517bis_rxt, 0, "Enable RFC3517 retransmit update");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

int tcp_sosend_agglim = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW,
    &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit");

int tcp_sosend_async = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW,
    &tcp_sosend_async, 0, "TCP asynchronized pru_send");

static int tcp_ignore_redun_dsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW,
    &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK");

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t,
		     tcp_seq);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int, tcp_seq);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
static void	 tcp_sack_rexmt(struct tcpcb *, struct tcphdr *);
static boolean_t tcp_sack_limitedxmit(struct tcpcb *);
static int	 tcp_rmx_msl(const struct tcpcb *);
static void	 tcp_established(struct tcpcb *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
	    (tp)->t_inpcb->in6p_route.ro_rt) \
		nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- delayed acks are enabled and
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 */
#define DELAY_ACK(tp) \
	(tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
	!(tp->t_flags & TF_RXWIN0SENT))

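/*
 * A window update is acceptable (per RFC 793) when the segment carrying
 * it is newer than the one that last updated the window: its sequence
 * number is strictly newer (snd_wl1), or it is the same segment but
 * carries a newer ACK (snd_wl2), or both match and the advertised
 * window simply grew.  This ordering keeps old, reordered segments
 * from shrinking the send window.
 */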
#define acceptable_window_update(tp, th, tiwin) \
	(SEQ_LT(tp->snd_wl1, th->th_seq) || \
	    (tp->snd_wl1 == th->th_seq && \
	     (SEQ_LT(tp->snd_wl2, th->th_ack) || \
	      (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))

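/*
 * iceildiv() divides rounding up, e.g. iceildiv(5, 2) == 3, so
 * iceildiv(ownd, t_maxseg) below is the number of (possibly partial)
 * segments outstanding.  Early retransmit kicks in when the outstanding
 * window is too small for enough duplicate ACKs to arrive to trigger a
 * normal fast retransmit.
 */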
#define iceildiv(n, d)		(((n)+(d)-1) / (d))
#define need_early_retransmit(tp, ownd) \
	(tcp_do_early_retransmit && \
	 (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \
	 ownd < ((tp->t_rxtthresh + 1) * tp->t_maxseg) && \
	 tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \
	 (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \
	  tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg)))

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *te;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with th == NULL after becoming established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let through the missing segment that caused this
	 * queue to form.  Always keep one global queue entry spare to be
	 * able to process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}

	/* Allocate a new queue entry. */
	te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}
	atomic_add_int(&tcp_reass_qsize, 1);

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		tcp_seq_diff_t i;

		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
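		/*
		 * E.g. a preceding segment at seq 0xfffffff0 with length
		 * 0x20 ends at seq 0x10 after the sequence space wraps;
		 * against an incoming seq of 0, the unsigned sum wraps and
		 * the signed result i = 0x10 correctly reports a 16-byte
		 * overlap.
		 */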
		if (i > 0) {		/* overlaps preceding segment */
			tp->sack_flags |=
			    (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG);
			/* enclosing block starts w/ preceding segment */
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			if (i >= *tlenp) {
				/* preceding encloses incoming segment */
				tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
				    p->tqe_th->th_seq + p->tqe_len,
				    p->tqe_th->th_flags);
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				kfree(te, M_TSEGQ);
				atomic_add_int(&tcp_reass_qsize, -1);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
			/* incoming segment end is enclosing block end */
			tp->encloseblk.rblk_end = TCP_SACK_BLKEND(
			    th->th_seq + *tlenp, th->th_flags);
			/* trim end of reported D-SACK block */
			tp->reportblk.rblk_end = th->th_seq;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
		tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags);
		struct tseg_qent *nq;

		if (i <= 0)
			break;
		if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
			/* first time through */
			tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG);
			tp->encloseblk = tp->reportblk;
			/* report trailing duplicate D-SACK segment */
			tp->reportblk.rblk_start = q->tqe_th->th_seq;
		}
		if ((tp->sack_flags & TSACK_F_ENCLOSESEG) &&
		    SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) {
			/* extend enclosing block if one exists */
			tp->encloseblk.rblk_end = qend_sack;
		}
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	/* check if can coalesce with following segment */
	if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
		tcp_seq tend = te->tqe_th->th_seq + te->tqe_len;
		tcp_seq tend_sack = TCP_SACK_BLKEND(tend, te->tqe_th->th_flags);

		te->tqe_len += q->tqe_len;
		if (q->tqe_th->th_flags & TH_FIN)
			te->tqe_th->th_flags |= TH_FIN;
		m_cat(te->tqe_m, q->tqe_m);
		tp->encloseblk.rblk_end = tend_sack;
		/*
		 * When not reporting a duplicate segment, use
		 * the larger enclosing block as the SACK block.
		 */
		if (!(tp->sack_flags & TSACK_F_DUPSEG))
			tp->reportblk.rblk_end = tend_sack;
		LIST_REMOVE(q, tqe_q);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		/* check if can coalesce with preceding segment */
		if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
			p->tqe_len += te->tqe_len;
			m_cat(p->tqe_m, te->tqe_m);
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			/*
			 * When not reporting a duplicate segment, use
			 * the larger enclosing block as the SACK block.
			 */
			if (!(tp->sack_flags & TSACK_F_DUPSEG))
				tp->reportblk.rblk_start = p->tqe_th->th_seq;
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
		} else {
			LIST_INSERT_AFTER(p, te, tqe_q);
		}
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	tp->rcv_nxt += q->tqe_len;
	if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
		/* no SACK block to report since ACK advanced */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	}
	/* no enclosing block to report since ACK advanced */
	tp->sack_flags &= ~TSACK_F_ENCLOSESEG;
	flags = q->tqe_th->th_flags & TH_FIN;
	LIST_REMOVE(q, tqe_q);
	KASSERT(LIST_EMPTY(&tp->t_segq) ||
	    LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt,
	    ("segment not coalesced"));
	if (so->so_state & SS_CANTRCVMORE) {
		m_freem(q->tqe_m);
	} else {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		ssb_appendstream(&so->so_rcv, q->tqe_m);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}
	kfree(q, M_TSEGQ);
	atomic_add_int(&tcp_reass_qsize, -1);
	ND6_HINT(tp);
	sorwakeup(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    offsetof(struct ip6_hdr, ip6_dst));
		return (IPPROTO_DONE);
	}

	tcp_input(mp, offp, proto);
	return (IPPROTO_DONE);
}
#endif

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	int off0;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int tlen, off;
	int len = 0;
	int drop_hdrlen;
	struct tcpcb *tp = NULL;
	int thflags;
	struct socket *so = NULL;
	int todrop, acked;
	boolean_t ourfinisacked, needoutput = FALSE;
	u_long tiwin;
	int recvwin;
	struct tcpopt to;		/* options in this segment */
	struct sockaddr_in *next_hop = NULL;
	int rstreason;			/* For badport_bandlim accounting purposes */
	int cpu;
	struct ip6_hdr *ip6 = NULL;
	struct mbuf *m;
#ifdef INET6
	boolean_t isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
#ifdef TCPDEBUG
	short ostate = 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;

	tcpstat.tcps_rcvtotal++;

	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		struct m_tag *mtag;

		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE;
#endif

	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 addresses in the
		 * source.  As we use all-zero to indicate an unbound or
		 * unconnected pcb, an unspecified IPv6 source address can
		 * be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof(struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		/* already checked and pulled up in ip_demux() */
		KASSERT(m->m_len >= sizeof(struct tcpiphdr),
		    ("TCP header not in one mbuf: m->m_len %d", m->m_len));
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					  ip->ip_len +
					  IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof(struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof ipov->ih_x1);
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
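		/*
		 * Either way th_sum now holds the ones-complement
		 * residual: zero means the checksum verified.
		 */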
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
#ifdef INET6
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
#endif
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	/* already checked and pulled up in ip_demux() */
	KASSERT(off >= sizeof(struct tcphdr) && off <= tlen,
	    ("bad TCP data offset %d (tlen %d)", off, tlen));
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof(struct tcphdr)) {
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		} else {
			/* already pulled up in ip_demux() */
			KASSERT(m->m_len >= sizeof(struct ip) + off,
			    ("TCP header and options not in one mbuf: "
			     "m_len %d, off %d", m->m_len, off));
		}
		optlen = off - sizeof(struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

#ifdef TCP_DROP_SYNFIN
	/*
	 * If the drop_synfin option is enabled, drop all packets with
	 * both the SYN and FIN bits set.  This prevents e.g. nmap from
	 * identifying the TCP/IP stack.
	 *
	 * This is a violation of the TCP specification.
	 */
	if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN))
		goto drop;
#endif

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
	 * until after ip6_savecontrol() is called and before other functions
	 * which don't want those proto headers.
	 * Because ip6_savecontrol() is going to parse the mbuf to
	 * search for data to be passed up to user-land, it wants mbuf
	 * parameters to be unchanged.
	 * XXX: the call of ip6_savecontrol() has been obsoleted based on
	 * latest version of the advanced API (20020110).
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
findpcb:
	/* IPFIREWALL_FORWARD section */
	if (next_hop != NULL && !isipv6) {  /* IPv6 support is not there yet */
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * already got one like this?
		 */
		cpu = mycpu->gd_cpuid;
		inp = in_pcblookup_hash(&tcbinfo[cpu],
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 */

			/*
			 * The rest of the ipfw code stores the port in
			 * host order.  XXX
			 * (The IP address is still in network order.)
			 */
			in_port_t dport = next_hop->sin_port ?
			    htons(next_hop->sin_port) :
			    th->th_dport;

			cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport,
			    next_hop->sin_addr.s_addr, dport);
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr, dport,
			    1, m->m_pkthdr.rcvif);
		}
	} else {
		if (isipv6) {
			inp = in6_pcblookup_hash(&tcbinfo[0],
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		} else {
			cpu = mycpu->gd_cpuid;
			inp = in_pcblookup_hash(&tcbinfo[cpu],
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    1, m->m_pkthdr.rcvif);
		}
	}

	/*
	 * If the state is CLOSED (i.e., TCB does not exist) then
	 * all data in the incoming segment is discarded.
	 * If the TCB exists but is in CLOSED state, it is embryonic,
	 * but should either do a listen or a connect soon.
	 */
	if (inp == NULL) {
		if (log_in_vain) {
#ifdef INET6
			char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
#else
			char dbuf[sizeof "aaa.bbb.ccc.ddd"];
			char sbuf[sizeof "aaa.bbb.ccc.ddd"];
#endif
			if (isipv6) {
				strcpy(dbuf, "[");
				strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
				strcat(dbuf, "]");
				strcpy(sbuf, "[");
				strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
				strcat(sbuf, "]");
			} else {
				strcpy(dbuf, inet_ntoa(ip->ip_dst));
				strcpy(sbuf, inet_ntoa(ip->ip_src));
			}
			switch (log_in_vain) {
			case 1:
				if (!(thflags & TH_SYN))
					break;
			case 2:
				log(LOG_INFO,
				    "Connection attempt to TCP %s:%d "
				    "from %s:%d flags:0x%02x\n",
				    dbuf, ntohs(th->th_dport), sbuf,
				    ntohs(th->th_sport), thflags);
				break;
			default:
				break;
			}
		}
		if (blackhole) {
			switch (blackhole) {
			case 1:
				if (thflags & TH_SYN)
					goto drop;
				break;
			case 2:
				goto drop;
			default:
				goto drop;
			}
		}
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef IPSEC
	if (isipv6) {
		if (ipsec6_in_reject_so(m, inp->inp_socket)) {
			ipsec6stat.in_polvio++;
			goto drop;
		}
	} else {
		if (ipsec4_in_reject_so(m, inp->inp_socket)) {
			ipsecstat.in_polvio++;
			goto drop;
		}
	}
#endif
#ifdef FAST_IPSEC
	if (isipv6) {
		if (ipsec6_in_reject(m, inp))
			goto drop;
	} else {
		if (ipsec4_in_reject(m, inp))
			goto drop;
	}
#endif
	/* Check the minimum TTL for socket. */
#ifdef INET6
	if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl)
		goto drop;
#endif

	tp = intotcpcb(inp);
	if (tp == NULL) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	if (tp->t_state <= TCPS_CLOSED)
		goto drop;

	so = inp->inp_socket;

#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy(ip6, tcp_saveipgen, sizeof(*ip6));
		else
			bcopy(ip, tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif

	bzero(&to, sizeof to);

	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

#ifdef INET6
		inc.inc_isipv6 = (isipv6 == TRUE);
#endif
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
			inc.inc6_route.ro_rt = NULL;		/* XXX */
		} else {
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
			inc.inc_route.ro_rt = NULL;		/* XXX */
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * If the state is LISTEN then ignore segment if it contains
		 * a RST.  If the segment contains an ACK then it is bad and
		 * send a RST.  If it does not contain a SYN then it is not
		 * interesting; drop it.
		 *
		 * If the state is SYN_RECEIVED (syncache) and seg contains
		 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
		 * contains a RST, check the sequence number to see if it
		 * is a valid reset segment.
		 */
		if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
			if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
				if (!syncache_expand(&inc, th, &so, m)) {
					/*
					 * No syncache entry, or ACK was not
					 * for our SYN/ACK.  Send a RST.
					 */
					tcpstat.tcps_badsyn++;
					rstreason = BANDLIM_RST_OPENPORT;
					goto dropwithreset;
				}

				/*
				 * Could not complete 3-way handshake,
				 * connection is being closed down, and
				 * syncache will free mbuf.
				 */
				if (so == NULL)
					return(IPPROTO_DONE);

				/*
				 * We must be in the correct protocol thread
				 * for this connection.
				 */
				KKASSERT(so->so_port == &curthread->td_msgport);

				/*
				 * Socket is created in state SYN_RECEIVED.
				 * Continue processing segment.
				 */
				inp = so->so_pcb;
				tp = intotcpcb(inp);
				/*
				 * This is what would have happened in
				 * tcp_output() when the SYN,ACK was sent.
				 */
				tp->snd_up = tp->snd_una;
				tp->snd_max = tp->snd_nxt = tp->iss + 1;
				tp->last_ack_sent = tp->rcv_nxt;

				goto after_listen;
			}
			if (thflags & TH_RST) {
				syncache_chkrst(&inc, th);
				goto drop;
			}
			if (thflags & TH_ACK) {
				syncache_badack(&inc);
				tcpstat.tcps_badsyn++;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			goto drop;
		}

		/*
		 * Segment's flags are (SYN) or (SYN | FIN).
		 */
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description text for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				tp = NULL;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * If it is from this socket, drop it; it must be forged.
		 * Don't bother responding if the destination was a broadcast.
		 */
		if (th->th_dport == th->th_sport) {
			if (isipv6) {
				if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
				    &ip6->ip6_src))
					goto drop;
			} else {
				if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
					goto drop;
			}
		}
		/*
		 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
		 *
		 * Note that it is quite possible to receive unicast
		 * link-layer packets with a broadcast IP address.  Use
		 * in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST | M_MCAST))
			goto drop;
		if (isipv6) {
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
				goto drop;
		} else {
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto drop;
		}
		/*
		 * SYN appears to be valid; create compressed TCP state
		 * for syncache, or perform t/tcp connection.
		 */
		if (so->so_qlen <= so->so_qlimit) {
			tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack);
			if (!syncache_add(&inc, &to, th, so, m))
				goto drop;

			/*
			 * Entry added to syncache, mbuf used to
			 * send SYN,ACK packet.
			 */
			return(IPPROTO_DONE);
		}
		goto drop;
	}

after_listen:
	/*
	 * Should not happen - syncache should pick up these connections.
	 *
	 * Once we are past handling listen sockets we must be in the
	 * correct protocol processing thread.
	 */
	KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state"));
	KKASSERT(so->so_port == &curthread->td_msgport);

	/* Unscale the window into a 32-bit value. */
	if (!(thflags & TH_SYN))
		tiwin = th->th_win << tp->snd_scale;
	else
		tiwin = th->th_win;
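	/*
	 * (The window field of a segment with SYN set is never scaled,
	 * per RFC 1323, hence the unscaled assignment above.)
	 */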

	/*
	 * This is the second part of the MSS DoS prevention code (after
	 * minmss on the sending side) and it deals with too many too small
	 * tcp packets in a too short timeframe (1 second).
	 *
	 * XXX Removed.  This code was crap.  It does not scale to network
	 *     speed, and default values break NFS.  Gone.
	 */
	/* REMOVED */

	/*
	 * Segment received on connection.
	 *
	 * Reset idle time and keep-alive timer.  Don't waste time if less
	 * than a second has elapsed.
	 */
	if ((int)(ticks - tp->t_rcvtime) > hz)
		tcp_timer_keep_activity(tp, thflags);

	/*
	 * Process options.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack);
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_requested_s_scale;
		}

		/*
		 * Initial send window; will be updated upon next ACK
		 */
		tp->snd_wnd = th->th_win;

		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (!(to.to_flags & TOF_MSS))
			to.to_mss = 0;
		tcp_mss(tp, to.to_mss);
		/*
		 * Only set the TF_SACK_PERMITTED per-connection flag
		 * if we got a SACK_PERMITTED option from the other side
		 * and the global tcp_do_sack variable is true.
		 */
		if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED))
			tp->t_flags |= TF_SACK_PERMITTED;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) &&
	    (!(to.to_flags & TOF_TS) ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    !IN_FASTRECOVERY(tp)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery
				 *
				 * If Eifel detection applies, then
				 * it is deterministic, so use it
				 * unconditionally over the old heuristic.
				 * Otherwise, fall back to the old heuristic.
				 */
				if (tcp_do_eifel_detect &&
				    (to.to_flags & TOF_TS) && to.to_tsecr &&
				    (tp->rxt_flags & TRXT_F_FIRSTACCACK)) {
					/* Eifel detection applicable. */
					if (to.to_tsecr < tp->t_rexmtTS) {
						tcp_revert_congestion_state(tp);
						++tcpstat.tcps_eifeldetected;
						if (tp->t_rxtshift != 1 ||
						    ticks >= tp->t_badrxtwin)
							++tcpstat.tcps_rttcantdetect;
					}
				} else if (tp->t_rxtshift == 1 &&
					   ticks < tp->t_badrxtwin) {
					tcp_revert_congestion_state(tp);
					++tcpstat.tcps_rttdetected;
				}
				tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK |
				    TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT);
				/*
				 * Recalculate the retransmit timer / rtt.
				 *
				 * Some machines (certain windows boxes)
				 * send broken timestamp replies during the
				 * SYN+ACK phase, ignore timestamps of 0.
				 */
				if ((to.to_flags & TOF_TS) && to.to_tsecr) {
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1,
					    th->th_ack);
				} else if (tp->t_rtttime &&
					   SEQ_GT(th->th_ack, tp->t_rtseq)) {
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime,
					    th->th_ack);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd.sb, acked);
				tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				tp->t_dupacks = 0;
				/*
				 * Update window information.
				 */
				if (tiwin != tp->snd_wnd &&
				    acceptable_window_update(tp, th, tiwin)) {
					/* keep track of pure window updates */
					if (tp->snd_wl2 == th->th_ack &&
					    tiwin > tp->snd_wnd)
						tcpstat.tcps_rcvwinupd++;
					tp->snd_wnd = tiwin;
					tp->snd_wl1 = th->th_seq;
					tp->snd_wl2 = th->th_ack;
					if (tp->snd_wnd > tp->max_sndwnd)
						tp->max_sndwnd = tp->snd_wnd;
				}
				m_freem(m);
				ND6_HINT(tp); /* some progress has been done */
				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max) {
					tcp_callout_stop(tp, tp->tt_rexmt);
				} else if (!tcp_callout_active(tp,
					    tp->tt_persist)) {
					tcp_callout_reset(tp, tp->tt_rexmt,
					    tp->t_rxtcur, tcp_timer_rexmt);
				}
				sowwakeup(so);
				if (so->so_snd.ssb_cc > 0)
					tcp_output(tp);
				return(IPPROTO_DONE);
			}
		} else if (tiwin == tp->snd_wnd &&
		    th->th_ack == tp->snd_una &&
		    LIST_EMPTY(&tp->t_segq) &&
		    tlen <= ssb_space(&so->so_rcv)) {
			u_long newsize = 0;	/* automatic sockbuf scaling */
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* some progress has been done */
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 * 1. the number of bytes received during the time it takes
			 *    one timestamp to be reflected back to us (the RTT);
			 * 2. received bytes per RTT is within seven eighths of the
			 *    current socket buffer size;
			 * 3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
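			/*
			 * Example with the defaults above: a 64KB buffer
			 * that saw more than 56KB (7/8) arrive within one
			 * reflected-timestamp RTT grows by recvbuf_inc
			 * (16KB) to 80KB, until recvbuf_max (2MB) or the
			 * TCP_MAXWIN << rcv_scale limit is reached.
			 */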
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.ssb_hiwat / 8 * 7) &&
					    so->so_rcv.ssb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    ulmin(so->so_rcv.ssb_hiwat +
							  tcp_autorcvbuf_inc,
							  tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}
			/*
			 * Add data to socket buffer.
			 */
			if (so->so_state & SS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size, give up when
				 * limit is reached.
				 *
				 * Adjusting the size can mess up ACK
				 * sequencing when pure window updates are
				 * being avoided (which is the default),
				 * so force an ack.
				 */
				lwkt_gettoken(&so->so_rcv.ssb_token);
				if (newsize) {
					tp->t_flags |= TF_RXRESIZED;
					if (!ssb_reserve(&so->so_rcv, newsize,
					    so, NULL)) {
						atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
					}
					if (newsize >=
					    (TCP_MAXWIN << tp->rcv_scale)) {
						atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
					}
				}
				m_adj(m, drop_hdrlen); /* delayed header drop */
				ssb_appendstream(&so->so_rcv, m);
				lwkt_reltoken(&so->so_rcv.ssb_token);
			}
			sorwakeup(so);
			/*
			 * This code is responsible for most of the ACKs
			 * the TCP stack sends back after receiving a data
			 * packet.  Note that the DELAY_ACK check fails if
			 * the delack timer is already running, which results
			 * in an ack being sent every other packet (which is
			 * what we want).
			 *
			 * We then further aggregate acks by not actually
			 * sending one until the protocol thread has completed
			 * processing the current backlog of packets.  This
			 * does not delay the ack any further, but allows us
			 * to take advantage of the packet aggregation that
			 * high speed NICs do (usually blocks of 8-10 packets)
			 * to send a single ack rather than four or five acks,
			 * greatly reducing the ack rate, the return channel
			 * bandwidth, and the protocol overhead on both ends.
			 *
			 * Since this also has the effect of slowing down
			 * the exponential slow-start ramp-up, systems with
			 * very large bandwidth-delay products might want
			 * to turn the feature off.
			 */
			if (DELAY_ACK(tp)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else if (tcp_aggregate_acks) {
				tp->t_flags |= TF_ACKNOW;
				if (!(tp->t_flags & TF_ONOUTPUTQ)) {
					tp->t_flags |= TF_ONOUTPUTQ;
					tp->tt_cpu = mycpu->gd_cpuid;
					TAILQ_INSERT_TAIL(
					    &tcpcbackq[tp->tt_cpu],
					    tp, t_outputq);
				}
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			return(IPPROTO_DONE);
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	recvwin = ssb_space(&so->so_rcv);
	if (recvwin < 0)
		recvwin = 0;
	tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt));
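	/*
	 * (rcv_adv - rcv_nxt is the portion of the previously advertised
	 * window the peer may still fill; taking the max guarantees we
	 * never renege on an advertisement we already made.)
	 */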

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {
	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if (thflags & TH_RST) {
			if (thflags & TH_ACK)
				tp = tcp_drop(tp, ECONNREFUSED);
			goto drop;
		}
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			/* Our SYN was acked. */
			tcpstat.tcps_connects++;
			soisconnected(so);
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE | TF_REQ_SCALE))
				tp->rcv_scale = tp->request_r_scale;
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			tcp_callout_stop(tp, tp->tt_rexmt);
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tcp_established(tp);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.
			 * Do 3-way handshake:
			 *	SYN-SENT  -> SYN-RECEIVED
			 *	SYN-SENT* -> SYN-RECEIVED*
			 */
			tp->t_flags |= TF_ACKNOW;
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing (we no longer bother with T/TCP).
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
	case TCPS_TIME_WAIT:
		break;			/* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *	In all states except SYN-SENT, all reset (RST) segments
	 *	are validated by checking their SEQ-fields.  A reset is
	 *	valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *	we should test against last_ack_sent instead of rcv_nxt.
	 *	The sequence number in the reset segment is normally an
	 *	echo of our outgoing acknowledgement numbers, but some hosts
	 *	send a reset with the sequence number at the rightmost edge
	 *	of our receive window, and we have to handle this case.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *    A TCP SHOULD allow a received RST segment to include data.
	 *
	 *    DISCUSSION
	 *         It has been suggested that a RST segment could contain
	 *         ASCII text that encoded and explained the cause of the
	 *         RST.  No standard has yet been established for such
	 *         data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				tp = tcp_close(tp);
				break;

			case TCPS_TIME_WAIT:
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
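		/*
		 * (24 days is just under the ~24.8 days it takes the
		 * fastest RFC 1323 timestamp clock, ticking once per
		 * millisecond, to step through 2^31 values.)
		 */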
1660 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1661 /*
1662 * Invalidate ts_recent. If this segment updates
1663 * ts_recent, the age will be reset later and ts_recent
1664 * will get a valid value. If it does not, setting
1665 * ts_recent to zero will at least satisfy the
1666 * requirement that zero be placed in the timestamp
1667 * echo reply when ts_recent isn't valid. The
1668 * age isn't reset until we get a valid ts_recent
1669 * because we don't want out-of-order segments to be
1670 * dropped when ts_recent is old.
1671 */
1672 tp->ts_recent = 0;
1673 } else {
1674 tcpstat.tcps_rcvduppack++;
1675 tcpstat.tcps_rcvdupbyte += tlen;
1676 tcpstat.tcps_pawsdrop++;
1677 if (tlen)
1678 goto dropafterack;
1679 goto drop;
1680 }
1681 }
1682
1683 /*
1684 * In the SYN-RECEIVED state, validate that the packet belongs to
1685 * this connection before trimming the data to fit the receive
1686 * window. Check the sequence number versus IRS since we know
1687 * the sequence numbers haven't wrapped. This is a partial fix
1688 * for the "LAND" DoS attack.
1689 */
1690 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1691 rstreason = BANDLIM_RST_OPENPORT;
1692 goto dropwithreset;
1693 }
1694
1695 todrop = tp->rcv_nxt - th->th_seq;
1696 if (todrop > 0) {
1697 if (TCP_DO_SACK(tp)) {
1698 /* Report duplicate segment at head of packet. */
1699 tp->reportblk.rblk_start = th->th_seq;
1700 tp->reportblk.rblk_end = TCP_SACK_BLKEND(
1701 th->th_seq + tlen, thflags);
1702 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt))
1703 tp->reportblk.rblk_end = tp->rcv_nxt;
1704 tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_SACKLEFT);
1705 tp->t_flags |= TF_ACKNOW;
1706 }
1707 if (thflags & TH_SYN) {
1708 thflags &= ~TH_SYN;
1709 th->th_seq++;
1710 if (th->th_urp > 1)
1711 th->th_urp--;
1712 else
1713 thflags &= ~TH_URG;
1714 todrop--;
1715 }
1716 /*
1717 * Following if statement from Stevens, vol. 2, p. 960.
1718 */
1719 if (todrop > tlen ||
1720 (todrop == tlen && !(thflags & TH_FIN))) {
1721 /*
1722 * Any valid FIN must be to the left of the window.
1723 * At this point the FIN must be a duplicate or out
1724 * of sequence; drop it.
1725 */
1726 thflags &= ~TH_FIN;
1727
1728 /*
1729 * Send an ACK to resynchronize and drop any data.
1730 * But keep on processing for RST or ACK.
1731 */
1732 tp->t_flags |= TF_ACKNOW;
1733 todrop = tlen;
1734 tcpstat.tcps_rcvduppack++;
1735 tcpstat.tcps_rcvdupbyte += todrop;
1736 } else {
1737 tcpstat.tcps_rcvpartduppack++;
1738 tcpstat.tcps_rcvpartdupbyte += todrop;
1739 }
1740 drop_hdrlen += todrop; /* drop from the top afterwards */
1741 th->th_seq += todrop;
1742 tlen -= todrop;
1743 if (th->th_urp > todrop)
1744 th->th_urp -= todrop;
1745 else {
1746 thflags &= ~TH_URG;
1747 th->th_urp = 0;
1748 }
1749 }
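/*
 * Worked example of the head trimming above (hypothetical numbers):
 * with rcv_nxt = 1000, th_seq = 900 and tlen = 300, todrop = 100,
 * so the first 100 duplicate bytes are skipped via drop_hdrlen,
 * th_seq advances to 1000 and tlen shrinks to 200; only the new
 * data survive.
 */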
1750
1751 /*
1752 * If new data are received on a connection after the
1753 * user processes are gone, then RST the other end.
1754 */
1755 if ((so->so_state & SS_NOFDREF) &&
1756 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1757 tp = tcp_close(tp);
1758 tcpstat.tcps_rcvafterclose++;
1759 rstreason = BANDLIM_UNLIMITED;
1760 goto dropwithreset;
1761 }
1762
1763 /*
1764 * If segment ends after window, drop trailing data
1765 * (and PUSH and FIN); if nothing left, just ACK.
1766 */
1767 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1768 if (todrop > 0) {
1769 tcpstat.tcps_rcvpackafterwin++;
1770 if (todrop >= tlen) {
1771 tcpstat.tcps_rcvbyteafterwin += tlen;
1772 /*
1773 * If a new connection request is received
1774 * while in TIME_WAIT, drop the old connection
1775 * and start over if the sequence numbers
1776 * are above the previous ones.
1777 */
1778 if (thflags & TH_SYN &&
1779 tp->t_state == TCPS_TIME_WAIT &&
1780 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1781 tp = tcp_close(tp);
1782 goto findpcb;
1783 }
1784 /*
1785 * If window is closed can only take segments at
1786 * window edge, and have to drop data and PUSH from
1787 * incoming segments. Continue processing, but
1788 * remember to ack. Otherwise, drop segment
1789 * and ack.
1790 */
1791 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1792 tp->t_flags |= TF_ACKNOW;
1793 tcpstat.tcps_rcvwinprobe++;
1794 } else
1795 goto dropafterack;
1796 } else
1797 tcpstat.tcps_rcvbyteafterwin += todrop;
1798 m_adj(m, -todrop);
1799 tlen -= todrop;
1800 thflags &= ~(TH_PUSH | TH_FIN);
1801 }
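/*
 * Worked example of the tail trimming above (hypothetical numbers):
 * with rcv_nxt = 1000 and rcv_wnd = 1000 the window edge is 2000;
 * a segment with th_seq = 1900 and tlen = 300 has todrop = 200, so
 * m_adj() trims the last 200 bytes, tlen becomes 100, and PUSH/FIN
 * are cleared since they now lie beyond the window.
 */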
1802
1803 /*
1804 * If last ACK falls within this segment's sequence numbers,
1805 * record its timestamp.
1806 * NOTE:
1807 * 1) That the test incorporates suggestions from the latest
1808 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1809 * 2) That updating only on newer timestamps interferes with
1810 * our earlier PAWS tests, so this check should be solely
1811 * predicated on the sequence space of this segment.
1812 * 3) That we modify the segment boundary check to be
1813 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN
1814 * instead of RFC1323's
1815 * Last.ACK.Sent < SEG.SEQ + SEG.LEN,
1816 * This modified check allows us to overcome RFC1323's
1817 * limitations as described in Stevens TCP/IP Illustrated
1818 * Vol. 2 p.869. In such cases, we can still calculate the
1819 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1820 */
1821 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
1822 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen
1823 + ((thflags & TH_SYN) != 0)
1824 + ((thflags & TH_FIN) != 0)))) {
1825 tp->ts_recent_age = ticks;
1826 tp->ts_recent = to.to_tsval;
1827 }
1828
1829 /*
1830 * If a SYN is in the window, then this is an
1831 * error and we send an RST and drop the connection.
1832 */
1833 if (thflags & TH_SYN) {
1834 tp = tcp_drop(tp, ECONNRESET);
1835 rstreason = BANDLIM_UNLIMITED;
1836 goto dropwithreset;
1837 }
1838
1839 /*
1840 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1841 * flag is on (half-synchronized state), then queue data for
1842 * later processing; else drop segment and return.
1843 */
1844 if (!(thflags & TH_ACK)) {
1845 if (tp->t_state == TCPS_SYN_RECEIVED ||
1846 (tp->t_flags & TF_NEEDSYN))
1847 goto step6;
1848 else
1849 goto drop;
1850 }
1851
1852 /*
1853 * Ack processing.
1854 */
1855 switch (tp->t_state) {
1856 /*
1857 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
1858 * ESTABLISHED state and continue processing.
1859 * The ACK was checked above.
1860 */
1861 case TCPS_SYN_RECEIVED:
1862
1863 tcpstat.tcps_connects++;
1864 soisconnected(so);
1865 /* Do window scaling? */
1866 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
1867 (TF_RCVD_SCALE | TF_REQ_SCALE))
1868 tp->rcv_scale = tp->request_r_scale;
1869 /*
1870 * Make transitions:
1871 * SYN-RECEIVED -> ESTABLISHED
1872 * SYN-RECEIVED* -> FIN-WAIT-1
1873 */
1874 tp->t_starttime = ticks;
1875 if (tp->t_flags & TF_NEEDFIN) {
1876 tp->t_state = TCPS_FIN_WAIT_1;
1877 tp->t_flags &= ~TF_NEEDFIN;
1878 } else {
1879 tcp_established(tp);
1880 }
1881 /*
1882 * If segment contains data or ACK, will call tcp_reass()
1883 * later; if not, do so now to pass queued data to user.
1884 */
1885 if (tlen == 0 && !(thflags & TH_FIN))
1886 tcp_reass(tp, NULL, NULL, NULL);
1887 /* fall into ... */
1888
1889 /*
1890 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
1891 * ACKs. If the ack is in the range
1892 * tp->snd_una < th->th_ack <= tp->snd_max
1893 * then advance tp->snd_una to th->th_ack and drop
1894 * data from the retransmission queue. If this ACK reflects
1895 * more up-to-date window information, update our window information.
1896 */
1897 case TCPS_ESTABLISHED:
1898 case TCPS_FIN_WAIT_1:
1899 case TCPS_FIN_WAIT_2:
1900 case TCPS_CLOSE_WAIT:
1901 case TCPS_CLOSING:
1902 case TCPS_LAST_ACK:
1903 case TCPS_TIME_WAIT:
1904
1905 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1906 if (TCP_DO_SACK(tp))
1907 tcp_sack_update_scoreboard(tp, &to);
1908 if (!tcp_callout_active(tp, tp->tt_rexmt) ||
1909 th->th_ack != tp->snd_una) {
1910 if (tlen == 0 && tiwin == tp->snd_wnd)
1911 tcpstat.tcps_rcvdupack++;
1912 tp->t_dupacks = 0;
1913 break;
1914 }
1915 if (tlen != 0 || tiwin != tp->snd_wnd) {
1916 if (!tcp_do_rfc3517bis ||
1917 !TCP_DO_SACK(tp) ||
1918 (to.to_flags &
1919 (TOF_SACK | TOF_SACK_REDUNDANT))
1920 != TOF_SACK) {
1921 tp->t_dupacks = 0;
1922 break;
1923 }
1924 /*
1925 * Update window information.
1926 */
1927 if (tiwin != tp->snd_wnd &&
1928 acceptable_window_update(tp, th, tiwin)) {
1929 /* keep track of pure window updates */
1930 if (tlen == 0 &&
1931 tp->snd_wl2 == th->th_ack &&
1932 tiwin > tp->snd_wnd)
1933 tcpstat.tcps_rcvwinupd++;
1934 tp->snd_wnd = tiwin;
1935 tp->snd_wl1 = th->th_seq;
1936 tp->snd_wl2 = th->th_ack;
1937 if (tp->snd_wnd > tp->max_sndwnd)
1938 tp->max_sndwnd = tp->snd_wnd;
1939 }
1940 }
1941 tcpstat.tcps_rcvdupack++;
1942
1943 /*
1944 * If we have outstanding data (other than
1945 * a window probe), this is a completely
1946 * duplicate ack (i.e., window info didn't
1947 * change), the ack is the biggest we've
1948 * seen, and we've seen exactly our rexmt
1949 * threshold of them, assume a packet
1950 * has been dropped and retransmit it.
1951 * Kludge snd_nxt & the congestion
1952 * window so we send only this one
1953 * packet.
1954 */
1955 if (IN_FASTRECOVERY(tp)) {
1956 if (TCP_DO_SACK(tp)) {
1957 /* No artificial cwnd inflation. */
1958 tcp_sack_rexmt(tp, th);
1959 } else {
1960 /*
1961 * Dup acks mean that packets
1962 * have left the network
1963 * (they're now cached at the
1964 * receiver) so bump cwnd by
1965 * the amount in the receiver
1966 * to keep a constant cwnd
1967 * packets in the network.
1968 */
1969 tp->snd_cwnd += tp->t_maxseg;
1970 tcp_output(tp);
1971 }
1972 } else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
1973 tp->t_dupacks = 0;
1974 break;
1975 } else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) &&
1976 (to.to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) ==
1977 (TOF_DSACK | TOF_SACK_REDUNDANT)) {
1978 /*
1979 * If the ACK carries DSACK and other
1980 * SACK blocks carry information that
1981 * we have already known, don't count
1982 * this ACK as duplicate ACK. This
1983 * prevents spurious early retransmit
1984 * and fast retransmit. This also
1985 * meets the requirement of RFC3042
1986 * that new segments should not be sent
1987 * if the SACK blocks do not contain
1988 * new information (XXX we actually
1989 * loosen the requirement that only DSACK
1990 * is checked here).
1991 *
1992 * These kinds of ACKs are usually sent
1993 * after a spurious retransmit.
1994 */
1995 /* Do nothing; don't change t_dupacks */
1996 } else if (++tp->t_dupacks == tp->t_rxtthresh) {
1997 tcp_seq old_snd_nxt;
1998 u_int win;
1999
2000fastretransmit:
2001 if (tcp_do_eifel_detect &&
2002 (tp->t_flags & TF_RCVD_TSTMP)) {
2003 tcp_save_congestion_state(tp);
2004 tp->rxt_flags |= TRXT_F_FASTREXMT;
2005 }
2006 /*
2007 * We know we're losing at the current
2008 * window size, so do congestion avoidance:
2009 * set ssthresh to half the current window
2010 * and pull our congestion window back to the
2011 * new ssthresh.
2012 */
2013 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 /
2014 tp->t_maxseg;
2015 if (win < 2)
2016 win = 2;
2017 tp->snd_ssthresh = win * tp->t_maxseg;
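/*
 * E.g. (hypothetical numbers) with snd_wnd = 29200,
 * snd_cwnd = 65535 and t_maxseg = 1460:
 * win = 29200 / 2 / 1460 = 10, so snd_ssthresh becomes
 * 14600 -- half the effective window, rounded down to a
 * segment boundary.
 */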
2018 ENTER_FASTRECOVERY(tp);
2019 tp->snd_recover = tp->snd_max;
2020 tcp_callout_stop(tp, tp->tt_rexmt);
2021 tp->t_rtttime = 0;
2022 old_snd_nxt = tp->snd_nxt;
2023 tp->snd_nxt = th->th_ack;
2024 tp->snd_cwnd = tp->t_maxseg;
2025 tcp_output(tp);
2026 ++tcpstat.tcps_sndfastrexmit;
2027 tp->snd_cwnd = tp->snd_ssthresh;
2028 tp->rexmt_high = tp->snd_nxt;
2029 tp->sack_flags &= ~TSACK_F_SACKRESCUED;
2030 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
2031 tp->snd_nxt = old_snd_nxt;
2032 KASSERT(tp->snd_limited <= 2,
2033 ("tp->snd_limited too big"));
2034 if (TCP_DO_SACK(tp))
2035 tcp_sack_rexmt(tp, th);
2036 else
2037 tp->snd_cwnd += tp->t_maxseg *
2038 (tp->t_dupacks - tp->snd_limited);
2039 } else if (tcp_do_rfc3517bis && TCP_DO_SACK(tp)) {
2040 if (tcp_rfc3517bis_rxt &&
2041 tcp_sack_islost(&tp->scb, tp->snd_una))
2042 goto fastretransmit;
2043 if (tcp_do_limitedtransmit) {
2044 /* outstanding data */
2045 uint32_t ownd =
2046 tp->snd_max - tp->snd_una;
2047
2048 if (!tcp_sack_limitedxmit(tp) &&
2049 need_early_retransmit(tp, ownd)) {
2050 ++tcpstat.tcps_sndearlyrexmit;
2051 tp->rxt_flags |=
2052 TRXT_F_EARLYREXMT;
2053 goto fastretransmit;
2054 }
2055 }
2056 } else if (tcp_do_limitedtransmit) {
2057 u_long oldcwnd = tp->snd_cwnd;
2058 tcp_seq oldsndmax = tp->snd_max;
2059 tcp_seq oldsndnxt = tp->snd_nxt;
2060 /* outstanding data */
2061 uint32_t ownd = tp->snd_max - tp->snd_una;
2062 u_int sent;
2063
2064 KASSERT(tp->t_dupacks == 1 ||
2065 tp->t_dupacks == 2,
2066 ("dupacks not 1 or 2"));
2067 if (tp->t_dupacks == 1)
2068 tp->snd_limited = 0;
2069 tp->snd_nxt = tp->snd_max;
2070 tp->snd_cwnd = ownd +
2071 (tp->t_dupacks - tp->snd_limited) *
2072 tp->t_maxseg;
2073 tcp_output(tp);
2074
2075 if (SEQ_LT(oldsndnxt, oldsndmax)) {
2076 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
2077 ("snd_una moved in other threads"));
2078 tp->snd_nxt = oldsndnxt;
2079 }
2080 tp->snd_cwnd = oldcwnd;
2081 sent = tp->snd_max - oldsndmax;
2082 if (sent > tp->t_maxseg) {
2083 KASSERT((tp->t_dupacks == 2 &&
2084 tp->snd_limited == 0) ||
2085 (sent == tp->t_maxseg + 1 &&
2086 tp->t_flags & TF_SENTFIN),
2087 ("sent too much"));
2088 KASSERT(sent <= tp->t_maxseg * 2,
2089 ("sent too many segments"));
2090 tp->snd_limited = 2;
2091 tcpstat.tcps_sndlimited += 2;
2092 } else if (sent > 0) {
2093 ++tp->snd_limited;
2094 ++tcpstat.tcps_sndlimited;
2095 } else if (need_early_retransmit(tp, ownd)) {
2096 ++tcpstat.tcps_sndearlyrexmit;
2097 tp->rxt_flags |= TRXT_F_EARLYREXMT;
2098 goto fastretransmit;
2099 }
2100 }
2101 if (tlen != 0)
2102 break;
2103 else
2104 goto drop;
2105 }
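/*
 * Sketch of the limited transmit path above (RFC 3042): on the
 * first or second duplicate ACK, snd_cwnd is temporarily raised to
 * ownd + (t_dupacks - snd_limited) * t_maxseg so that at most one
 * or two previously unsent segments go out; snd_limited records
 * how many such segments have been sent so far.
 */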
2106
2107 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
2108 tp->t_dupacks = 0;
2109 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2110 /*
2111 * Detected optimistic ACK attack.
2112 * Force slow-start to de-synchronize attack.
2113 */
2114 tp->snd_cwnd = tp->t_maxseg;
2115 tp->snd_wacked = 0;
2116
2117 tcpstat.tcps_rcvacktoomuch++;
2118 goto dropafterack;
2119 }
2120 /*
2121 * If we reach this point, ACK is not a duplicate,
2122 * i.e., it ACKs something we sent.
2123 */
2124 if (tp->t_flags & TF_NEEDSYN) {
2125 /*
2126 * T/TCP: Connection was half-synchronized, and our
2127 * SYN has been ACK'd (so connection is now fully
2128 * synchronized). Go to non-starred state,
2129 * increment snd_una for ACK of SYN, and check if
2130 * we can do window scaling.
2131 */
2132 tp->t_flags &= ~TF_NEEDSYN;
2133 tp->snd_una++;
2134 /* Do window scaling? */
2135 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
2136 (TF_RCVD_SCALE | TF_REQ_SCALE))
2137 tp->rcv_scale = tp->request_r_scale;
2138 }
2139
2140process_ACK:
2141 acked = th->th_ack - tp->snd_una;
2142 tcpstat.tcps_rcvackpack++;
2143 tcpstat.tcps_rcvackbyte += acked;
2144
2145 if (tcp_do_eifel_detect && acked > 0 &&
2146 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
2147 (tp->rxt_flags & TRXT_F_FIRSTACCACK)) {
2148 /* Eifel detection applicable. */
2149 if (to.to_tsecr < tp->t_rexmtTS) {
2150 ++tcpstat.tcps_eifeldetected;
2151 tcp_revert_congestion_state(tp);
2152 if (tp->t_rxtshift != 1 ||
2153 ticks >= tp->t_badrxtwin)
2154 ++tcpstat.tcps_rttcantdetect;
2155 }
2156 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2157 /*
2158 * If we just performed our first retransmit,
2159 * and the ACK arrives within our recovery window,
2160 * then it was a mistake to do the retransmit
2161 * in the first place. Recover our original cwnd
2162 * and ssthresh, and proceed to transmit where we
2163 * left off.
2164 */
2165 tcp_revert_congestion_state(tp);
2166 ++tcpstat.tcps_rttdetected;
2167 }
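/*
 * Eifel sketch: t_rexmtTS holds the timestamp recorded when the
 * retransmission was sent.  If the echoed to_tsecr is older than
 * t_rexmtTS, the arriving ACK must have been generated by the
 * original transmission, so the retransmit was spurious and the
 * pre-retransmit congestion state is restored.
 */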
2168
2169 /*
2170 * If we have a timestamp reply, update smoothed
2171 * round trip time. If no timestamp is present but
2172 * transmit timer is running and timed sequence
2173 * number was acked, update smoothed round trip time.
2174 * Since we now have an rtt measurement, cancel the
2175 * timer backoff (cf., Phil Karn's retransmit alg.).
2176 * Recompute the initial retransmit timer.
2177 *
2178 * Some machines (certain Windows boxes) send broken
2179 * timestamp replies during the SYN+ACK phase; ignore
2180 * timestamps of 0.
2181 */
2182 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
2183 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack);
2184 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
2185 tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack);
2186 tcp_xmit_bandwidth_limit(tp, th->th_ack);
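/*
 * E.g. (hypothetical numbers): since our tsval is taken from
 * ticks, an ACK echoing to_tsecr = 5000 while ticks = 5050 yields
 * an RTT sample of 5050 - 5000 + 1 = 51 ticks, with no need for
 * the t_rtttime-based fallback measurement.
 */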
2187
2188 /*
2189 * If no data (only SYN) was ACK'd,
2190 * skip rest of ACK processing.
2191 */
2192 if (acked == 0)
2193 goto step6;
2194
2195 /* Stop looking for an acceptable ACK since one was received. */
2196 tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK |
2197 TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT);
2198
2199 if (acked > so->so_snd.ssb_cc) {
2200 tp->snd_wnd -= so->so_snd.ssb_cc;
2201 sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
2202 ourfinisacked = TRUE;
2203 } else {
2204 sbdrop(&so->so_snd.sb, acked);
2205 tp->snd_wnd -= acked;
2206 ourfinisacked = FALSE;
2207 }
2208 sowwakeup(so);
2209
2210 /*
2211 * Update window information.
2212 */
2213 if (acceptable_window_update(tp, th, tiwin)) {
2214 /* keep track of pure window updates */
2215 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2216 tiwin > tp->snd_wnd)
2217 tcpstat.tcps_rcvwinupd++;
2218 tp->snd_wnd = tiwin;
2219 tp->snd_wl1 = th->th_seq;
2220 tp->snd_wl2 = th->th_ack;
2221 if (tp->snd_wnd > tp->max_sndwnd)
2222 tp->max_sndwnd = tp->snd_wnd;
2223 needoutput = TRUE;
2224 }
2225
2226 tp->snd_una = th->th_ack;
2227 if (TCP_DO_SACK(tp))
2228 tcp_sack_update_scoreboard(tp, &to);
2229 if (IN_FASTRECOVERY(tp)) {
2230 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2231 EXIT_FASTRECOVERY(tp);
2232 needoutput = TRUE;
2233 /*
2234 * If the congestion window was inflated
2235 * to account for the other side's
2236 * cached packets, retract it.
2237 */
2238 if (!TCP_DO_SACK(tp))
2239 tp->snd_cwnd = tp->snd_ssthresh;
2240
2241 /*
2242 * Window inflation should have left us
2243 * with approximately snd_ssthresh outstanding
2244 * data. But, in case we would be inclined
2245 * to send a burst, better do it using
2246 * slow start.
2247 */
2248 if (SEQ_GT(th->th_ack + tp->snd_cwnd,
2249 tp->snd_max + 2 * tp->t_maxseg))
2250 tp->snd_cwnd =
2251 (tp->snd_max - tp->snd_una) +
2252 2 * tp->t_maxseg;
2253
2254 tp->snd_wacked = 0;
2255 } else {
2256 if (TCP_DO_SACK(tp)) {
2257 tp->snd_max_rexmt = tp->snd_max;
2258 tcp_sack_rexmt(tp, th);
2259 } else {
2260 tcp_newreno_partial_ack(tp, th, acked);
2261 }
2262 needoutput = FALSE;
2263 }
2264 } else {
2265 /*
2266 * Open the congestion window. When in slow-start,
2267 * open exponentially: maxseg per packet. Otherwise,
2268 * open linearly: maxseg per window.
2269 */
2270 if (tp->snd_cwnd <= tp->snd_ssthresh) {
2271 u_int abc_sslimit =
2272 (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
2273 tp->t_maxseg : 2 * tp->t_maxseg);
2274
2275 /* slow-start */
2276 tp->snd_cwnd += tcp_do_abc ?
2277 min(acked, abc_sslimit) : tp->t_maxseg;
2278 } else {
2279 /* linear increase */
2280 tp->snd_wacked += tcp_do_abc ? acked :
2281 tp->t_maxseg;
2282 if (tp->snd_wacked >= tp->snd_cwnd) {
2283 tp->snd_wacked -= tp->snd_cwnd;
2284 tp->snd_cwnd += tp->t_maxseg;
2285 }
2286 }
2287 tp->snd_cwnd = min(tp->snd_cwnd,
2288 TCP_MAXWIN << tp->snd_scale);
2289 tp->snd_recover = th->th_ack - 1;
2290 }
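/*
 * E.g. (hypothetical numbers, tcp_do_abc enabled, t_maxseg = 1460,
 * snd_nxt caught up to snd_max): in slow-start an ACK covering
 * acked = 2920 bytes grows snd_cwnd by min(2920, abc_sslimit) = 2920;
 * past ssthresh the same ACK only bumps snd_wacked, and snd_cwnd
 * grows by one t_maxseg per window's worth of ACKed data.
 */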
2291 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2292 tp->snd_nxt = tp->snd_una;
2293
2294 /*
2295 * If all outstanding data is acked, stop retransmit
2296 * timer and remember to restart (more output or persist).
2297 * If there is more data to be acked, restart retransmit
2298 * timer, using current (possibly backed-off) value.
2299 */
2300 if (th->th_ack == tp->snd_max) {
2301 tcp_callout_stop(tp, tp->tt_rexmt);
2302 needoutput = TRUE;
2303 } else if (!tcp_callout_active(tp, tp->tt_persist)) {
2304 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
2305 tcp_timer_rexmt);
2306 }
2307
2308 switch (tp->t_state) {
2309 /*
2310 * In FIN_WAIT_1 STATE, in addition to the processing
2311 * for the ESTABLISHED state, if our FIN is now acknowledged
2312 * then enter FIN_WAIT_2.
2313 */
2314 case TCPS_FIN_WAIT_1:
2315 if (ourfinisacked) {
2316 /*
2317 * If we can't receive any more
2318 * data, then closing user can proceed.
2319 * Starting the timer is contrary to the
2320 * specification, but if we don't get a FIN
2321 * we'll hang forever.
2322 */
2323 if (so->so_state & SS_CANTRCVMORE) {
2324 soisdisconnected(so);
2325 tcp_callout_reset(tp, tp->tt_2msl,
2326 tp->t_maxidle, tcp_timer_2msl);
2327 }
2328 tp->t_state = TCPS_FIN_WAIT_2;
2329 }
2330 break;
2331
2332 /*
2333 * In CLOSING STATE, in addition to the processing for
2334 * the ESTABLISHED state, if the ACK acknowledges our FIN
2335 * then enter the TIME-WAIT state; otherwise ignore
2336 * the segment.
2337 */
2338 case TCPS_CLOSING:
2339 if (ourfinisacked) {
2340 tp->t_state = TCPS_TIME_WAIT;
2341 tcp_canceltimers(tp);
2342 tcp_callout_reset(tp, tp->tt_2msl,
2343 2 * tcp_rmx_msl(tp),
2344 tcp_timer_2msl);
2345 soisdisconnected(so);
2346 }
2347 break;
2348
2349 /*
2350 * In LAST_ACK, we may still be waiting for data to drain
2351 * and/or to be acked, as well as for the ack of our FIN.
2352 * If our FIN is now acknowledged, delete the TCB,
2353 * enter the closed state and return.
2354 */
2355 case TCPS_LAST_ACK:
2356 if (ourfinisacked) {
2357 tp = tcp_close(tp);
2358 goto drop;
2359 }
2360 break;
2361
2362 /*
2363 * In TIME_WAIT state the only thing that should arrive
2364 * is a retransmission of the remote FIN. Acknowledge
2365 * it and restart the finack timer.
2366 */
2367 case TCPS_TIME_WAIT:
2368 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2369 tcp_timer_2msl);
2370 goto dropafterack;
2371 }
2372 }
2373
2374step6:
2375 /*
2376 * Update window information.
2377 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2378 */
2379 if ((thflags & TH_ACK) &&
2380 acceptable_window_update(tp, th, tiwin)) {
2381 /* keep track of pure window updates */
2382 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2383 tiwin > tp->snd_wnd)
2384 tcpstat.tcps_rcvwinupd++;
2385 tp->snd_wnd = tiwin;
2386 tp->snd_wl1 = th->th_seq;
2387 tp->snd_wl2 = th->th_ack;
2388 if (tp->snd_wnd > tp->max_sndwnd)
2389 tp->max_sndwnd = tp->snd_wnd;
2390 needoutput = TRUE;
2391 }
2392
2393 /*
2394 * Process segments with URG.
2395 */
2396 if ((thflags & TH_URG) && th->th_urp &&
2397 !TCPS_HAVERCVDFIN(tp->t_state)) {
2398 /*
2399 * This is a kludge, but if we receive and accept
2400 * random urgent pointers, we'll crash in
2401 * soreceive. It's hard to imagine someone
2402 * actually wanting to send this much urgent data.
2403 */
2404 if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
2405 th->th_urp = 0; /* XXX */
2406 thflags &= ~TH_URG; /* XXX */
2407 goto dodata; /* XXX */
2408 }
2409 /*
2410 * If this segment advances the known urgent pointer,
2411 * then mark the data stream. This should not happen
2412 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2413 * a FIN has been received from the remote side.
2414 * In these states we ignore the URG.
2415 *
2416 * According to RFC961 (Assigned Protocols),
2417 * the urgent pointer points to the last octet
2418 * of urgent data. We continue, however,
2419 * to consider it to indicate the first octet
2420 * of data past the urgent section as the original
2421 * spec states (in one of two places).
2422 */
2423 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
2424 tp->rcv_up = th->th_seq + th->th_urp;
2425 so->so_oobmark = so->so_rcv.ssb_cc +
2426 (tp->rcv_up - tp->rcv_nxt) - 1;
2427 if (so->so_oobmark == 0)
2428 sosetstate(so, SS_RCVATMARK);
2429 sohasoutofband(so);
2430 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2431 }
2432 /*
2433 * Remove out-of-band data so it doesn't get presented to the user.
2434 * This can happen independently of advancing the URG pointer,
2435 * but if two URGs are pending at once, some out-of-band
2436 * data may creep in... ick.
2437 */
2438 if (th->th_urp <= (u_long)tlen &&
2439 !(so->so_options & SO_OOBINLINE)) {
2440 /* hdr drop is delayed */
2441 tcp_pulloutofband(so, th, m, drop_hdrlen);
2442 }
2443 } else {
2444 /*
2445 * If no out of band data is expected,
2446 * pull receive urgent pointer along
2447 * with the receive window.
2448 */
2449 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2450 tp->rcv_up = tp->rcv_nxt;
2451 }
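/*
 * E.g. (hypothetical numbers): a segment with th_seq = 1000 and
 * th_urp = 3 advances rcv_up to 1003; with 50 bytes already
 * buffered (ssb_cc) and rcv_nxt = 1000, so_oobmark becomes
 * 50 + 3 - 1 = 52, the stream offset of the out-of-band mark.
 */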
2452
2453dodata: /* XXX */
2454 /*
2455 * Process the segment text, merging it into the TCP sequencing queue,
2456 * and arranging for acknowledgment of receipt if necessary.
2457 * This process logically involves adjusting tp->rcv_wnd as data
2458 * is presented to the user (this happens in tcp_usrreq.c,
2459 * case PRU_RCVD). If a FIN has already been received on this
2460 * connection then we just ignore the text.
2461 */
2462 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
2463 m_adj(m, drop_hdrlen); /* delayed header drop */
2464 /*
2465 * Insert segment which includes th into TCP reassembly queue
2466 * with control block tp. Set thflags to whether reassembly now
2467 * includes a segment with FIN. This handles the common case
2468 * inline (segment is the next to be received on an established
2469 * connection, and the queue is empty), avoiding linkage into
2470 * and removal from the queue and repetition of various
2471 * conversions.
2472 * Set DELACK for segments received in order, but ack
2473 * immediately when segments are out of order (so
2474 * fast retransmit can work).
2475 */
2476 if (th->th_seq == tp->rcv_nxt &&
2477 LIST_EMPTY(&tp->t_segq) &&
2478 TCPS_HAVEESTABLISHED(tp->t_state)) {
2479 if (DELAY_ACK(tp)) {
2480 tcp_callout_reset(tp, tp->tt_delack,
2481 tcp_delacktime, tcp_timer_delack);
2482 } else {
2483 tp->t_flags |= TF_ACKNOW;
2484 }
2485 tp->rcv_nxt += tlen;
2486 thflags = th->th_flags & TH_FIN;
2487 tcpstat.tcps_rcvpack++;
2488 tcpstat.tcps_rcvbyte += tlen;
2489 ND6_HINT(tp);
2490 if (so->so_state & SS_CANTRCVMORE) {
2491 m_freem(m);
2492 } else {
2493 lwkt_gettoken(&so->so_rcv.ssb_token);
2494 ssb_appendstream(&so->so_rcv, m);
2495 lwkt_reltoken(&so->so_rcv.ssb_token);
2496 }
2497 sorwakeup(so);
2498 } else {
2499 if (!(tp->sack_flags & TSACK_F_DUPSEG)) {
2500 /* Initialize SACK report block. */
2501 tp->reportblk.rblk_start = th->th_seq;
2502 tp->reportblk.rblk_end = TCP_SACK_BLKEND(
2503 th->th_seq + tlen, thflags);
2504 }
2505 thflags = tcp_reass(tp, th, &tlen, m);
2506 tp->t_flags |= TF_ACKNOW;
2507 }
2508
2509 /*
2510 * Note the amount of data that peer has sent into
2511 * our window, in order to estimate the sender's
2512 * buffer size.
2513 */
2514 len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2515 } else {
2516 m_freem(m);
2517 thflags &= ~TH_FIN;
2518 }
2519
2520 /*
2521 * If FIN is received ACK the FIN and let the user know
2522 * that the connection is closing.
2523 */
2524 if (thflags & TH_FIN) {
2525 if (!TCPS_HAVERCVDFIN(tp->t_state)) {
2526 socantrcvmore(so);
2527 /*
2528 * If connection is half-synchronized
2529 * (ie NEEDSYN flag on) then delay ACK,
2530 * so it may be piggybacked when SYN is sent.
2531 * Otherwise, since we received a FIN then no
2532 * more input can be expected, send ACK now.
2533 */
2534 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
2535 tcp_callout_reset(tp, tp->tt_delack,
2536 tcp_delacktime, tcp_timer_delack);
2537 } else {
2538 tp->t_flags |= TF_ACKNOW;
2539 }
2540 tp->rcv_nxt++;
2541 }
2542
2543 switch (tp->t_state) {
2544 /*
2545 * In SYN_RECEIVED and ESTABLISHED STATES
2546 * enter the CLOSE_WAIT state.
2547 */
2548 case TCPS_SYN_RECEIVED:
2549 tp->t_starttime = ticks;
2550 /*FALLTHROUGH*/
2551 case TCPS_ESTABLISHED:
2552 tp->t_state = TCPS_CLOSE_WAIT;
2553 break;
2554
2555 /*
2556 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2557 * enter the CLOSING state.
2558 */
2559 case TCPS_FIN_WAIT_1:
2560 tp->t_state = TCPS_CLOSING;
2561 break;
2562
2563 /*
2564 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2565 * starting the time-wait timer, turning off the other
2566 * standard timers.
2567 */
2568 case TCPS_FIN_WAIT_2:
2569 tp->t_state = TCPS_TIME_WAIT;
2570 tcp_canceltimers(tp);
2571 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2572 tcp_timer_2msl);
2573 soisdisconnected(so);
2574 break;
2575
2576 /*
2577 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2578 */
2579 case TCPS_TIME_WAIT:
2580 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp),
2581 tcp_timer_2msl);
2582 break;
2583 }
2584 }
2585
2586#ifdef TCPDEBUG
2587 if (so->so_options & SO_DEBUG)
2588 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2589#endif
2590
2591 /*
2592 * Return any desired output.
2593 */
2594 if (needoutput || (tp->t_flags & TF_ACKNOW))
2595 tcp_output(tp);
2596 tcp_sack_report_cleanup(tp);
2597 return(IPPROTO_DONE);
2598
2599dropafterack:
2600 /*
2601 * Generate an ACK dropping incoming segment if it occupies
2602 * sequence space, where the ACK reflects our state.
2603 *
2604 * We can now skip the test for the RST flag since all
2605 * paths to this code happen after packets containing
2606 * RST have been dropped.
2607 *
2608 * In the SYN-RECEIVED state, don't send an ACK unless the
2609 * segment we received passes the SYN-RECEIVED ACK test.
2610 * If it fails send a RST. This breaks the loop in the
2611 * "LAND" DoS attack, and also prevents an ACK storm
2612 * between two listening ports that have been sent forged
2613 * SYN segments, each with the source address of the other.
2614 */
2615 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2616 (SEQ_GT(tp->snd_una, th->th_ack) ||
2617 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2618 rstreason = BANDLIM_RST_OPENPORT;
2619 goto dropwithreset;
2620 }
2621#ifdef TCPDEBUG
2622 if (so->so_options & SO_DEBUG)
2623 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2624#endif
2625 m_freem(m);
2626 tp->t_flags |= TF_ACKNOW;
2627 tcp_output(tp);
2628 tcp_sack_report_cleanup(tp);
2629 return(IPPROTO_DONE);
2630
2631dropwithreset:
2632 /*
2633 * Generate a RST, dropping incoming segment.
2634 * Make ACK acceptable to originator of segment.
2635 * Don't bother to respond if destination was broadcast/multicast.
2636 */
2637 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
2638 goto drop;
2639 if (isipv6) {
2640 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2641 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2642 goto drop;
2643 } else {
2644 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2645 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2646 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2647 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2648 goto drop;
2649 }
2650 /* IPv6 anycast check is done at tcp6_input() */
2651
2652 /*
2653 * Perform bandwidth limiting.
2654 */
2655#ifdef ICMP_BANDLIM
2656 if (badport_bandlim(rstreason) < 0)
2657 goto drop;
2658#endif
2659
2660#ifdef TCPDEBUG
2661 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2662 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2663#endif
2664 if (thflags & TH_ACK)
2665 /* mtod() below is safe as long as hdr dropping is delayed */
2666 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
2667 TH_RST);
2668 else {
2669 if (thflags & TH_SYN)
2670 tlen++;
2671 /* mtod() below is safe as long as hdr dropping is delayed */
2672 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
2673 (tcp_seq)0, TH_RST | TH_ACK);
2674 }
2675 if (tp != NULL)
2676 tcp_sack_report_cleanup(tp);
2677 return(IPPROTO_DONE);
2678
2679drop:
2680 /*
2681 * Drop space held by incoming segment and return.
2682 */
2683#ifdef TCPDEBUG
2684 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2685 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2686#endif
2687 m_freem(m);
2688 if (tp != NULL)
2689 tcp_sack_report_cleanup(tp);
2690 return(IPPROTO_DONE);
2691}
2692
2693/*
2694 * Parse TCP options and place in tcpopt.
2695 */
2696static void
2697tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn,
2698 tcp_seq ack)
2699{
2700 int opt, optlen, i;
2701
2702 to->to_flags = 0;
2703 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2704 opt = cp[0];
2705 if (opt == TCPOPT_EOL)
2706 break;
2707 if (opt == TCPOPT_NOP)
2708 optlen = 1;
2709 else {
2710 if (cnt < 2)
2711 break;
2712 optlen = cp[1];
2713 if (optlen < 2 || optlen > cnt)
2714 break;
2715 }
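/*
 * For reference, a timestamp option arrives on the wire as
 * kind 8, length 10, then two 32-bit big-endian values
 * (tsval, tsecr); senders typically pad with NOPs (kind 1)
 * for alignment, which the loop above steps over one byte
 * at a time.
 */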
2716 switch (opt) {
2717 case TCPOPT_MAXSEG:
2718 if (optlen != TCPOLEN_MAXSEG)
2719 continue;
2720 if (!is_syn)
2721 continue;
2722 to->to_flags |= TOF_MSS;
2723 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
2724 to->to_mss = ntohs(to->to_mss);
2725 break;
2726 case TCPOPT_WINDOW:
2727 if (optlen != TCPOLEN_WINDOW)
2728 continue;
2729 if (!is_syn)
2730 continue;
2731 to->to_flags |= TOF_SCALE;
2732 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2733 break;
2734 case TCPOPT_TIMESTAMP:
2735 if (optlen != TCPOLEN_TIMESTAMP)
2736 continue;
2737 to->to_flags |= TOF_TS;
2738 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
2739 to->to_tsval = ntohl(to->to_tsval);
2740 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
2741 to->to_tsecr = ntohl(to->to_tsecr);
2742 /*
2743 * If echoed timestamp is later than the current time,
2744 * fall back to non-RFC1323 RTT calculation.
2745 */
2746 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
2747 to->to_tsecr = 0;
2748 break;
2749 case TCPOPT_SACK_PERMITTED:
2750 if (optlen != TCPOLEN_SACK_PERMITTED)
2751 continue;
2752 if (!is_syn)
2753 continue;
2754 to->to_flags |= TOF_SACK_PERMITTED;
2755 break;
2756 case TCPOPT_SACK:
2757 if ((optlen - 2) & 0x07) /* not multiple of 8 */
2758 continue;
2759 to->to_nsackblocks = (optlen - 2) / 8;
2760 to->to_sackblocks = (struct raw_sackblock *) (cp + 2);
2761 to->to_flags |= TOF_SACK;
2762 for (i = 0; i < to->to_nsackblocks; i++) {
2763 struct raw_sackblock *r = &to->to_sackblocks[i];
2764
2765 r->rblk_start = ntohl(r->rblk_start);
2766 r->rblk_end = ntohl(r->rblk_end);
2767
2768 if (SEQ_LEQ(r->rblk_end, r->rblk_start)) {
2769 /*
2770 * Invalid SACK block; discard all
2771 * SACK blocks
2772 */
2773 tcpstat.tcps_rcvbadsackopt++;
2774 to->to_nsackblocks = 0;
2775 to->to_sackblocks = NULL;
2776 to->to_flags &= ~TOF_SACK;
2777 break;
2778 }
2779 }
2780 if ((to->to_flags & TOF_SACK) &&
2781 tcp_sack_ndsack_blocks(to->to_sackblocks,
2782 to->to_nsackblocks, ack))
2783 to->to_flags |= TOF_DSACK;
2784 break;
2785#ifdef TCP_SIGNATURE
2786 /*
2787 * XXX In order to reply to a host which has set the
2788 * TCP_SIGNATURE option in its initial SYN, we have to
2789 * record the fact that the option was observed here
2790 * for the syncache code to perform the correct response.
2791 */
2792 case TCPOPT_SIGNATURE:
2793 if (optlen != TCPOLEN_SIGNATURE)
2794 continue;
2795 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2796 break;
2797#endif /* TCP_SIGNATURE */
2798 default:
2799 continue;
2800 }
2801 }
2802}
2803
2804/*
2805 * Pull out of band byte out of a segment so
2806 * it doesn't appear in the user's data queue.
2807 * It is still reflected in the segment length for
2808 * sequencing purposes.
2809 * "off" is the delayed to be dropped hdrlen.
2810 */
2811static void
2812tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
2813{
2814 int cnt = off + th->th_urp - 1;
2815
2816 while (cnt >= 0) {
2817 if (m->m_len > cnt) {
2818 char *cp = mtod(m, caddr_t) + cnt;
2819 struct tcpcb *tp = sototcpcb(so);
2820
2821 tp->t_iobc = *cp;
2822 tp->t_oobflags |= TCPOOB_HAVEDATA;
2823 bcopy(cp + 1, cp, m->m_len - cnt - 1);
2824 m->m_len--;
2825 if (m->m_flags & M_PKTHDR)
2826 m->m_pkthdr.len--;
2827 return;
2828 }
2829 cnt -= m->m_len;
2830 m = m->m_next;
2831 if (m == NULL)
2832 break;
2833 }
2834 panic("tcp_pulloutofband");
2835}
2836
2837/*
2838 * Collect new round-trip time estimate
2839 * and update averages and current timeout.
2840 */
2841static void
2842tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack)
2843{
2844 int rebaserto = 0;
2845
2846 tcpstat.tcps_rttupdated++;
2847 tp->t_rttupdated++;
2848 if ((tp->rxt_flags & TRXT_F_REBASERTO) &&
2849 SEQ_GT(ack, tp->snd_max_prev)) {
2850#ifdef DEBUG_EIFEL_RESPONSE
2851 kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ",
2852 tp->t_srtt_prev, tp->t_rttvar_prev,
2853 tp->t_srtt, tp->t_rttvar);
2854#endif
2855
2856 tcpstat.tcps_eifelresponse++;
2857 rebaserto = 1;
2858 tp->rxt_flags &= ~TRXT_F_REBASERTO;
2859 tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT));
2860 tp->t_rttvar = max(tp->t_rttvar_prev,
2861 (rtt << (TCP_RTTVAR_SHIFT - 1)));
2862 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2863 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2864
2865#ifdef DEBUG_EIFEL_RESPONSE
2866 kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar);
2867#endif
2868 } else if (tp->t_srtt != 0) {
2869 int delta;
2870
2871 /*
2872 * srtt is stored as fixed point with 5 bits after the
2873 * binary point (i.e., scaled by 32). The following magic
2874 * is equivalent to the smoothing algorithm in rfc793 with
2875 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2876 * point). Adjust rtt to origin 0.
2877 */
2878 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2879 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2880
2881 if ((tp->t_srtt += delta) <= 0)
2882 tp->t_srtt = 1;
2883
2884 /*
2885 * We accumulate a smoothed rtt variance (actually, a
2886 * smoothed mean difference), then set the retransmit
2887 * timer to smoothed rtt + 4 times the smoothed variance.
2888 * rttvar is stored as fixed point with 4 bits after the
2889 * binary point (scaled by 16). The following is
2890 * equivalent to rfc793 smoothing with an alpha of .75
2891 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2892 * rfc793's wired-in beta.
2893 */
2894 if (delta < 0)
2895 delta = -delta;
2896 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2897 if ((tp->t_rttvar += delta) <= 0)
2898 tp->t_rttvar = 1;
2899 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2900 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2901 } else {
2902 /*
2903 * No rtt measurement yet - use the unsmoothed rtt.
2904 * Set the variance to half the rtt (so our first
2905 * retransmit happens at 3*rtt).
2906 */
2907 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2908 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2909 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2910 }
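/*
 * Worked example (hypothetical numbers, assuming the usual
 * TCP_RTT_SHIFT = 5 and TCP_DELTA_SHIFT = 2): with t_srtt = 320
 * (10 ticks scaled by 32) and a new sample rtt = 14,
 * delta = ((14 - 1) << 2) - (320 >> 3) = 52 - 40 = 12,
 * so t_srtt becomes 332, i.e. 10.375 ticks -- exactly
 * 7/8 * 10 + 1/8 * 13 from the rfc793 form above.
 */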
2911 tp->t_rtttime = 0;
2912 tp->t_rxtshift = 0;
2913
2914#ifdef DEBUG_EIFEL_RESPONSE
2915 if (rebaserto) {
2916 kprintf("| rxtcur prev %d, old %d, ",
2917 tp->t_rxtcur_prev, tp->t_rxtcur);
2918 }
2919#endif
2920
2921 /*
2922 * the retransmit should happen at rtt + 4 * rttvar.
2923 * Because of the way we do the smoothing, srtt and rttvar
2924 * will each average +1/2 tick of bias. When we compute
2925 * the retransmit timer, we want 1/2 tick of rounding and
2926 * 1 extra tick because of +-1/2 tick uncertainty in the
2927 * firing of the timer. The bias will give us exactly the
2928 * 1.5 tick we need. But, because the bias is
2929 * statistical, we have to test that we don't drop below
2930 * the minimum feasible timer (which is 2 ticks).
2931 */
2932 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2933 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
2934
2935 if (rebaserto) {
2936 if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) {
2937 /*
2938 * RFC4015 requires that the new RTO is at least
2939 * 2*G (tcp_eifel_rtoinc) greater than the RTO
2940 * (t_rxtcur_prev) when the spurious retransmit
2941 * timeout happens.
2942 *
2943 * The above condition could be true, if the SRTT
2944 * and RTTVAR used to calculate t_rxtcur_prev
2945 * resulted in a value less than t_rttmin. So
2946 * simply increasing SRTT by tcp_eifel_rtoinc when
2947 * preparing for the Eifel response in
2948 * tcp_save_congestion_state() could not ensure
2949 * that the new RTO will be tcp_eifel_rtoinc greater
2950 * than t_rxtcur_prev.
2951 */
2952 tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc;
2953 }
2954#ifdef DEBUG_EIFEL_RESPONSE
2955 kprintf("new %d\n", tp->t_rxtcur);
2956#endif
2957 }
2958
2959 /*
2960 * We received an ack for a packet that wasn't retransmitted;
2961 * it is probably safe to discard any error indications we've
2962 * received recently. This isn't quite right, but close enough
2963 * for now (a route might have failed after we sent a segment,
2964 * and the return path might not be symmetrical).
2965 */
2966 tp->t_softerror = 0;
2967}
2968
2969/*
2970 * Determine a reasonable value for maxseg size.
2971 * If the route is known, check route for mtu.
2972 * If none, use an mss that can be handled on the outgoing
2973 * interface without forcing IP to fragment; if bigger than
2974 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2975 * to utilize large mbufs. If no route is found, route has no mtu,
2976 * or the destination isn't local, use a default, hopefully conservative
2977 * size (usually 512 or the default IP max size, but no more than the mtu
2978 * of the interface), as we can't discover anything about intervening
2979 * gateways or networks. We also initialize the congestion/slow start
2980 * window to be a single segment if the destination isn't local.
2981 * While looking at the routing entry, we also initialize other path-dependent
2982 * parameters from pre-set or cached values in the routing entry.
2983 *
2984 * Also take into account the space needed for options that we
2985 * send regularly. Make maxseg shorter by that amount to assure
2986 * that we can send maxseg amount of data even when the options
2987 * are present. Store the upper limit of the length of options plus
2988 * data in maxopd.
2989 *
2990 * NOTE that this routine is only called when we process an incoming
2991 * segment; for outgoing segments only tcp_mssopt is called.
2992 */
2993void
2994tcp_mss(struct tcpcb *tp, int offer)
2995{
2996 struct rtentry *rt;
2997 struct ifnet *ifp;
2998 int rtt, mss;
2999 u_long bufsize;
3000 struct inpcb *inp = tp->t_inpcb;
3001 struct socket *so;
3002#ifdef INET6
3003 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3004 size_t min_protoh = isipv6 ?
3005 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
3006 sizeof(struct tcpiphdr);
3007#else
3008 const boolean_t isipv6 = FALSE;
3009 const size_t min_protoh = sizeof(struct tcpiphdr);
3010#endif
3011
3012 if (isipv6)
3013 rt = tcp_rtlookup6(&inp->inp_inc);
3014 else
3015 rt = tcp_rtlookup(&inp->inp_inc);
3016 if (rt == NULL) {
3017 tp->t_maxopd = tp->t_maxseg =
3018 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3019 return;
3020 }
3021 ifp = rt->rt_ifp;
3022 so = inp->inp_socket;
3023
3024 /*
3025 * Offer == 0 means that there was no MSS on the SYN segment;
3026 * in this case we use either the interface mtu or tcp_mssdflt.
3027 *
3028 * An offer which is too large will be cut down later.
3029 */
3030 if (offer == 0) {
3031 if (isipv6) {
3032 if (in6_localaddr(&inp->in6p_faddr)) {
3033 offer = ND_IFINFO(rt->rt_ifp)->linkmtu -
3034 min_protoh;
3035 } else {
3036 offer = tcp_v6mssdflt;
3037 }
3038 } else {
3039 if (in_localaddr(inp->inp_faddr))
3040 offer = ifp->if_mtu - min_protoh;
3041 else
3042 offer = tcp_mssdflt;
3043 }
3044 }
3045
3046 /*
3047 * Prevent DoS attack with too small MSS. Round up
3048 * to at least minmss.
3049 *
3050 * Sanity check: make sure that maxopd will be large
3051 * enough to allow some data on segments even if all
3052 * the option space is used (40 bytes). Otherwise
3053 * funny things may happen in tcp_output.
3054 */
3055 offer = max(offer, tcp_minmss);
3056 offer = max(offer, 64);
3057
3058 rt->rt_rmx.rmx_mssopt = offer;
3059
3060 /*
3061 * While we're here, check if there's an initial rtt
3062 * or rttvar. Convert from the route-table units
3063 * to scaled multiples of the slow timeout timer.
3064 */
3065 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
3066 /*
3067 * XXX the lock bit for RTT indicates that the value
3068 * is also a minimum value; this is subject to time.
3069 */
3070 if (rt->rt_rmx.rmx_locks & RTV_RTT)
3071 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
3072 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
3073 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3074 tcpstat.tcps_usedrtt++;
3075 if (rt->rt_rmx.rmx_rttvar) {
3076 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
3077 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
3078 tcpstat.tcps_usedrttvar++;
3079 } else {
3080 /* default variation is +- 1 rtt */
3081 tp->t_rttvar =
3082 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3083 }
3084 TCPT_RANGESET(tp->t_rxtcur,
3085 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3086 tp->t_rttmin, TCPTV_REXMTMAX);
3087 }
3088
3089 /*
3090 * if there's an mtu associated with the route, use it
3091 * else, use the link mtu. Take the smaller of mss or offer
3092 * as our final mss.
3093 */
3094 if (rt->rt_rmx.rmx_mtu) {
3095 mss = rt->rt_rmx.rmx_mtu - min_protoh;
3096 } else {
3097 if (isipv6)
3098 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
3099 else
3100 mss = ifp->if_mtu - min_protoh;
3101 }
3102 mss = min(mss, offer);
3103
3104 /*
3105 * maxopd stores the maximum length of data AND options
3106 * in a segment; maxseg is the amount of data in a normal
3107 * segment. We need to store this value (maxopd) apart
3108 * from maxseg, because now every segment carries options
3109 * and thus we normally have somewhat less data in segments.
3110 */
3111 tp->t_maxopd = mss;
3112
3113 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3114 ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3115 mss -= TCPOLEN_TSTAMP_APPA;
3116
3117#if (MCLBYTES & (MCLBYTES - 1)) == 0
3118 if (mss > MCLBYTES)
3119 mss &= ~(MCLBYTES-1);
3120#else
3121 if (mss > MCLBYTES)
3122 mss = mss / MCLBYTES * MCLBYTES;
3123#endif
3124 /*
3125 * If there's a pipesize, change the socket buffer
3126 * to that size. Make the socket buffers an integral
3127 * number of mss units; if the mss is larger than
3128 * the socket buffer, decrease the mss.
3129 */
3130#ifdef RTV_SPIPE
3131 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
3132#endif
3133 bufsize = so->so_snd.ssb_hiwat;
3134 if (bufsize < mss)
3135 mss = bufsize;
3136 else {
3137 bufsize = roundup(bufsize, mss);
3138 if (bufsize > sb_max)
3139 bufsize = sb_max;
3140 if (bufsize > so->so_snd.ssb_hiwat)
3141 ssb_reserve(&so->so_snd, bufsize, so, NULL);
3142 }
3143 tp->t_maxseg = mss;
3144
3145#ifdef RTV_RPIPE
3146 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
3147#endif
3148 bufsize = so->so_rcv.ssb_hiwat;
3149 if (bufsize > mss) {
3150 bufsize = roundup(bufsize, mss);
3151 if (bufsize > sb_max)
3152 bufsize = sb_max;
3153 if (bufsize > so->so_rcv.ssb_hiwat) {
3154 lwkt_gettoken(&so->so_rcv.ssb_token);
3155 ssb_reserve(&so->so_rcv, bufsize, so, NULL);
3156 lwkt_reltoken(&so->so_rcv.ssb_token);
3157 }
3158 }
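/*
 * E.g. (hypothetical numbers): with mss = 1448 after the timestamp
 * option is deducted and a 57344-byte receive buffer,
 * roundup(57344, 1448) = 57920, so the buffer is grown to an
 * integral 40 mss (subject to sb_max).
 */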
3159
3160 /*
3161 * Set the slow-start flight size
3162 *
3163 * NOTE: t_maxseg must have been configured!
3164 */
3165 tp->snd_cwnd = tcp_initial_window(tp);
3166
3167 if (rt->rt_rmx.rmx_ssthresh) {
3168 /*
3169 * There's some sort of gateway or interface
3170 * buffer limit on the path. Use this to set
3171 * the slow start threshold, but set the
3172 * threshold to no less than 2*mss.
3173 */
3174 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
3175 tcpstat.tcps_usedssthresh++;
3176 }
3177}
3178
3179/*
3180 * Determine the MSS option to send on an outgoing SYN.
3181 */
3182int
3183tcp_mssopt(struct tcpcb *tp)
3184{
3185 struct rtentry *rt;
3186#ifdef INET6
3187 boolean_t isipv6 =
3188 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3189 int min_protoh = isipv6 ?
3190 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
3191 sizeof(struct tcpiphdr);
3192#else
3193 const boolean_t isipv6 = FALSE;
3194 const size_t min_protoh = sizeof(struct tcpiphdr);
3195#endif
3196
3197 if (isipv6)
3198 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
3199 else
3200 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
3201 if (rt == NULL)
3202 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3203
3204 return (rt->rt_ifp->if_mtu - min_protoh);
3205}
3206
3207/*
3208 * When a partial ack arrives, force the retransmission of the
3209 * next unacknowledged segment. Do not exit Fast Recovery.
3210 *
3211 * Implement the Slow-but-Steady variant of NewReno by restarting
3212 * the retransmission timer. Turn it off here so it can be restarted
3213 * later in tcp_output().
3214 */
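/*
 * Deflation sketch (hypothetical numbers, t_maxseg = 1460): if
 * snd_cwnd was 23360 (16 segments) and a partial ACK covers
 * acked = 2920 bytes, the window is deflated to
 * 23360 - 2920 + 1460 = 21900 after the forced retransmission,
 * keeping roughly the same amount of data in flight.
 */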
3215static void
3216tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
3217{
3218 tcp_seq old_snd_nxt = tp->snd_nxt;
3219 u_long ocwnd = tp->snd_cwnd;
3220
3221 tcp_callout_stop(tp, tp->tt_rexmt);
3222 tp->t_rtttime = 0;
3223 tp->snd_nxt = th->th_ack;
3224 /* Set snd_cwnd to one segment beyond acknowledged offset. */
3225 tp->snd_cwnd = tp->t_maxseg;
3226 tp->t_flags |= TF_ACKNOW;
3227 tcp_output(tp);
3228 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3229 tp->snd_nxt = old_snd_nxt;
3230 /* partial window deflation */
3231 if (ocwnd > acked)
3232 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
3233 else
3234 tp->snd_cwnd = tp->t_maxseg;
3235}
3236
3237/*
3238 * In contrast to the Slow-but-Steady NewReno variant,
3239 * we do not reset the retransmission timer for SACK retransmissions,
3240 * except when retransmitting snd_una.
3241 */
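/*
 * Loop sketch (hypothetical numbers, t_maxseg = 1460): with
 * snd_cwnd = 14600 and tcp_sack_compute_pipe() reporting
 * pipe = 10220 bytes in flight, there is room for three segments;
 * each tcp_output() adds what it sent to pipe until the cwnd
 * budget is exhausted (new data additionally capped at MAXBURST).
 */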
3242static void
3243tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th)
3244{
3245 tcp_seq old_snd_nxt = tp->snd_nxt;
3246 u_long ocwnd = tp->snd_cwnd;
3247 uint32_t pipe;
3248 int nseg = 0; /* consecutive new segments */
3249 int nseg_rexmt = 0; /* retransmitted segments */
3250#define MAXBURST 4 /* limit burst of new packets on partial ack */
3251
3252 tp->t_rtttime = 0;
3253 pipe = tcp_sack_compute_pipe(tp);
3254 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg &&
3255 (!tcp_do_smartsack || nseg < MAXBURST)) {
3256 tcp_seq old_snd_max, old_rexmt_high, nextrexmt;
3257 uint32_t sent, seglen;
3258 boolean_t rescue;
3259 int error;
3260
3261 old_rexmt_high = tp->rexmt_high;
3262 if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) {
3263 tp->rexmt_high = old_rexmt_high;
3264 break;
3265 }
3266
3267 /*
3268 * If the next transmission is a rescue retransmission,
3269 * we check whether we have already sent some data
3270 * (either new segments or retransmitted segments)
3271 * into the network or not. Since the idea of rescue
3272 * retransmission is to sustain ACK clock, as long as
3273 * some segments are in the network, ACK clock will be
3274 * kept ticking.
3275 */
3276 if (rescue && (nseg_rexmt > 0 || nseg > 0)) {
3277 tp->rexmt_high = old_rexmt_high;
3278 break;
3279 }
3280
3281 if (nextrexmt == tp->snd_max)
3282 ++nseg;
3283 else
3284 ++nseg_rexmt;
3285 tp->snd_nxt = nextrexmt;
3286 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
3287 old_snd_max = tp->snd_max;
3288 if (nextrexmt == tp->snd_una)
3289 tcp_callout_stop(tp, tp->tt_rexmt);
3290 error = tcp_output(tp);
3291 if (error != 0) {
3292 tp->rexmt_high = old_rexmt_high;
3293 break;
3294 }
3295 sent = tp->snd_nxt - nextrexmt;
3296 if (sent <= 0) {
3297 tp->rexmt_high = old_rexmt_high;
3298 break;
3299 }
3300 pipe += sent;
3301 tcpstat.tcps_sndsackpack++;
3302 tcpstat.tcps_sndsackbyte += sent;
3303
3304 if (rescue) {
3305 tcpstat.tcps_sackrescue++;
3306 tp->rexmt_rescue = tp->snd_nxt;
3307 tp->sack_flags |= TSACK_F_SACKRESCUED;
3308 break;
3309 }
3310 if (SEQ_LT(nextrexmt, old_snd_max) &&
3311 SEQ_LT(tp->rexmt_high, tp->snd_nxt)) {
3312 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
3313 if (tcp_aggressive_rescuesack &&
3314 (tp->sack_flags & TSACK_F_SACKRESCUED) &&
3315 SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
3316 /* Drag RescueRxt along with HighRxt */
3317 tp->rexmt_rescue = tp->rexmt_high;
3318 }
3319 }
3320 }
3321 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3322 tp->snd_nxt = old_snd_nxt;
3323 tp->snd_cwnd = ocwnd;
3324}
3325
3326static boolean_t
3327tcp_sack_limitedxmit(struct tcpcb *tp)
3328{
3329 tcp_seq oldsndnxt = tp->snd_nxt;
3330 tcp_seq oldsndmax = tp->snd_max;
3331 u_long ocwnd = tp->snd_cwnd;
3332 uint32_t pipe;
3333 boolean_t ret = FALSE;
3334
3335 tp->rexmt_high = tp->snd_una - 1;
3336 pipe = tcp_sack_compute_pipe(tp);
3337 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg) {
3338 uint32_t sent;
3339 tcp_seq next;
3340 int error;
3341
3342 next = tp->snd_nxt = tp->snd_max;
3343 tp->snd_cwnd = tp->snd_nxt - tp->snd_una + tp->t_maxseg;
3344
3345 error = tcp_output(tp);
3346 if (error)
3347 break;
3348
3349 sent = tp->snd_nxt - next;
3350 if (sent <= 0)
3351 break;
3352 pipe += sent;
3353 ++tcpstat.tcps_sndlimited;
3354 ret = TRUE;
3355 }
3356
3357 if (SEQ_LT(oldsndnxt, oldsndmax)) {
3358 KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una),
3359 ("snd_una moved in other threads"));
3360 tp->snd_nxt = oldsndnxt;
3361 }
3362 tp->snd_cwnd = ocwnd;
3363
3364 return ret;
3365}
3366
3367/*
3368 * Reset idle time and keep-alive timer, typically called when a valid
3369 * tcp packet is received but may also be called when FASTKEEP is set
3370 * to prevent the previous long timeout from escalating to a drop.
3371 *
3372 * Only update t_rcvtime for non-SYN packets.
3373 *
3374 * Handle the case where one side thinks the connection is established
3375 * but the other side has, say, rebooted without cleaning out the
3376 * connection. The SYNs could be construed as an attack and wind
3377 * up ignored, but in case it isn't an attack we can validate the
3378 * connection by forcing a keepalive.
3379 */
3380void
3381tcp_timer_keep_activity(struct tcpcb *tp, int thflags)
3382{
3383 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3384 if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) {
3385 tp->t_flags |= TF_KEEPALIVE;
3386 tcp_callout_reset(tp, tp->tt_keep, hz / 2,
3387 tcp_timer_keep);
3388 } else {
3389 tp->t_rcvtime = ticks;
3390 tp->t_flags &= ~TF_KEEPALIVE;
3391 tcp_callout_reset(tp, tp->tt_keep,
3392 tp->t_keepidle,
3393 tcp_timer_keep);
3394 }
3395 }
3396}
3397
3398static int
3399tcp_rmx_msl(const struct tcpcb *tp)
3400{
3401 struct rtentry *rt;
3402 struct inpcb *inp = tp->t_inpcb;
3403 int msl;
3404#ifdef INET6
3405 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3406#else
3407 const boolean_t isipv6 = FALSE;
3408#endif
3409
3410 if (isipv6)
3411 rt = tcp_rtlookup6(&inp->inp_inc);
3412 else
3413 rt = tcp_rtlookup(&inp->inp_inc);
3414 if (rt == NULL || rt->rt_rmx.rmx_msl == 0)
3415 return tcp_msl;
3416
3417 msl = (rt->rt_rmx.rmx_msl * hz) / 1000;
3418 if (msl == 0)
3419 msl = 1;
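	/*
	 * rmx_msl is in milliseconds; e.g. rmx_msl = 30000 with hz = 100
	 * yields 3000 ticks, giving the conventional 60 second (2 * MSL)
	 * TIME_WAIT period used by the callers.
	 */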
3420
3421 return msl;
3422}
3423
3424static void
3425tcp_established(struct tcpcb *tp)
3426{
3427 tp->t_state = TCPS_ESTABLISHED;
3428 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep);
3429
3430 if (tp->t_rxtsyn > 0) {
3431 /*
3432 * RFC6298:
3433 * "If the timer expires awaiting the ACK of a SYN segment
3434 * and the TCP implementation is using an RTO less than 3
3435 * seconds, the RTO MUST be re-initialized to 3 seconds
3436 * when data transmission begins"
3437 */
3438 if (tp->t_rxtcur < TCPTV_RTOBASE3)
3439 tp->t_rxtcur = TCPTV_RTOBASE3;
3440 }
3441}