/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 * $DragonFly: src/sys/netinet/tcp_input.c,v 1.68 2008/08/22 09:14:17 sephe Exp $
 */
71 #include "opt_ipfw.h" /* for ipfw_fwd */
72 #include "opt_inet6.h"
73 #include "opt_ipsec.h"
74 #include "opt_tcpdebug.h"
75 #include "opt_tcp_input.h"
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/malloc.h>
83 #include <sys/proc.h> /* for proc0 declaration */
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/syslog.h>
88 #include <sys/in_cksum.h>
90 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
91 #include <machine/stdarg.h>
94 #include <net/route.h>
96 #include <netinet/in.h>
97 #include <netinet/in_systm.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */
100 #include <netinet/in_var.h>
101 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
102 #include <netinet/in_pcb.h>
103 #include <netinet/ip_var.h>
104 #include <netinet/ip6.h>
105 #include <netinet/icmp6.h>
106 #include <netinet6/nd6.h>
107 #include <netinet6/ip6_var.h>
108 #include <netinet6/in6_pcb.h>
109 #include <netinet/tcp.h>
110 #include <netinet/tcp_fsm.h>
111 #include <netinet/tcp_seq.h>
112 #include <netinet/tcp_timer.h>
113 #include <netinet/tcp_timer2.h>
114 #include <netinet/tcp_var.h>
115 #include <netinet6/tcp6_var.h>
116 #include <netinet/tcpip.h>
119 #include <netinet/tcp_debug.h>
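
/*
 * Debug state for the current segment: when the receiving socket has
 * SO_DEBUG set, copies of the incoming IP and TCP headers are saved
 * here (see the bcopy() calls below) so the segment can be traced later.
 */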
u_char tcp_saveipgen[40];	/* the size must be that of the largest IP header, currently IPv6 */
122 struct tcphdr tcp_savetcp;
126 #include <netproto/ipsec/ipsec.h>
127 #include <netproto/ipsec/ipsec6.h>
131 #include <netinet6/ipsec.h>
132 #include <netinet6/ipsec6.h>
133 #include <netproto/key/key.h>
136 MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");
139 static int log_in_vain = 0;
140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
141 &log_in_vain, 0, "Log all incoming TCP connections");
143 static int blackhole = 0;
144 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
145 &blackhole, 0, "Do not send RST when dropping refused connections");
147 int tcp_delack_enabled = 1;
148 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
149 &tcp_delack_enabled, 0,
150 "Delay ACK to try and piggyback it onto a data packet");
152 #ifdef TCP_DROP_SYNFIN
153 static int drop_synfin = 0;
154 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
155 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
158 static int tcp_do_limitedtransmit = 1;
159 SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
160 &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");
162 static int tcp_do_early_retransmit = 1;
163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
164 &tcp_do_early_retransmit, 0, "Early retransmit");
166 int tcp_aggregate_acks = 1;
167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
168 &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");
170 int tcp_do_rfc3390 = 1;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
173 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
175 static int tcp_do_eifel_detect = 1;
176 SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
177 &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");
179 static int tcp_do_abc = 1;
180 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
182 "TCP Appropriate Byte Counting (RFC 3465)");
185 * Define as tunable for easy testing with SACK on and off.
186 * Warning: do not change setting in the middle of an existing active TCP flow,
187 * else strange things might happen to that flow.
190 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
191 &tcp_do_sack, 0, "Enable SACK Algorithms");
193 int tcp_do_smartsack = 1;
194 SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
195 &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");
197 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
198 "TCP Segment Reassembly Queue");
200 int tcp_reass_maxseg = 0;
201 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
202 &tcp_reass_maxseg, 0,
203 "Global maximum number of TCP Segments in Reassembly Queue");
205 int tcp_reass_qsize = 0;
206 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
208 "Global number of TCP Segments currently in Reassembly Queue");
210 static int tcp_reass_overflows = 0;
211 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
212 &tcp_reass_overflows, 0,
213 "Global number of TCP Segment Reassembly Queue Overflows");
215 int tcp_do_autorcvbuf = 1;
216 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
217 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");
219 int tcp_autorcvbuf_inc = 16*1024;
220 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
221 &tcp_autorcvbuf_inc, 0,
222 "Incrementor step size of automatic receive buffer");
224 int tcp_autorcvbuf_max = 16*1024*1024;
225 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
226 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");
229 static void tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t);
230 static void tcp_pulloutofband(struct socket *,
231 struct tcphdr *, struct mbuf *, int);
232 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *,
234 static void tcp_xmit_timer(struct tcpcb *, int);
235 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
236 static void tcp_sack_rexmt(struct tcpcb *, struct tcphdr *);
238 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
240 #define ND6_HINT(tp) \
242 if ((tp) && (tp)->t_inpcb && \
243 ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
244 (tp)->t_inpcb->in6p_route.ro_rt) \
245 nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
252 * Indicate whether this ack should be delayed. We can delay the ack if
253 * - delayed acks are enabled and
254 * - there is no delayed ack timer in progress and
255 * - our last ack wasn't a 0-sized window. We never want to delay
256 * the ack that opens up a 0-sized window.
258 #define DELAY_ACK(tp) \
259 (tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
260 !(tp->t_flags & TF_RXWIN0SENT))
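
/*
 * Accept a window update from this segment (RFC 793 check): the segment
 * must carry a newer sequence number than the last update (snd_wl1 < seq),
 * or the same sequence number with a newer ack (snd_wl2 < ack), or the
 * same sequence and ack numbers but a larger advertised window.
 */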
262 #define acceptable_window_update(tp, th, tiwin) \
263 (SEQ_LT(tp->snd_wl1, th->th_seq) || \
264 (tp->snd_wl1 == th->th_seq && \
265 (SEQ_LT(tp->snd_wl2, th->th_ack) || \
266 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))
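
/*
 * tcp_reass() - insert the segment described by th/m into the reassembly
 * queue of tp, coalescing with adjacent entries where possible, and
 * present any data that has become contiguous at rcv_nxt to the socket.
 * The TH_FIN flag of the last segment handed to the socket is reported
 * back to the caller.
 */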
269 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
272 struct tseg_qent *p = NULL;
273 struct tseg_qent *te;
274 struct socket *so = tp->t_inpcb->inp_socket;
 * Call with th == NULL after becoming established to
 * force pre-ESTABLISHED data up to the user socket.
 * Limit the number of segments in the reassembly queue to prevent
 * holding on to too many segments (and thus running out of mbufs).
 * Be sure to let through the missing segment that caused this queue
 * to build up.  Always keep one global queue entry spare so that the
 * missing segment can be processed.
291 if (th->th_seq != tp->rcv_nxt &&
292 tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
293 tcp_reass_overflows++;
294 tcpstat.tcps_rcvmemdrop++;
296 /* no SACK block to report */
297 tp->reportblk.rblk_start = tp->reportblk.rblk_end;
301 /* Allocate a new queue entry. */
302 MALLOC(te, struct tseg_qent *, sizeof(struct tseg_qent), M_TSEGQ,
303 M_INTWAIT | M_NULLOK);
305 tcpstat.tcps_rcvmemdrop++;
307 /* no SACK block to report */
308 tp->reportblk.rblk_start = tp->reportblk.rblk_end;
314 * Find a segment which begins after this one does.
316 LIST_FOREACH(q, &tp->t_segq, tqe_q) {
317 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
323 * If there is a preceding segment, it may provide some of
324 * our data already. If so, drop the data from the incoming
325 * segment. If it provides all of our data, drop us.
330 /* conversion to int (in i) handles seq wraparound */
331 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
332 if (i > 0) { /* overlaps preceding segment */
333 tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
334 /* enclosing block starts w/ preceding segment */
335 tp->encloseblk.rblk_start = p->tqe_th->th_seq;
337 /* preceding encloses incoming segment */
338 tp->encloseblk.rblk_end = p->tqe_th->th_seq +
340 tcpstat.tcps_rcvduppack++;
341 tcpstat.tcps_rcvdupbyte += *tlenp;
346 * Try to present any queued data
347 * at the left window edge to the user.
348 * This is needed after the 3-WHS
351 goto present; /* ??? */
356 /* incoming segment end is enclosing block end */
357 tp->encloseblk.rblk_end = th->th_seq + *tlenp +
358 ((th->th_flags & TH_FIN) != 0);
359 /* trim end of reported D-SACK block */
360 tp->reportblk.rblk_end = th->th_seq;
363 tcpstat.tcps_rcvoopack++;
364 tcpstat.tcps_rcvoobyte += *tlenp;
367 * While we overlap succeeding segments trim them or,
368 * if they are completely covered, dequeue them.
371 tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
372 tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
373 struct tseg_qent *nq;
377 if (!(tp->t_flags & TF_DUPSEG)) { /* first time through */
378 tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
379 tp->encloseblk = tp->reportblk;
380 /* report trailing duplicate D-SACK segment */
381 tp->reportblk.rblk_start = q->tqe_th->th_seq;
383 if ((tp->t_flags & TF_ENCLOSESEG) &&
384 SEQ_GT(qend, tp->encloseblk.rblk_end)) {
385 /* extend enclosing block if one exists */
386 tp->encloseblk.rblk_end = qend;
388 if (i < q->tqe_len) {
389 q->tqe_th->th_seq += i;
395 nq = LIST_NEXT(q, tqe_q);
396 LIST_REMOVE(q, tqe_q);
403 /* Insert the new segment queue entry into place. */
406 te->tqe_len = *tlenp;
408 /* check if can coalesce with following segment */
409 if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
410 tcp_seq tend = te->tqe_th->th_seq + te->tqe_len;
412 te->tqe_len += q->tqe_len;
413 if (q->tqe_th->th_flags & TH_FIN)
414 te->tqe_th->th_flags |= TH_FIN;
415 m_cat(te->tqe_m, q->tqe_m);
416 tp->encloseblk.rblk_end = tend;
418 * When not reporting a duplicate segment, use
419 * the larger enclosing block as the SACK block.
421 if (!(tp->t_flags & TF_DUPSEG))
422 tp->reportblk.rblk_end = tend;
423 LIST_REMOVE(q, tqe_q);
429 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
431 /* check if can coalesce with preceding segment */
432 if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
433 p->tqe_len += te->tqe_len;
434 m_cat(p->tqe_m, te->tqe_m);
435 tp->encloseblk.rblk_start = p->tqe_th->th_seq;
437 * When not reporting a duplicate segment, use
438 * the larger enclosing block as the SACK block.
440 if (!(tp->t_flags & TF_DUPSEG))
441 tp->reportblk.rblk_start = p->tqe_th->th_seq;
445 LIST_INSERT_AFTER(p, te, tqe_q);
450 * Present data to user, advancing rcv_nxt through
451 * completed sequence space.
453 if (!TCPS_HAVEESTABLISHED(tp->t_state))
455 q = LIST_FIRST(&tp->t_segq);
456 if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt)
458 tp->rcv_nxt += q->tqe_len;
459 if (!(tp->t_flags & TF_DUPSEG)) {
460 /* no SACK block to report since ACK advanced */
461 tp->reportblk.rblk_start = tp->reportblk.rblk_end;
463 /* no enclosing block to report since ACK advanced */
464 tp->t_flags &= ~TF_ENCLOSESEG;
465 flags = q->tqe_th->th_flags & TH_FIN;
466 LIST_REMOVE(q, tqe_q);
467 KASSERT(LIST_EMPTY(&tp->t_segq) ||
468 LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt,
469 ("segment not coalesced"));
470 if (so->so_state & SS_CANTRCVMORE)
473 ssb_appendstream(&so->so_rcv, q->tqe_m);
482 * TCP input routine, follows pages 65-76 of the
483 * protocol specification dated September, 1981 very closely.
487 tcp6_input(struct mbuf **mp, int *offp, int proto)
489 struct mbuf *m = *mp;
490 struct in6_ifaddr *ia6;
492 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
495 * draft-itojun-ipv6-tcp-to-anycast
496 * better place to put this in?
498 ia6 = ip6_getdstifaddr(m);
499 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
502 ip6 = mtod(m, struct ip6_hdr *);
503 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
504 offsetof(struct ip6_hdr, ip6_dst));
505 return (IPPROTO_DONE);
508 tcp_input(m, *offp, proto);
509 return (IPPROTO_DONE);
514 tcp_input(struct mbuf *m, ...)
519 struct ip *ip = NULL;
521 struct inpcb *inp = NULL;
526 struct tcpcb *tp = NULL;
528 struct socket *so = 0;
530 boolean_t ourfinisacked, needoutput = FALSE;
533 struct tcpopt to; /* options in this segment */
534 struct rmxp_tao *taop; /* pointer to our TAO cache entry */
535 struct rmxp_tao tao_noncached; /* in case there's no cached entry */
536 struct sockaddr_in *next_hop = NULL;
537 int rstreason; /* For badport_bandlim accounting purposes */
539 struct ip6_hdr *ip6 = NULL;
543 const boolean_t isipv6 = FALSE;
550 off0 = __va_arg(ap, int);
551 proto = __va_arg(ap, int);
554 tcpstat.tcps_rcvtotal++;
556 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
559 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
560 KKASSERT(mtag != NULL);
561 next_hop = m_tag_data(mtag);
565 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE;
569 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
570 ip6 = mtod(m, struct ip6_hdr *);
571 tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0;
572 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
573 tcpstat.tcps_rcvbadsum++;
576 th = (struct tcphdr *)((caddr_t)ip6 + off0);
579 * Be proactive about unspecified IPv6 address in source.
580 * As we use all-zero to indicate unbounded/unconnected pcb,
581 * unspecified IPv6 address can be used to confuse us.
 * Note that packets with an unspecified IPv6 destination are
 * already dropped in ip6_input.
586 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
592 * Get IP and TCP header together in first mbuf.
593 * Note: IP leaves IP header in first mbuf.
595 if (off0 > sizeof(struct ip)) {
597 off0 = sizeof(struct ip);
599 /* already checked and pulled up in ip_demux() */
600 KASSERT(m->m_len >= sizeof(struct tcpiphdr),
601 ("TCP header not in one mbuf: m->m_len %d", m->m_len));
602 ip = mtod(m, struct ip *);
603 ipov = (struct ipovly *)ip;
604 th = (struct tcphdr *)((caddr_t)ip + off0);
607 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
608 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
609 th->th_sum = m->m_pkthdr.csum_data;
611 th->th_sum = in_pseudo(ip->ip_src.s_addr,
613 htonl(m->m_pkthdr.csum_data +
616 th->th_sum ^= 0xffff;
619 * Checksum extended TCP header and data.
621 len = sizeof(struct ip) + tlen;
622 bzero(ipov->ih_x1, sizeof ipov->ih_x1);
623 ipov->ih_len = (u_short)tlen;
624 ipov->ih_len = htons(ipov->ih_len);
625 th->th_sum = in_cksum(m, len);
628 tcpstat.tcps_rcvbadsum++;
632 /* Re-initialization for later version check */
633 ip->ip_v = IPVERSION;
638 * Check that TCP offset makes sense,
639 * pull out TCP options and adjust length. XXX
641 off = th->th_off << 2;
642 /* already checked and pulled up in ip_demux() */
643 KASSERT(off >= sizeof(struct tcphdr) && off <= tlen,
644 ("bad TCP data offset %d (tlen %d)", off, tlen));
645 tlen -= off; /* tlen is used instead of ti->ti_len */
646 if (off > sizeof(struct tcphdr)) {
648 IP6_EXTHDR_CHECK(m, off0, off, );
649 ip6 = mtod(m, struct ip6_hdr *);
650 th = (struct tcphdr *)((caddr_t)ip6 + off0);
652 /* already pulled up in ip_demux() */
653 KASSERT(m->m_len >= sizeof(struct ip) + off,
654 ("TCP header and options not in one mbuf: "
655 "m_len %d, off %d", m->m_len, off));
657 optlen = off - sizeof(struct tcphdr);
658 optp = (u_char *)(th + 1);
660 thflags = th->th_flags;
662 #ifdef TCP_DROP_SYNFIN
664 * If the drop_synfin option is enabled, drop all packets with
665 * both the SYN and FIN bits set. This prevents e.g. nmap from
666 * identifying the TCP/IP stack.
668 * This is a violation of the TCP specification.
670 if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN))
675 * Convert TCP protocol specific fields to host format.
677 th->th_seq = ntohl(th->th_seq);
678 th->th_ack = ntohl(th->th_ack);
679 th->th_win = ntohs(th->th_win);
680 th->th_urp = ntohs(th->th_urp);
683 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
684 * until after ip6_savecontrol() is called and before other functions
685 * which don't want those proto headers.
686 * Because ip6_savecontrol() is going to parse the mbuf to
687 * search for data to be passed up to user-land, it wants mbuf
688 * parameters to be unchanged.
689 * XXX: the call of ip6_savecontrol() has been obsoleted based on
690 * latest version of the advanced API (20020110).
drop_hdrlen = off0 + off;	/* total IP + TCP header length to strip later */
695 * Locate pcb for segment.
698 /* IPFIREWALL_FORWARD section */
699 if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */
701 * Transparently forwarded. Pretend to be the destination.
702 * already got one like this?
704 cpu = mycpu->gd_cpuid;
705 inp = in_pcblookup_hash(&tcbinfo[cpu],
706 ip->ip_src, th->th_sport,
707 ip->ip_dst, th->th_dport,
708 0, m->m_pkthdr.rcvif);
711 * It's new. Try to find the ambushing socket.
715 * The rest of the ipfw code stores the port in
717 * (The IP address is still in network order.)
719 in_port_t dport = next_hop->sin_port ?
720 htons(next_hop->sin_port) :
723 cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport,
724 next_hop->sin_addr.s_addr, dport);
725 inp = in_pcblookup_hash(&tcbinfo[cpu],
726 ip->ip_src, th->th_sport,
727 next_hop->sin_addr, dport,
728 1, m->m_pkthdr.rcvif);
732 inp = in6_pcblookup_hash(&tcbinfo[0],
733 &ip6->ip6_src, th->th_sport,
734 &ip6->ip6_dst, th->th_dport,
735 1, m->m_pkthdr.rcvif);
737 cpu = mycpu->gd_cpuid;
738 inp = in_pcblookup_hash(&tcbinfo[cpu],
739 ip->ip_src, th->th_sport,
740 ip->ip_dst, th->th_dport,
741 1, m->m_pkthdr.rcvif);
746 * If the state is CLOSED (i.e., TCB does not exist) then
747 * all data in the incoming segment is discarded.
748 * If the TCB exists but is in CLOSED state, it is embryonic,
749 * but should either do a listen or a connect soon.
754 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
756 char dbuf[sizeof "aaa.bbb.ccc.ddd"];
757 char sbuf[sizeof "aaa.bbb.ccc.ddd"];
761 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
764 strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
767 strcpy(dbuf, inet_ntoa(ip->ip_dst));
768 strcpy(sbuf, inet_ntoa(ip->ip_src));
770 switch (log_in_vain) {
772 if (!(thflags & TH_SYN))
776 "Connection attempt to TCP %s:%d "
777 "from %s:%d flags:0x%02x\n",
778 dbuf, ntohs(th->th_dport), sbuf,
779 ntohs(th->th_sport), thflags);
788 if (thflags & TH_SYN)
797 rstreason = BANDLIM_RST_CLOSEDPORT;
803 if (ipsec6_in_reject_so(m, inp->inp_socket)) {
804 ipsec6stat.in_polvio++;
808 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
809 ipsecstat.in_polvio++;
816 if (ipsec6_in_reject(m, inp))
819 if (ipsec4_in_reject(m, inp))
823 /* Check the minimum TTL for socket. */
825 if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl)
831 rstreason = BANDLIM_RST_CLOSEDPORT;
834 if (tp->t_state <= TCPS_CLOSED)
837 /* Unscale the window into a 32-bit value. */
838 if (!(thflags & TH_SYN))
839 tiwin = th->th_win << tp->snd_scale;
843 so = inp->inp_socket;
846 if (so->so_options & SO_DEBUG) {
847 ostate = tp->t_state;
849 bcopy(ip6, tcp_saveipgen, sizeof(*ip6));
851 bcopy(ip, tcp_saveipgen, sizeof(*ip));
856 bzero(&to, sizeof to);
858 if (so->so_options & SO_ACCEPTCONN) {
859 struct in_conninfo inc;
862 inc.inc_isipv6 = (isipv6 == TRUE);
865 inc.inc6_faddr = ip6->ip6_src;
866 inc.inc6_laddr = ip6->ip6_dst;
867 inc.inc6_route.ro_rt = NULL; /* XXX */
869 inc.inc_faddr = ip->ip_src;
870 inc.inc_laddr = ip->ip_dst;
871 inc.inc_route.ro_rt = NULL; /* XXX */
873 inc.inc_fport = th->th_sport;
874 inc.inc_lport = th->th_dport;
877 * If the state is LISTEN then ignore segment if it contains
878 * a RST. If the segment contains an ACK then it is bad and
879 * send a RST. If it does not contain a SYN then it is not
880 * interesting; drop it.
882 * If the state is SYN_RECEIVED (syncache) and seg contains
883 * an ACK, but not for our SYN/ACK, send a RST. If the seg
884 * contains a RST, check the sequence number to see if it
885 * is a valid reset segment.
887 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) {
888 if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) {
889 if (!syncache_expand(&inc, th, &so, m)) {
891 * No syncache entry, or ACK was not
892 * for our SYN/ACK. Send a RST.
894 tcpstat.tcps_badsyn++;
895 rstreason = BANDLIM_RST_OPENPORT;
900 * Could not complete 3-way handshake,
901 * connection is being closed down, and
902 * syncache will free mbuf.
906 * Socket is created in state SYN_RECEIVED.
907 * Continue processing segment.
912 * This is what would have happened in
913 * tcp_output() when the SYN,ACK was sent.
915 tp->snd_up = tp->snd_una;
916 tp->snd_max = tp->snd_nxt = tp->iss + 1;
917 tp->last_ack_sent = tp->rcv_nxt;
919 * XXX possible bug - it doesn't appear that tp->snd_wnd is unscaled
920 * until the _second_ ACK is received:
921 * rcv SYN (set wscale opts) --> send SYN/ACK, set snd_wnd = window.
922 * rcv ACK, calculate tiwin --> process SYN_RECEIVED, determine wscale,
923 * move to ESTAB, set snd_wnd to tiwin.
925 tp->snd_wnd = tiwin; /* unscaled */
928 if (thflags & TH_RST) {
929 syncache_chkrst(&inc, th);
932 if (thflags & TH_ACK) {
933 syncache_badack(&inc);
934 tcpstat.tcps_badsyn++;
935 rstreason = BANDLIM_RST_OPENPORT;
942 * Segment's flags are (SYN) or (SYN | FIN).
946 * If deprecated address is forbidden,
947 * we do not accept SYN to deprecated interface
948 * address to prevent any new inbound connection from
949 * getting established.
950 * When we do not accept SYN, we send a TCP RST,
951 * with deprecated source address (instead of dropping
952 * it). We compromise it as it is much better for peer
953 * to send a RST, and RST will be the final packet
956 * If we do not forbid deprecated addresses, we accept
957 * the SYN packet. RFC2462 does not suggest dropping
 * If we read RFC2462 5.5.4 carefully, it says the following:
960 * 1. use of deprecated addr with existing
961 * communication is okay - "SHOULD continue to be
963 * 2. use of it with new communication:
964 * (2a) "SHOULD NOT be used if alternate address
965 * with sufficient scope is available"
966 * (2b) nothing mentioned otherwise.
967 * Here we fall into (2b) case as we have no choice in
968 * our source address selection - we must obey the peer.
970 * The wording in RFC2462 is confusing, and there are
 * multiple passages describing deprecated address
972 * handling - worse, they are not exactly the same.
973 * I believe 5.5.4 is the best one, so we follow 5.5.4.
975 if (isipv6 && !ip6_use_deprecated) {
976 struct in6_ifaddr *ia6;
978 if ((ia6 = ip6_getdstifaddr(m)) &&
979 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
981 rstreason = BANDLIM_RST_OPENPORT;
987 * If it is from this socket, drop it, it must be forged.
988 * Don't bother responding if the destination was a broadcast.
990 if (th->th_dport == th->th_sport) {
992 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
996 if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
1001 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
1003 * Note that it is quite possible to receive unicast
1004 * link-layer packets with a broadcast IP address. Use
1005 * in_broadcast() to find them.
1007 if (m->m_flags & (M_BCAST | M_MCAST))
1010 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1011 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
1014 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1015 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1016 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1017 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
1021 * SYN appears to be valid; create compressed TCP state
1022 * for syncache, or perform t/tcp connection.
1024 if (so->so_qlen <= so->so_qlimit) {
1025 tcp_dooptions(&to, optp, optlen, TRUE);
1026 if (!syncache_add(&inc, &to, th, &so, m))
1030 * Entry added to syncache, mbuf used to
1031 * send SYN,ACK packet.
1035 * Segment passed TAO tests.
1038 tp = intotcpcb(inp);
1039 tp->snd_wnd = tiwin;
1040 tp->t_starttime = ticks;
1041 tp->t_state = TCPS_ESTABLISHED;
1044 * If there is a FIN, or if there is data and the
1045 * connection is local, then delay SYN,ACK(SYN) in
1046 * the hope of piggy-backing it on a response
1047 * segment. Otherwise must send ACK now in case
1048 * the other side is slow starting.
1050 if (DELAY_ACK(tp) &&
1051 ((thflags & TH_FIN) ||
1053 ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
1054 (!isipv6 && in_localaddr(inp->inp_faddr)))))) {
1055 tcp_callout_reset(tp, tp->tt_delack,
1056 tcp_delacktime, tcp_timer_delack);
1057 tp->t_flags |= TF_NEEDSYN;
1059 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1062 tcpstat.tcps_connects++;
1070 /* should not happen - syncache should pick up these connections */
1071 KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state"));
1074 * This is the second part of the MSS DoS prevention code (after
1075 * minmss on the sending side) and it deals with too many too small
1076 * tcp packets in a too short timeframe (1 second).
1078 * XXX Removed. This code was crap. It does not scale to network
1079 * speed, and default values break NFS. Gone.
1084 * Segment received on connection.
 * Reset idle time and keep-alive timer.  Don't waste time if less
 * than a second has elapsed.  Only update t_rcvtime for non-SYN
 * segments.
1090 * Handle the case where one side thinks the connection is established
1091 * but the other side has, say, rebooted without cleaning out the
1092 * connection. The SYNs could be construed as an attack and wind
1093 * up ignored, but in case it isn't an attack we can validate the
1094 * connection by forcing a keepalive.
1096 if (TCPS_HAVEESTABLISHED(tp->t_state) && (ticks - tp->t_rcvtime) > hz) {
1097 if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) {
1098 tp->t_flags |= TF_KEEPALIVE;
1099 tcp_callout_reset(tp, tp->tt_keep, hz / 2,
1102 tp->t_rcvtime = ticks;
1103 tp->t_flags &= ~TF_KEEPALIVE;
1104 tcp_callout_reset(tp, tp->tt_keep, tcp_keepidle,
 * XXX this is traditional behavior; it may need to be cleaned up.
1113 tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0);
1114 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1115 if (to.to_flags & TOF_SCALE) {
1116 tp->t_flags |= TF_RCVD_SCALE;
1117 tp->requested_s_scale = to.to_requested_s_scale;
1119 if (to.to_flags & TOF_TS) {
1120 tp->t_flags |= TF_RCVD_TSTMP;
1121 tp->ts_recent = to.to_tsval;
1122 tp->ts_recent_age = ticks;
1124 if (to.to_flags & (TOF_CC | TOF_CCNEW))
1125 tp->t_flags |= TF_RCVD_CC;
1126 if (to.to_flags & TOF_MSS)
1127 tcp_mss(tp, to.to_mss);
1129 * Only set the TF_SACK_PERMITTED per-connection flag
1130 * if we got a SACK_PERMITTED option from the other side
1131 * and the global tcp_do_sack variable is true.
1133 if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED))
1134 tp->t_flags |= TF_SACK_PERMITTED;
1138 * Header prediction: check for the two common cases
1139 * of a uni-directional data xfer. If the packet has
1140 * no control flags, is in-sequence, the window didn't
1141 * change and we're not retransmitting, it's a
1142 * candidate. If the length is zero and the ack moved
1143 * forward, we're the sender side of the xfer. Just
1144 * free the data acked & wake any higher level process
1145 * that was blocked waiting for space. If the length
1146 * is non-zero and the ack didn't move, we're the
1147 * receiver side. If we're getting packets in-order
1148 * (the reassembly queue is empty), add the data to
1149 * the socket buffer and note that we need a delayed ack.
1150 * Make sure that the hidden state-flags are also off.
1151 * Since we check for TCPS_ESTABLISHED above, it can only
1154 if (tp->t_state == TCPS_ESTABLISHED &&
1155 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1156 !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) &&
1157 (!(to.to_flags & TOF_TS) ||
1158 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
1160 * Using the CC option is compulsory if once started:
1161 * the segment is OK if no T/TCP was negotiated or
1162 * if the segment has a CC option equal to CCrecv
1164 ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) ||
1165 ((to.to_flags & TOF_CC) && to.to_cc == tp->cc_recv)) &&
1166 th->th_seq == tp->rcv_nxt &&
1167 tp->snd_nxt == tp->snd_max) {
1170 * If last ACK falls within this segment's sequence numbers,
1171 * record the timestamp.
1172 * NOTE that the test is modified according to the latest
1173 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1175 if ((to.to_flags & TOF_TS) &&
1176 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1177 tp->ts_recent_age = ticks;
1178 tp->ts_recent = to.to_tsval;
1182 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1183 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1184 tp->snd_cwnd >= tp->snd_wnd &&
1185 !IN_FASTRECOVERY(tp)) {
1187 * This is a pure ack for outstanding data.
1189 ++tcpstat.tcps_predack;
1191 * "bad retransmit" recovery
1193 * If Eifel detection applies, then
1194 * it is deterministic, so use it
1195 * unconditionally over the old heuristic.
1196 * Otherwise, fall back to the old heuristic.
1198 if (tcp_do_eifel_detect &&
1199 (to.to_flags & TOF_TS) && to.to_tsecr &&
1200 (tp->t_flags & TF_FIRSTACCACK)) {
1201 /* Eifel detection applicable. */
1202 if (to.to_tsecr < tp->t_rexmtTS) {
1203 tcp_revert_congestion_state(tp);
1204 ++tcpstat.tcps_eifeldetected;
1206 } else if (tp->t_rxtshift == 1 &&
1207 ticks < tp->t_badrxtwin) {
1208 tcp_revert_congestion_state(tp);
1209 ++tcpstat.tcps_rttdetected;
1211 tp->t_flags &= ~(TF_FIRSTACCACK |
1212 TF_FASTREXMT | TF_EARLYREXMT);
1214 * Recalculate the retransmit timer / rtt.
1216 * Some machines (certain windows boxes)
1217 * send broken timestamp replies during the
1218 * SYN+ACK phase, ignore timestamps of 0.
1220 if ((to.to_flags & TOF_TS) && to.to_tsecr) {
1222 ticks - to.to_tsecr + 1);
1223 } else if (tp->t_rtttime &&
1224 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1226 ticks - tp->t_rtttime);
1228 tcp_xmit_bandwidth_limit(tp, th->th_ack);
1229 acked = th->th_ack - tp->snd_una;
1230 tcpstat.tcps_rcvackpack++;
1231 tcpstat.tcps_rcvackbyte += acked;
1232 sbdrop(&so->so_snd.sb, acked);
1233 tp->snd_recover = th->th_ack - 1;
1234 tp->snd_una = th->th_ack;
1237 * Update window information.
1239 if (tiwin != tp->snd_wnd &&
1240 acceptable_window_update(tp, th, tiwin)) {
1241 /* keep track of pure window updates */
1242 if (tp->snd_wl2 == th->th_ack &&
1243 tiwin > tp->snd_wnd)
1244 tcpstat.tcps_rcvwinupd++;
1245 tp->snd_wnd = tiwin;
1246 tp->snd_wl1 = th->th_seq;
1247 tp->snd_wl2 = th->th_ack;
1248 if (tp->snd_wnd > tp->max_sndwnd)
1249 tp->max_sndwnd = tp->snd_wnd;
1252 ND6_HINT(tp); /* some progress has been done */
1254 * If all outstanding data are acked, stop
1255 * retransmit timer, otherwise restart timer
1256 * using current (possibly backed-off) value.
1257 * If process is waiting for space,
1258 * wakeup/selwakeup/signal. If data
1259 * are ready to send, let tcp_output
1260 * decide between more output or persist.
1262 if (tp->snd_una == tp->snd_max) {
1263 tcp_callout_stop(tp, tp->tt_rexmt);
1264 } else if (!tcp_callout_active(tp,
1266 tcp_callout_reset(tp, tp->tt_rexmt,
1267 tp->t_rxtcur, tcp_timer_rexmt);
1270 if (so->so_snd.ssb_cc > 0)
1274 } else if (tiwin == tp->snd_wnd &&
1275 th->th_ack == tp->snd_una &&
1276 LIST_EMPTY(&tp->t_segq) &&
1277 tlen <= ssb_space(&so->so_rcv)) {
1278 int newsize = 0; /* automatic sockbuf scaling */
1280 * This is a pure, in-sequence data packet
1281 * with nothing on the reassembly queue and
1282 * we have enough buffer space to take it.
1284 ++tcpstat.tcps_preddat;
1285 tp->rcv_nxt += tlen;
1286 tcpstat.tcps_rcvpack++;
1287 tcpstat.tcps_rcvbyte += tlen;
1288 ND6_HINT(tp); /* some progress has been done */
1290 * Automatic sizing of receive socket buffer. Often the send
1291 * buffer size is not optimally adjusted to the actual network
1292 * conditions at hand (delay bandwidth product). Setting the
1293 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (e.g. trans-continental/oceanic links).
1296 * On the receive side the socket buffer memory is only rarely
1297 * used to any significant extent. This allows us to be much
1298 * more aggressive in scaling the receive socket buffer. For
1299 * the case that the buffer space is actually used to a large
1300 * extent and we run out of kernel memory we can simply drop
1301 * the new segments; TCP on the sender will just retransmit it
1302 * later. Setting the buffer size too big may only consume too
1303 * much kernel memory if the application doesn't read() from
1304 * the socket or packet loss or reordering makes use of the
1307 * The criteria to step up the receive buffer one notch are:
1308 * 1. the number of bytes received during the time it takes
1309 * one timestamp to be reflected back to us (the RTT);
1310 * 2. received bytes per RTT is within seven eighth of the
1311 * current socket buffer size;
1312 * 3. receive buffer size has not hit maximal automatic size;
1314 * This algorithm does one step per RTT at most and only if
1315 * we receive a bulk stream w/o packet losses or reorderings.
1316 * Shrinking the buffer during idle times is not necessary as
1317 * it doesn't consume any memory when idle.
1319 * TODO: Only step up if the application is actually serving
1320 * the buffer to better manage the socket buffer resources.
1322 if (tcp_do_autorcvbuf &&
1324 (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) {
1325 if (to.to_tsecr > tp->rfbuf_ts &&
1326 to.to_tsecr - tp->rfbuf_ts < hz) {
1328 (so->so_rcv.ssb_hiwat / 8 * 7) &&
1329 so->so_rcv.ssb_hiwat <
1330 tcp_autorcvbuf_max) {
1332 min(so->so_rcv.ssb_hiwat +
1334 tcp_autorcvbuf_max);
1336 /* Start over with next RTT. */
1340 tp->rfbuf_cnt += tlen; /* add up */
1343 * Add data to socket buffer.
1345 if (so->so_state & SS_CANTRCVMORE) {
1349 * Set new socket buffer size.
1350 * Give up when limit is reached.
1353 if (!ssb_reserve(&so->so_rcv, newsize,
1355 so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;
1356 m_adj(m, drop_hdrlen); /* delayed header drop */
1357 ssb_appendstream(&so->so_rcv, m);
1361 * This code is responsible for most of the ACKs
1362 * the TCP stack sends back after receiving a data
1363 * packet. Note that the DELAY_ACK check fails if
1364 * the delack timer is already running, which results
1365 * in an ack being sent every other packet (which is
1368 * We then further aggregate acks by not actually
1369 * sending one until the protocol thread has completed
1370 * processing the current backlog of packets. This
1371 * does not delay the ack any further, but allows us
1372 * to take advantage of the packet aggregation that
1373 * high speed NICs do (usually blocks of 8-10 packets)
 * to send a single ack rather than four or five acks,
1375 * greatly reducing the ack rate, the return channel
1376 * bandwidth, and the protocol overhead on both ends.
1378 * Since this also has the effect of slowing down
1379 * the exponential slow-start ramp-up, systems with
1380 * very large bandwidth-delay products might want
1381 * to turn the feature off.
1383 if (DELAY_ACK(tp)) {
1384 tcp_callout_reset(tp, tp->tt_delack,
1385 tcp_delacktime, tcp_timer_delack);
1386 } else if (tcp_aggregate_acks) {
1387 tp->t_flags |= TF_ACKNOW;
1388 if (!(tp->t_flags & TF_ONOUTPUTQ)) {
1389 tp->t_flags |= TF_ONOUTPUTQ;
1390 tp->tt_cpu = mycpu->gd_cpuid;
1392 &tcpcbackq[tp->tt_cpu],
1396 tp->t_flags |= TF_ACKNOW;
1404 * Calculate amount of space in receive window,
1405 * and then do TCP input processing.
1406 * Receive window is amount of space in rcv queue,
1407 * but not less than advertised window.
1409 recvwin = ssb_space(&so->so_rcv);
1412 tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt));
1414 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1418 switch (tp->t_state) {
1420 * If the state is SYN_RECEIVED:
1421 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1423 case TCPS_SYN_RECEIVED:
1424 if ((thflags & TH_ACK) &&
1425 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1426 SEQ_GT(th->th_ack, tp->snd_max))) {
1427 rstreason = BANDLIM_RST_OPENPORT;
1433 * If the state is SYN_SENT:
1434 * if seg contains an ACK, but not for our SYN, drop the input.
1435 * if seg contains a RST, then drop the connection.
1436 * if seg does not contain SYN, then drop it.
1437 * Otherwise this is an acceptable SYN segment
1438 * initialize tp->rcv_nxt and tp->irs
1439 * if seg contains ack then advance tp->snd_una
1440 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1441 * arrange for segment to be acked (eventually)
1442 * continue processing rest of data/controls, beginning with URG
1445 if ((taop = tcp_gettaocache(&inp->inp_inc)) == NULL) {
1446 taop = &tao_noncached;
1447 bzero(taop, sizeof *taop);
1450 if ((thflags & TH_ACK) &&
1451 (SEQ_LEQ(th->th_ack, tp->iss) ||
1452 SEQ_GT(th->th_ack, tp->snd_max))) {
1454 * If we have a cached CCsent for the remote host,
1455 * hence we haven't just crashed and restarted,
1456 * do not send a RST. This may be a retransmission
1457 * from the other side after our earlier ACK was lost.
1458 * Our new SYN, when it arrives, will serve as the
1461 if (taop->tao_ccsent != 0)
1464 rstreason = BANDLIM_UNLIMITED;
1468 if (thflags & TH_RST) {
1469 if (thflags & TH_ACK)
1470 tp = tcp_drop(tp, ECONNREFUSED);
1473 if (!(thflags & TH_SYN))
1475 tp->snd_wnd = th->th_win; /* initial send window */
1476 tp->cc_recv = to.to_cc; /* foreign CC */
1478 tp->irs = th->th_seq;
1480 if (thflags & TH_ACK) {
1482 * Our SYN was acked. If segment contains CC.ECHO
1483 * option, check it to make sure this segment really
1484 * matches our SYN. If not, just drop it as old
1485 * duplicate, but send an RST if we're still playing
1486 * by the old rules. If no CC.ECHO option, make sure
1487 * we don't get fooled into using T/TCP.
1489 if (to.to_flags & TOF_CCECHO) {
1490 if (tp->cc_send != to.to_ccecho) {
1491 if (taop->tao_ccsent != 0)
1494 rstreason = BANDLIM_UNLIMITED;
1499 tp->t_flags &= ~TF_RCVD_CC;
1500 tcpstat.tcps_connects++;
1502 /* Do window scaling on this connection? */
1503 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
1504 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
1505 tp->snd_scale = tp->requested_s_scale;
1506 tp->rcv_scale = tp->request_r_scale;
1508 /* Segment is acceptable, update cache if undefined. */
1509 if (taop->tao_ccsent == 0)
1510 taop->tao_ccsent = to.to_ccecho;
1512 tp->rcv_adv += tp->rcv_wnd;
1513 tp->snd_una++; /* SYN is acked */
1514 tcp_callout_stop(tp, tp->tt_rexmt);
1516 * If there's data, delay ACK; if there's also a FIN
1517 * ACKNOW will be turned on later.
1519 if (DELAY_ACK(tp) && tlen != 0) {
1520 tcp_callout_reset(tp, tp->tt_delack,
1521 tcp_delacktime, tcp_timer_delack);
1523 tp->t_flags |= TF_ACKNOW;
1526 * Received <SYN,ACK> in SYN_SENT[*] state.
1528 * SYN_SENT --> ESTABLISHED
1529 * SYN_SENT* --> FIN_WAIT_1
1531 tp->t_starttime = ticks;
1532 if (tp->t_flags & TF_NEEDFIN) {
1533 tp->t_state = TCPS_FIN_WAIT_1;
1534 tp->t_flags &= ~TF_NEEDFIN;
1537 tp->t_state = TCPS_ESTABLISHED;
1538 tcp_callout_reset(tp, tp->tt_keep, tcp_keepidle,
1543 * Received initial SYN in SYN-SENT[*] state =>
1544 * simultaneous open. If segment contains CC option
1545 * and there is a cached CC, apply TAO test.
 * If it succeeds, connection is half-synchronized.
1547 * Otherwise, do 3-way handshake:
1548 * SYN-SENT -> SYN-RECEIVED
1549 * SYN-SENT* -> SYN-RECEIVED*
1550 * If there was no CC option, clear cached CC value.
1552 tp->t_flags |= TF_ACKNOW;
1553 tcp_callout_stop(tp, tp->tt_rexmt);
1554 if (to.to_flags & TOF_CC) {
1555 if (taop->tao_cc != 0 &&
1556 CC_GT(to.to_cc, taop->tao_cc)) {
1558 * update cache and make transition:
1559 * SYN-SENT -> ESTABLISHED*
1560 * SYN-SENT* -> FIN-WAIT-1*
1562 taop->tao_cc = to.to_cc;
1563 tp->t_starttime = ticks;
1564 if (tp->t_flags & TF_NEEDFIN) {
1565 tp->t_state = TCPS_FIN_WAIT_1;
1566 tp->t_flags &= ~TF_NEEDFIN;
1568 tp->t_state = TCPS_ESTABLISHED;
1569 tcp_callout_reset(tp,
1570 tp->tt_keep, tcp_keepidle,
1573 tp->t_flags |= TF_NEEDSYN;
1575 tp->t_state = TCPS_SYN_RECEIVED;
1577 /* CC.NEW or no option => invalidate cache */
1579 tp->t_state = TCPS_SYN_RECEIVED;
1585 * Advance th->th_seq to correspond to first data byte.
1586 * If data, trim to stay within window,
1587 * dropping FIN if necessary.
1590 if (tlen > tp->rcv_wnd) {
1591 todrop = tlen - tp->rcv_wnd;
1595 tcpstat.tcps_rcvpackafterwin++;
1596 tcpstat.tcps_rcvbyteafterwin += todrop;
1598 tp->snd_wl1 = th->th_seq - 1;
1599 tp->rcv_up = th->th_seq;
1601 * Client side of transaction: already sent SYN and data.
1602 * If the remote host used T/TCP to validate the SYN,
1603 * our data will be ACK'd; if so, enter normal data segment
1604 * processing in the middle of step 5, ack processing.
1605 * Otherwise, goto step 6.
1607 if (thflags & TH_ACK)
1613 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1614 * if segment contains a SYN and CC [not CC.NEW] option:
1615 * if state == TIME_WAIT and connection duration > MSL,
1616 * drop packet and send RST;
1618 * if SEG.CC > CCrecv then is new SYN, and can implicitly
1619 * ack the FIN (and data) in retransmission queue.
1620 * Complete close and delete TCPCB. Then reprocess
1621 * segment, hoping to find new TCPCB in LISTEN state;
1623 * else must be old SYN; drop it.
1624 * else do normal processing.
1628 case TCPS_TIME_WAIT:
1629 if ((thflags & TH_SYN) &&
1630 (to.to_flags & TOF_CC) && tp->cc_recv != 0) {
1631 if (tp->t_state == TCPS_TIME_WAIT &&
1632 (ticks - tp->t_starttime) > tcp_msl) {
1633 rstreason = BANDLIM_UNLIMITED;
1636 if (CC_GT(to.to_cc, tp->cc_recv)) {
1643 break; /* continue normal processing */
1647 * States other than LISTEN or SYN_SENT.
1648 * First check the RST flag and sequence number since reset segments
1649 * are exempt from the timestamp and connection count tests. This
1650 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1651 * below which allowed reset segments in half the sequence space
 * to fall through and be processed (which gives forged reset
1653 * segments with a random sequence number a 50 percent chance of
1654 * killing a connection).
1655 * Then check timestamp, if present.
1656 * Then check the connection count, if present.
1657 * Then check that at least some bytes of segment are within
1658 * receive window. If segment begins before rcv_nxt,
1659 * drop leading data (and SYN); if nothing left, just ack.
1662 * If the RST bit is set, check the sequence number to see
1663 * if this is a valid reset segment.
1665 * In all states except SYN-SENT, all reset (RST) segments
1666 * are validated by checking their SEQ-fields. A reset is
1667 * valid if its sequence number is in the window.
1668 * Note: this does not take into account delayed ACKs, so
1669 * we should test against last_ack_sent instead of rcv_nxt.
1670 * The sequence number in the reset segment is normally an
1671 * echo of our outgoing acknowledgement numbers, but some hosts
1672 * send a reset with the sequence number at the rightmost edge
1673 * of our receive window, and we have to handle this case.
 * If we have multiple segments in flight, the initial reset
1675 * segment sequence numbers will be to the left of last_ack_sent,
1676 * but they will eventually catch up.
1677 * In any case, it never made sense to trim reset segments to
1678 * fit the receive window since RFC 1122 says:
1679 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1681 * A TCP SHOULD allow a received RST segment to include data.
1684 * It has been suggested that a RST segment could contain
1685 * ASCII text that encoded and explained the cause of the
1686 * RST. No standard has yet been established for such
1689 * If the reset segment passes the sequence number test examine
1691 * SYN_RECEIVED STATE:
1692 * If passive open, return to LISTEN state.
1693 * If active open, inform user that connection was refused.
1694 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1695 * Inform user that connection was reset, and close tcb.
1696 * CLOSING, LAST_ACK STATES:
1699 * Drop the segment - see Stevens, vol. 2, p. 964 and
1702 if (thflags & TH_RST) {
1703 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1704 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1705 switch (tp->t_state) {
1707 case TCPS_SYN_RECEIVED:
1708 so->so_error = ECONNREFUSED;
1711 case TCPS_ESTABLISHED:
1712 case TCPS_FIN_WAIT_1:
1713 case TCPS_FIN_WAIT_2:
1714 case TCPS_CLOSE_WAIT:
1715 so->so_error = ECONNRESET;
1717 tp->t_state = TCPS_CLOSED;
1718 tcpstat.tcps_drops++;
1727 case TCPS_TIME_WAIT:
1735 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1736 * and it's less than ts_recent, drop it.
1738 if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 &&
1739 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1741 /* Check to see if ts_recent is over 24 days old. */
1742 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1744 * Invalidate ts_recent. If this segment updates
1745 * ts_recent, the age will be reset later and ts_recent
1746 * will get a valid value. If it does not, setting
1747 * ts_recent to zero will at least satisfy the
1748 * requirement that zero be placed in the timestamp
1749 * echo reply when ts_recent isn't valid. The
1750 * age isn't reset until we get a valid ts_recent
1751 * because we don't want out-of-order segments to be
1752 * dropped when ts_recent is old.
1756 tcpstat.tcps_rcvduppack++;
1757 tcpstat.tcps_rcvdupbyte += tlen;
1758 tcpstat.tcps_pawsdrop++;
1767 * If T/TCP was negotiated and the segment doesn't have CC,
1768 * or if its CC is wrong then drop the segment.
1769 * RST segments do not have to comply with this.
1771 if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) &&
1772 (!(to.to_flags & TOF_CC) || tp->cc_recv != to.to_cc))
1776 * In the SYN-RECEIVED state, validate that the packet belongs to
1777 * this connection before trimming the data to fit the receive
1778 * window. Check the sequence number versus IRS since we know
1779 * the sequence numbers haven't wrapped. This is a partial fix
1780 * for the "LAND" DoS attack.
1782 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1783 rstreason = BANDLIM_RST_OPENPORT;
1787 todrop = tp->rcv_nxt - th->th_seq;
1789 if (TCP_DO_SACK(tp)) {
1790 /* Report duplicate segment at head of packet. */
1791 tp->reportblk.rblk_start = th->th_seq;
1792 tp->reportblk.rblk_end = th->th_seq + tlen;
1793 if (thflags & TH_FIN)
1794 ++tp->reportblk.rblk_end;
1795 if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt))
1796 tp->reportblk.rblk_end = tp->rcv_nxt;
1797 tp->t_flags |= (TF_DUPSEG | TF_SACKLEFT | TF_ACKNOW);
1799 if (thflags & TH_SYN) {
1809 * Following if statement from Stevens, vol. 2, p. 960.
1811 if (todrop > tlen ||
1812 (todrop == tlen && !(thflags & TH_FIN))) {
1814 * Any valid FIN must be to the left of the window.
1815 * At this point the FIN must be a duplicate or out
1816 * of sequence; drop it.
1821 * Send an ACK to resynchronize and drop any data.
1822 * But keep on processing for RST or ACK.
1824 tp->t_flags |= TF_ACKNOW;
1826 tcpstat.tcps_rcvduppack++;
1827 tcpstat.tcps_rcvdupbyte += todrop;
1829 tcpstat.tcps_rcvpartduppack++;
1830 tcpstat.tcps_rcvpartdupbyte += todrop;
1832 drop_hdrlen += todrop; /* drop from the top afterwards */
1833 th->th_seq += todrop;
1835 if (th->th_urp > todrop)
1836 th->th_urp -= todrop;
1844 * If new data are received on a connection after the
1845 * user processes are gone, then RST the other end.
1847 if ((so->so_state & SS_NOFDREF) &&
1848 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1850 tcpstat.tcps_rcvafterclose++;
1851 rstreason = BANDLIM_UNLIMITED;
1856 * If segment ends after window, drop trailing data
1857 * (and PUSH and FIN); if nothing left, just ACK.
todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);	/* bytes past the right edge of the receive window */
1861 tcpstat.tcps_rcvpackafterwin++;
1862 if (todrop >= tlen) {
1863 tcpstat.tcps_rcvbyteafterwin += tlen;
1865 * If a new connection request is received
1866 * while in TIME_WAIT, drop the old connection
1867 * and start over if the sequence numbers
1868 * are above the previous ones.
1870 if (thflags & TH_SYN &&
1871 tp->t_state == TCPS_TIME_WAIT &&
1872 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1877 * If window is closed can only take segments at
1878 * window edge, and have to drop data and PUSH from
1879 * incoming segments. Continue processing, but
1880 * remember to ack. Otherwise, drop segment
1883 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1884 tp->t_flags |= TF_ACKNOW;
1885 tcpstat.tcps_rcvwinprobe++;
1889 tcpstat.tcps_rcvbyteafterwin += todrop;
1892 thflags &= ~(TH_PUSH | TH_FIN);
1896 * If last ACK falls within this segment's sequence numbers,
1897 * record its timestamp.
1899 * 1) That the test incorporates suggestions from the latest
1900 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1901 * 2) That updating only on newer timestamps interferes with
1902 * our earlier PAWS tests, so this check should be solely
1903 * predicated on the sequence space of this segment.
1904 * 3) That we modify the segment boundary check to be
1905 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN
1906 * instead of RFC1323's
1907 * Last.ACK.Sent < SEG.SEQ + SEG.LEN,
1908 * This modified check allows us to overcome RFC1323's
1909 * limitations as described in Stevens TCP/IP Illustrated
1910 * Vol. 2 p.869. In such cases, we can still calculate the
1911 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1913 if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
1914 SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen
1915 + ((thflags & TH_SYN) != 0)
1916 + ((thflags & TH_FIN) != 0)))) {
1917 tp->ts_recent_age = ticks;
1918 tp->ts_recent = to.to_tsval;
1922 * If a SYN is in the window, then this is an
1923 * error and we send an RST and drop the connection.
1925 if (thflags & TH_SYN) {
1926 tp = tcp_drop(tp, ECONNRESET);
1927 rstreason = BANDLIM_UNLIMITED;
1932 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1933 * flag is on (half-synchronized state), then queue data for
1934 * later processing; else drop segment and return.
1936 if (!(thflags & TH_ACK)) {
1937 if (tp->t_state == TCPS_SYN_RECEIVED ||
1938 (tp->t_flags & TF_NEEDSYN))
1947 switch (tp->t_state) {
1949 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
1950 * ESTABLISHED state and continue processing.
1951 * The ACK was checked above.
1953 case TCPS_SYN_RECEIVED:
1955 tcpstat.tcps_connects++;
1957 /* Do window scaling? */
1958 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
1959 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
1960 tp->snd_scale = tp->requested_s_scale;
1961 tp->rcv_scale = tp->request_r_scale;
1964 * Upon successful completion of 3-way handshake,
1965 * update cache.CC if it was undefined, pass any queued
1966 * data to the user, and advance state appropriately.
1968 if ((taop = tcp_gettaocache(&inp->inp_inc)) != NULL &&
1970 taop->tao_cc = tp->cc_recv;
1974 * SYN-RECEIVED -> ESTABLISHED
1975 * SYN-RECEIVED* -> FIN-WAIT-1
1977 tp->t_starttime = ticks;
1978 if (tp->t_flags & TF_NEEDFIN) {
1979 tp->t_state = TCPS_FIN_WAIT_1;
1980 tp->t_flags &= ~TF_NEEDFIN;
1982 tp->t_state = TCPS_ESTABLISHED;
1983 tcp_callout_reset(tp, tp->tt_keep, tcp_keepidle,
1987 * If segment contains data or ACK, will call tcp_reass()
1988 * later; if not, do so now to pass queued data to user.
1990 if (tlen == 0 && !(thflags & TH_FIN))
1991 tcp_reass(tp, NULL, NULL, NULL);
1995 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1996 * ACKs. If the ack is in the range
1997 * tp->snd_una < th->th_ack <= tp->snd_max
1998 * then advance tp->snd_una to th->th_ack and drop
1999 * data from the retransmission queue. If this ACK reflects
2000 * more up to date window information we update our window information.
2002 case TCPS_ESTABLISHED:
2003 case TCPS_FIN_WAIT_1:
2004 case TCPS_FIN_WAIT_2:
2005 case TCPS_CLOSE_WAIT:
2008 case TCPS_TIME_WAIT:
2010 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2011 if (TCP_DO_SACK(tp))
2012 tcp_sack_update_scoreboard(tp, &to);
2013 if (tlen != 0 || tiwin != tp->snd_wnd) {
2017 tcpstat.tcps_rcvdupack++;
2018 if (!tcp_callout_active(tp, tp->tt_rexmt) ||
2019 th->th_ack != tp->snd_una) {
2024 * We have outstanding data (other than
2025 * a window probe), this is a completely
2026 * duplicate ack (ie, window info didn't
2027 * change), the ack is the biggest we've
2028 * seen and we've seen exactly our rexmt
 * threshold of them, so assume a packet
2030 * has been dropped and retransmit it.
2031 * Kludge snd_nxt & the congestion
2032 * window so we send only this one
2035 if (IN_FASTRECOVERY(tp)) {
2036 if (TCP_DO_SACK(tp)) {
/* No artificial cwnd inflation. */
2038 tcp_sack_rexmt(tp, th);
2041 * Dup acks mean that packets
2042 * have left the network
2043 * (they're now cached at the
2044 * receiver) so bump cwnd by
2045 * the amount in the receiver
 * to keep a constant cwnd's worth of
2047 * packets in the network.
2049 tp->snd_cwnd += tp->t_maxseg;
2052 } else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2055 } else if (++tp->t_dupacks == tcprexmtthresh) {
2056 tcp_seq old_snd_nxt;
2060 if (tcp_do_eifel_detect &&
2061 (tp->t_flags & TF_RCVD_TSTMP)) {
2062 tcp_save_congestion_state(tp);
2063 tp->t_flags |= TF_FASTREXMT;
2066 * We know we're losing at the current
2067 * window size, so do congestion avoidance:
2068 * set ssthresh to half the current window
2069 * and pull our congestion window back to the
2072 win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
if (win < 2)
win = 2;
2076 tp->snd_ssthresh = win * tp->t_maxseg;
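/*
 * Illustrative sketch (hypothetical numbers): with snd_wnd == snd_cwnd ==
 * 32 * t_maxseg, win = 32 / 2 = 16, so snd_ssthresh becomes 16 * t_maxseg,
 * i.e. half the data that was in flight when the loss was detected,
 * and never less than 2 segments.
 */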
2077 ENTER_FASTRECOVERY(tp);
2078 tp->snd_recover = tp->snd_max;
2079 tcp_callout_stop(tp, tp->tt_rexmt);
2081 old_snd_nxt = tp->snd_nxt;
2082 tp->snd_nxt = th->th_ack;
2083 tp->snd_cwnd = tp->t_maxseg;
2085 ++tcpstat.tcps_sndfastrexmit;
2086 tp->snd_cwnd = tp->snd_ssthresh;
2087 tp->rexmt_high = tp->snd_nxt;
2088 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
2089 tp->snd_nxt = old_snd_nxt;
2090 KASSERT(tp->snd_limited <= 2,
2091 ("tp->snd_limited too big"));
2092 if (TCP_DO_SACK(tp))
2093 tcp_sack_rexmt(tp, th);
2095 tp->snd_cwnd += tp->t_maxseg *
2096 (tp->t_dupacks - tp->snd_limited);
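/*
 * Net effect for the non-SACK path (sketch, hypothetical values): with
 * t_dupacks == 3 and snd_limited == 0, snd_cwnd ends up as
 * snd_ssthresh + 3 * t_maxseg -- the classic NewReno inflation of
 * ssthresh by one segment for each dup ack received so far.
 */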
2097 } else if (tcp_do_limitedtransmit) {
2098 u_long oldcwnd = tp->snd_cwnd;
2099 tcp_seq oldsndmax = tp->snd_max;
2100 tcp_seq oldsndnxt = tp->snd_nxt;
2101 /* outstanding data */
2102 uint32_t ownd = tp->snd_max - tp->snd_una;
2105 #define iceildiv(n, d) (((n)+(d)-1) / (d))
2107 KASSERT(tp->t_dupacks == 1 ||
2109 ("dupacks not 1 or 2"));
2110 if (tp->t_dupacks == 1)
2111 tp->snd_limited = 0;
2112 tp->snd_nxt = tp->snd_max;
2113 tp->snd_cwnd = ownd +
2114 (tp->t_dupacks - tp->snd_limited) * tp->t_maxseg;
2119 * Other acks may have been processed,
2120 * snd_nxt cannot be reset to a value less
2123 if (SEQ_LT(oldsndnxt, oldsndmax)) {
2124 if (SEQ_GT(oldsndnxt, tp->snd_una))
2125 tp->snd_nxt = oldsndnxt;
2127 tp->snd_nxt = tp->snd_una;
2129 tp->snd_cwnd = oldcwnd;
2130 sent = tp->snd_max - oldsndmax;
2131 if (sent > tp->t_maxseg) {
2132 KASSERT((tp->t_dupacks == 2 &&
2133 tp->snd_limited == 0) ||
2134 (sent == tp->t_maxseg + 1 &&
2135 tp->t_flags & TF_SENTFIN),
2137 KASSERT(sent <= tp->t_maxseg * 2,
2138 ("sent too many segments"));
2139 tp->snd_limited = 2;
2140 tcpstat.tcps_sndlimited += 2;
2141 } else if (sent > 0) {
2143 ++tcpstat.tcps_sndlimited;
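/*
 * Limited transmit sketch (hypothetical numbers): with
 * ownd = 2 * t_maxseg outstanding and the first dup ack
 * (t_dupacks == 1, snd_limited == 0), snd_cwnd is temporarily set to
 * ownd + 1 * t_maxseg so tcp_output() may send one previously unsent
 * segment; the saved cwnd is restored immediately afterwards.
 */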
2144 } else if (tcp_do_early_retransmit &&
2145 (tcp_do_eifel_detect &&
2146 (tp->t_flags & TF_RCVD_TSTMP)) &&
2147 ownd < 4 * tp->t_maxseg &&
2148 tp->t_dupacks + 1 >=
2149 iceildiv(ownd, tp->t_maxseg) &&
2150 (!TCP_DO_SACK(tp) ||
2151 ownd <= tp->t_maxseg ||
2152 tcp_sack_has_sacked(&tp->scb,
2153 ownd - tp->t_maxseg))) {
2154 ++tcpstat.tcps_sndearlyrexmit;
2155 tp->t_flags |= TF_EARLYREXMT;
2156 goto fastretransmit;
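/*
 * Early retransmit sketch (hypothetical numbers): with only
 * ownd = 3 * t_maxseg outstanding, iceildiv(ownd, t_maxseg) == 3, so
 * the second dup ack (t_dupacks + 1 == 3) already triggers a fast
 * retransmit instead of waiting for a dup-ack threshold that a
 * three-segment flight can never generate.
 */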
2162 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
2164 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2166 * Detected optimistic ACK attack.
2167 * Force slow-start to de-synchronize attack.
2169 tp->snd_cwnd = tp->t_maxseg;
2172 tcpstat.tcps_rcvacktoomuch++;
2176 * If we reach this point, ACK is not a duplicate,
2177 * i.e., it ACKs something we sent.
2179 if (tp->t_flags & TF_NEEDSYN) {
2181 * T/TCP: Connection was half-synchronized, and our
2182 * SYN has been ACK'd (so connection is now fully
2183 * synchronized). Go to non-starred state,
2184 * increment snd_una for ACK of SYN, and check if
2185 * we can do window scaling.
2187 tp->t_flags &= ~TF_NEEDSYN;
2189 /* Do window scaling? */
2190 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
2191 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
2192 tp->snd_scale = tp->requested_s_scale;
2193 tp->rcv_scale = tp->request_r_scale;
2198 acked = th->th_ack - tp->snd_una;
2199 tcpstat.tcps_rcvackpack++;
2200 tcpstat.tcps_rcvackbyte += acked;
2202 if (tcp_do_eifel_detect && acked > 0 &&
2203 (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
2204 (tp->t_flags & TF_FIRSTACCACK)) {
2205 /* Eifel detection applicable. */
2206 if (to.to_tsecr < tp->t_rexmtTS) {
2207 ++tcpstat.tcps_eifeldetected;
2208 tcp_revert_congestion_state(tp);
2209 if (tp->t_rxtshift == 1 &&
2210 ticks >= tp->t_badrxtwin)
2211 ++tcpstat.tcps_rttcantdetect;
2213 } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2215 * If we just performed our first retransmit,
2216 * and the ACK arrives within our recovery window,
2217 * then it was a mistake to do the retransmit
2218 * in the first place. Recover our original cwnd
2219 * and ssthresh, and proceed to transmit where we
2222 tcp_revert_congestion_state(tp);
2223 ++tcpstat.tcps_rttdetected;
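/*
 * Sketch of the two spurious-retransmit checks above (hypothetical
 * tick values): if we retransmitted with t_rexmtTS == 1000 and the
 * ACK echoes a timestamp of 990, that ACK was generated by the
 * original transmission, so the retransmit was spurious and the saved
 * congestion state is restored (Eifel).  Without usable timestamps,
 * an ACK arriving within t_badrxtwin ticks of the first retransmit is
 * taken as the same signal.
 */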
2227 * If we have a timestamp reply, update smoothed
2228 * round trip time. If no timestamp is present but
2229 * transmit timer is running and timed sequence
2230 * number was acked, update smoothed round trip time.
2231 * Since we now have an rtt measurement, cancel the
2232 * timer backoff (cf., Phil Karn's retransmit alg.).
2233 * Recompute the initial retransmit timer.
2235 * Some machines (certain Windows boxes) send broken
2236 * timestamp replies during the SYN+ACK phase, ignore
2239 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
2240 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2241 else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
2242 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2243 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2246 * If no data (only SYN) was ACK'd,
2247 * skip rest of ACK processing.
2252 /* Stop looking for an acceptable ACK since one was received. */
2253 tp->t_flags &= ~(TF_FIRSTACCACK | TF_FASTREXMT | TF_EARLYREXMT);
2255 if (acked > so->so_snd.ssb_cc) {
2256 tp->snd_wnd -= so->so_snd.ssb_cc;
2257 sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
2258 ourfinisacked = TRUE;
2260 sbdrop(&so->so_snd.sb, acked);
2261 tp->snd_wnd -= acked;
2262 ourfinisacked = FALSE;
2267 * Update window information.
2268 * Don't look at window if no ACK:
2269 * TAC's send garbage on first SYN.
2271 if (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2272 (tp->snd_wl1 == th->th_seq &&
2273 (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2274 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) {
2275 /* keep track of pure window updates */
2276 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2277 tiwin > tp->snd_wnd)
2278 tcpstat.tcps_rcvwinupd++;
2279 tp->snd_wnd = tiwin;
2280 tp->snd_wl1 = th->th_seq;
2281 tp->snd_wl2 = th->th_ack;
2282 if (tp->snd_wnd > tp->max_sndwnd)
2283 tp->max_sndwnd = tp->snd_wnd;
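/*
 * Illustration of the acceptance test above: a segment with th_seq
 * newer than snd_wl1, or the same th_seq but a newer th_ack, or the
 * same th_seq/th_ack but a larger advertised window, may update
 * snd_wnd; anything older is a stale duplicate and is ignored so the
 * send window cannot move backwards.
 */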
2287 tp->snd_una = th->th_ack;
2288 if (TCP_DO_SACK(tp))
2289 tcp_sack_update_scoreboard(tp, &to);
2290 if (IN_FASTRECOVERY(tp)) {
2291 if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2292 EXIT_FASTRECOVERY(tp);
2295 * If the congestion window was inflated
2296 * to account for the other side's
2297 * cached packets, retract it.
2299 if (!TCP_DO_SACK(tp))
2300 tp->snd_cwnd = tp->snd_ssthresh;
2303 * Window inflation should have left us
2304 * with approximately snd_ssthresh outstanding
2305 * data. But, in case we would be inclined
2306 * to send a burst, better do it using
2309 if (SEQ_GT(th->th_ack + tp->snd_cwnd,
2310 tp->snd_max + 2 * tp->t_maxseg))
2312 tp->snd_cwnd = (tp->snd_max - tp->snd_una) + 2 * tp->t_maxseg;
2317 if (TCP_DO_SACK(tp)) {
2318 tp->snd_max_rexmt = tp->snd_max;
2319 tcp_sack_rexmt(tp, th);
2321 tcp_newreno_partial_ack(tp, th, acked);
2327 * Open the congestion window. When in slow-start,
2328 * open exponentially: maxseg per packet. Otherwise,
2329 * open linearly: maxseg per window.
2331 if (tp->snd_cwnd <= tp->snd_ssthresh) {
2333 u_int abc_sslimit = (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
2334 tp->t_maxseg : 2 * tp->t_maxseg);
2337 tp->snd_cwnd += tcp_do_abc ?
2338 min(acked, abc_sslimit) : tp->t_maxseg;
2340 /* linear increase */
2341 tp->snd_wacked += tcp_do_abc ? acked : tp->t_maxseg;
2343 if (tp->snd_wacked >= tp->snd_cwnd) {
2344 tp->snd_wacked -= tp->snd_cwnd;
2345 tp->snd_cwnd += tp->t_maxseg;
2348 tp->snd_cwnd = min(tp->snd_cwnd,
2349 TCP_MAXWIN << tp->snd_scale);
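/*
 * Growth-rate sketch (hypothetical numbers, t_maxseg = 1460): in slow
 * start every ACK grows snd_cwnd by one t_maxseg (with ABC, by up to
 * abc_sslimit bytes), roughly doubling the window each round trip.
 * In congestion avoidance snd_wacked accumulates credit from the ACKs
 * and snd_cwnd grows by a single t_maxseg only once a whole window's
 * worth has accumulated, i.e. about one segment per RTT.
 */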
2350 tp->snd_recover = th->th_ack - 1;
2352 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2353 tp->snd_nxt = tp->snd_una;
2356 * If all outstanding data is acked, stop retransmit
2357 * timer and remember to restart (more output or persist).
2358 * If there is more data to be acked, restart retransmit
2359 * timer, using current (possibly backed-off) value.
2361 if (th->th_ack == tp->snd_max) {
2362 tcp_callout_stop(tp, tp->tt_rexmt);
2364 } else if (!tcp_callout_active(tp, tp->tt_persist)) {
2365 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur, tcp_timer_rexmt);
2369 switch (tp->t_state) {
2371 * In FIN_WAIT_1 STATE in addition to the processing
2372 * for the ESTABLISHED state if our FIN is now acknowledged
2373 * then enter FIN_WAIT_2.
2375 case TCPS_FIN_WAIT_1:
2376 if (ourfinisacked) {
2378 * If we can't receive any more
2379 * data, then closing user can proceed.
2380 * Starting the timer is contrary to the
2381 * specification, but if we don't get a FIN
2382 * we'll hang forever.
2384 if (so->so_state & SS_CANTRCVMORE) {
2385 soisdisconnected(so);
2386 tcp_callout_reset(tp, tp->tt_2msl,
2387 tcp_maxidle, tcp_timer_2msl);
2389 tp->t_state = TCPS_FIN_WAIT_2;
2394 * In CLOSING STATE in addition to the processing for
2395 * the ESTABLISHED state if the ACK acknowledges our FIN
2396 * then enter the TIME-WAIT state, otherwise ignore
2400 if (ourfinisacked) {
2401 tp->t_state = TCPS_TIME_WAIT;
2402 tcp_canceltimers(tp);
2403 /* Shorten TIME_WAIT [RFC-1644, p.28] */
2404 if (tp->cc_recv != 0 &&
2405 (ticks - tp->t_starttime) < tcp_msl) {
2406 tcp_callout_reset(tp, tp->tt_2msl,
2407 tp->t_rxtcur * TCPTV_TWTRUNC,
2410 tcp_callout_reset(tp, tp->tt_2msl,
2411 2 * tcp_msl, tcp_timer_2msl);
2413 soisdisconnected(so);
2418 * In LAST_ACK, we may still be waiting for data to drain
2419 * and/or to be acked, as well as for the ack of our FIN.
2420 * If our FIN is now acknowledged, delete the TCB,
2421 * enter the closed state and return.
2424 if (ourfinisacked) {
2431 * In TIME_WAIT state the only thing that should arrive
2432 * is a retransmission of the remote FIN. Acknowledge
2433 * it and restart the finack timer.
2435 case TCPS_TIME_WAIT:
2436 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
2444 * Update window information.
2445 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2447 if ((thflags & TH_ACK) &&
2448 acceptable_window_update(tp, th, tiwin)) {
2449 /* keep track of pure window updates */
2450 if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
2451 tiwin > tp->snd_wnd)
2452 tcpstat.tcps_rcvwinupd++;
2453 tp->snd_wnd = tiwin;
2454 tp->snd_wl1 = th->th_seq;
2455 tp->snd_wl2 = th->th_ack;
2456 if (tp->snd_wnd > tp->max_sndwnd)
2457 tp->max_sndwnd = tp->snd_wnd;
2462 * Process segments with URG.
2464 if ((thflags & TH_URG) && th->th_urp &&
2465 !TCPS_HAVERCVDFIN(tp->t_state)) {
2467 * This is a kludge, but if we receive and accept
2468 * random urgent pointers, we'll crash in
2469 * soreceive. It's hard to imagine someone
2470 * actually wanting to send this much urgent data.
2472 if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
2473 th->th_urp = 0; /* XXX */
2474 thflags &= ~TH_URG; /* XXX */
2475 goto dodata; /* XXX */
2478 * If this segment advances the known urgent pointer,
2479 * then mark the data stream. This should not happen
2480 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2481 * a FIN has been received from the remote side.
2482 * In these states we ignore the URG.
2484 * According to RFC961 (Assigned Protocols),
2485 * the urgent pointer points to the last octet
2486 * of urgent data. We continue, however,
2487 * to consider it to indicate the first octet
2488 * of data past the urgent section as the original
2489 * spec states (in one of two places).
2491 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
2492 tp->rcv_up = th->th_seq + th->th_urp;
2493 so->so_oobmark = so->so_rcv.ssb_cc +
2494 (tp->rcv_up - tp->rcv_nxt) - 1;
2495 if (so->so_oobmark == 0)
2496 so->so_state |= SS_RCVATMARK;
2498 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
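/*
 * Worked example for the oobmark computation above (hypothetical
 * numbers): with 100 bytes already queued in so_rcv, rcv_nxt = 1000
 * and an urgent pointer advancing rcv_up to 1010, so_oobmark becomes
 * 100 + (1010 - 1000) - 1 = 109, the offset within the receive queue
 * of the urgent (out-of-band) byte.
 */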
2501 * Remove out of band data so it doesn't get presented to the user.
2502 * This can happen independent of advancing the URG pointer,
2503 * but if two URG's are pending at once, some out-of-band
2504 * data may creep in... ick.
2506 if (th->th_urp <= (u_long)tlen &&
2507 !(so->so_options & SO_OOBINLINE)) {
2508 /* hdr drop is delayed */
2509 tcp_pulloutofband(so, th, m, drop_hdrlen);
2513 * If no out of band data is expected,
2514 * pull receive urgent pointer along
2515 * with the receive window.
2517 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2518 tp->rcv_up = tp->rcv_nxt;
2523 * Process the segment text, merging it into the TCP sequencing queue,
2524 * and arranging for acknowledgment of receipt if necessary.
2525 * This process logically involves adjusting tp->rcv_wnd as data
2526 * is presented to the user (this happens in tcp_usrreq.c,
2527 * case PRU_RCVD). If a FIN has already been received on this
2528 * connection then we just ignore the text.
2530 if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
2531 m_adj(m, drop_hdrlen); /* delayed header drop */
2533 * Insert segment which includes th into TCP reassembly queue
2534 * with control block tp. Set thflags to whether reassembly now
2535 * includes a segment with FIN. This handles the common case
2536 * inline (segment is the next to be received on an established
2537 * connection, and the queue is empty), avoiding linkage into
2538 * and removal from the queue and repetition of various
2540 * Set DELACK for segments received in order, but ack
2541 * immediately when segments are out of order (so
2542 * fast retransmit can work).
2544 if (th->th_seq == tp->rcv_nxt &&
2545 LIST_EMPTY(&tp->t_segq) &&
2546 TCPS_HAVEESTABLISHED(tp->t_state)) {
2547 if (DELAY_ACK(tp)) {
2548 tcp_callout_reset(tp, tp->tt_delack,
2549 tcp_delacktime, tcp_timer_delack);
2551 tp->t_flags |= TF_ACKNOW;
2553 tp->rcv_nxt += tlen;
2554 thflags = th->th_flags & TH_FIN;
2555 tcpstat.tcps_rcvpack++;
2556 tcpstat.tcps_rcvbyte += tlen;
2558 if (so->so_state & SS_CANTRCVMORE)
2561 ssb_appendstream(&so->so_rcv, m);
2564 if (!(tp->t_flags & TF_DUPSEG)) {
2565 /* Initialize SACK report block. */
2566 tp->reportblk.rblk_start = th->th_seq;
2567 tp->reportblk.rblk_end = th->th_seq + tlen +
2568 ((thflags & TH_FIN) != 0);
2570 thflags = tcp_reass(tp, th, &tlen, m);
2571 tp->t_flags |= TF_ACKNOW;
2575 * Note the amount of data that peer has sent into
2576 * our window, in order to estimate the sender's buffer size.
2579 len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2586 * If FIN is received ACK the FIN and let the user know
2587 * that the connection is closing.
2589 if (thflags & TH_FIN) {
2590 if (!TCPS_HAVERCVDFIN(tp->t_state)) {
2593 * If connection is half-synchronized
2594 * (ie NEEDSYN flag on) then delay ACK,
2595 * so it may be piggybacked when SYN is sent.
2596 * Otherwise, since we received a FIN then no
2597 * more input can be expected, send ACK now.
2599 if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
2600 tcp_callout_reset(tp, tp->tt_delack,
2601 tcp_delacktime, tcp_timer_delack);
2603 tp->t_flags |= TF_ACKNOW;
2608 switch (tp->t_state) {
2610 * In SYN_RECEIVED and ESTABLISHED STATES
2611 * enter the CLOSE_WAIT state.
2613 case TCPS_SYN_RECEIVED:
2614 tp->t_starttime = ticks;
2616 case TCPS_ESTABLISHED:
2617 tp->t_state = TCPS_CLOSE_WAIT;
2621 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2622 * enter the CLOSING state.
2624 case TCPS_FIN_WAIT_1:
2625 tp->t_state = TCPS_CLOSING;
2629 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2630 * starting the time-wait timer, turning off the other
2633 case TCPS_FIN_WAIT_2:
2634 tp->t_state = TCPS_TIME_WAIT;
2635 tcp_canceltimers(tp);
2636 /* Shorten TIME_WAIT [RFC-1644, p.28] */
2637 if (tp->cc_recv != 0 &&
2638 (ticks - tp->t_starttime) < tcp_msl) {
2639 tcp_callout_reset(tp, tp->tt_2msl,
2640 tp->t_rxtcur * TCPTV_TWTRUNC,
2642 /* For transaction client, force ACK now. */
2643 tp->t_flags |= TF_ACKNOW;
2645 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
2648 soisdisconnected(so);
2652 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2654 case TCPS_TIME_WAIT:
2655 tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
2662 if (so->so_options & SO_DEBUG)
2663 tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2667 * Return any desired output.
2669 if (needoutput || (tp->t_flags & TF_ACKNOW))
2675 * Generate an ACK dropping incoming segment if it occupies
2676 * sequence space, where the ACK reflects our state.
2678 * We can now skip the test for the RST flag since all
2679 * paths to this code happen after packets containing
2680 * RST have been dropped.
2682 * In the SYN-RECEIVED state, don't send an ACK unless the
2683 * segment we received passes the SYN-RECEIVED ACK test.
2684 * If it fails send a RST. This breaks the loop in the
2685 * "LAND" DoS attack, and also prevents an ACK storm
2686 * between two listening ports that have been sent forged
2687 * SYN segments, each with the source address of the other.
2689 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2690 (SEQ_GT(tp->snd_una, th->th_ack) ||
2691 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2692 rstreason = BANDLIM_RST_OPENPORT;
2696 if (so->so_options & SO_DEBUG)
2697 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2700 tp->t_flags |= TF_ACKNOW;
2706 * Generate a RST, dropping incoming segment.
2707 * Make ACK acceptable to originator of segment.
2708 * Don't bother to respond if destination was broadcast/multicast.
2710 if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
2713 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2714 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2717 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2718 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2719 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2720 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2723 /* IPv6 anycast check is done at tcp6_input() */
2726 * Perform bandwidth limiting.
2729 if (badport_bandlim(rstreason) < 0)
2734 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2735 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2737 if (thflags & TH_ACK)
2738 /* mtod() below is safe as long as hdr dropping is delayed */
2739 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, TH_RST);
2742 if (thflags & TH_SYN)
2744 /* mtod() below is safe as long as hdr dropping is delayed */
2745 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
2746 (tcp_seq)0, TH_RST | TH_ACK);
2752 * Drop space held by incoming segment and return.
2755 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2756 tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
2763 * Parse TCP options and place in tcpopt.
2766 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn)
2771 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2773 if (opt == TCPOPT_EOL)
2775 if (opt == TCPOPT_NOP)
2781 if (optlen < 2 || optlen > cnt)
2786 if (optlen != TCPOLEN_MAXSEG)
2790 to->to_flags |= TOF_MSS;
2791 bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
2792 to->to_mss = ntohs(to->to_mss);
2795 if (optlen != TCPOLEN_WINDOW)
2799 to->to_flags |= TOF_SCALE;
2800 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2802 case TCPOPT_TIMESTAMP:
2803 if (optlen != TCPOLEN_TIMESTAMP)
2805 to->to_flags |= TOF_TS;
2806 bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
2807 to->to_tsval = ntohl(to->to_tsval);
2808 bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
2809 to->to_tsecr = ntohl(to->to_tsecr);
2811 * If echoed timestamp is later than the current time,
2812 * fall back to non RFC1323 RTT calculation.
2814 if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
to->to_tsecr = 0;
2818 if (optlen != TCPOLEN_CC)
2820 to->to_flags |= TOF_CC;
2821 bcopy(cp + 2, &to->to_cc, sizeof to->to_cc);
2822 to->to_cc = ntohl(to->to_cc);
2825 if (optlen != TCPOLEN_CC)
2829 to->to_flags |= TOF_CCNEW;
2830 bcopy(cp + 2, &to->to_cc, sizeof to->to_cc);
2831 to->to_cc = ntohl(to->to_cc);
2834 if (optlen != TCPOLEN_CC)
2838 to->to_flags |= TOF_CCECHO;
2839 bcopy(cp + 2, &to->to_ccecho, sizeof to->to_ccecho);
2840 to->to_ccecho = ntohl(to->to_ccecho);
2842 case TCPOPT_SACK_PERMITTED:
2843 if (optlen != TCPOLEN_SACK_PERMITTED)
2847 to->to_flags |= TOF_SACK_PERMITTED;
2850 if ((optlen - 2) & 0x07) /* not multiple of 8 */
2852 to->to_nsackblocks = (optlen - 2) / 8;
2853 to->to_sackblocks = (struct raw_sackblock *) (cp + 2);
2854 to->to_flags |= TOF_SACK;
2855 for (i = 0; i < to->to_nsackblocks; i++) {
2856 struct raw_sackblock *r = &to->to_sackblocks[i];
2858 r->rblk_start = ntohl(r->rblk_start);
2859 r->rblk_end = ntohl(r->rblk_end);
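/*
 * Wire layout assumed above: a SACK option is kind 5, a length byte,
 * then (optlen - 2) / 8 blocks of two 32-bit sequence numbers (left
 * and right edge of a received range).  For example, optlen 10
 * carries exactly one block; optlen 26 carries the maximum of three
 * blocks when timestamps are also in use.
 */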
2869 * Pull out of band byte out of a segment so
2870 * it doesn't appear in the user's data queue.
2871 * It is still reflected in the segment length for
2872 * sequencing purposes.
2873 * "off" is the delayed to be dropped hdrlen.
2876 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
2878 int cnt = off + th->th_urp - 1;
2881 if (m->m_len > cnt) {
2882 char *cp = mtod(m, caddr_t) + cnt;
2883 struct tcpcb *tp = sototcpcb(so);
2886 tp->t_oobflags |= TCPOOB_HAVEDATA;
2887 bcopy(cp + 1, cp, m->m_len - cnt - 1);
2889 if (m->m_flags & M_PKTHDR)
2898 panic("tcp_pulloutofband");
2902 * Collect new round-trip time estimate
2903 * and update averages and current timeout.
2906 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2910 tcpstat.tcps_rttupdated++;
2912 if (tp->t_srtt != 0) {
2914 * srtt is stored as fixed point with 5 bits after the
2915 * binary point (i.e., scaled by 32). The following magic
2916 * is equivalent to the smoothing algorithm in rfc793 with
2917 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2918 * point). Adjust rtt to origin 0.
2920 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2921 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2923 if ((tp->t_srtt += delta) <= 0)
tp->t_srtt = 1;
2927 * We accumulate a smoothed rtt variance (actually, a
2928 * smoothed mean difference), then set the retransmit
2929 * timer to smoothed rtt + 4 times the smoothed variance.
2930 * rttvar is stored as fixed point with 4 bits after the
2931 * binary point (scaled by 16). The following is
2932 * equivalent to rfc793 smoothing with an alpha of .75
2933 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2934 * rfc793's wired-in beta.
if (delta < 0)
delta = -delta;
2938 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2939 if ((tp->t_rttvar += delta) <= 0)
tp->t_rttvar = 1;
2941 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2942 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2945 * No rtt measurement yet - use the unsmoothed rtt.
2946 * Set the variance to half the rtt (so our first
2947 * retransmit happens at 3*rtt).
2949 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2950 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2951 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2957 * the retransmit should happen at rtt + 4 * rttvar.
2958 * Because of the way we do the smoothing, srtt and rttvar
2959 * will each average +1/2 tick of bias. When we compute
2960 * the retransmit timer, we want 1/2 tick of rounding and
2961 * 1 extra tick because of +-1/2 tick uncertainty in the
2962 * firing of the timer. The bias will give us exactly the
2963 * 1.5 tick we need. But, because the bias is
2964 * statistical, we have to test that we don't drop below
2965 * the minimum feasible timer (which is 2 ticks).
2967 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2968 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
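/*
 * Worked example of the fixed-point update above, assuming the usual
 * scaling (srtt carries 5 fraction bits, rttvar 4, delta 2) and
 * hypothetical values: with a smoothed srtt of 10 ticks (t_srtt = 320)
 * and a new sample rtt = 18, delta = (17 << 2) - (320 >> 3) =
 * 68 - 40 = 28, so t_srtt rises to 348 (10.875 ticks), i.e. srtt moves
 * 1/8 of the way toward the sample; rttvar similarly absorbs 1/4 of
 * |delta|.
 */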
2971 * We received an ack for a packet that wasn't retransmitted;
2972 * it is probably safe to discard any error indications we've
2973 * received recently. This isn't quite right, but close enough
2974 * for now (a route might have failed after we sent a segment,
2975 * and the return path might not be symmetrical).
2977 tp->t_softerror = 0;
2981 * Determine a reasonable value for maxseg size.
2982 * If the route is known, check route for mtu.
2983 * If none, use an mss that can be handled on the outgoing
2984 * interface without forcing IP to fragment; if bigger than
2985 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2986 * to utilize large mbufs. If no route is found, route has no mtu,
2987 * or the destination isn't local, use a default, hopefully conservative
2988 * size (usually 512 or the default IP max size, but no more than the mtu
2989 * of the interface), as we can't discover anything about intervening
2990 * gateways or networks. We also initialize the congestion/slow start
2991 * window to be a single segment if the destination isn't local.
2992 * While looking at the routing entry, we also initialize other path-dependent
2993 * parameters from pre-set or cached values in the routing entry.
2995 * Also take into account the space needed for options that we
2996 * send regularly. Make maxseg shorter by that amount to assure
2997 * that we can send maxseg amount of data even when the options
2998 * are present. Store the upper limit of the length of options plus data in maxopd.
3001 * NOTE that this routine is only called when we process an incoming
3002 * segment, for outgoing segments only tcp_mssopt is called.
3004 * In case of T/TCP, we call this routine during implicit connection
3005 * setup as well (offer = -1), to initialize maxseg from the cached MSS of our peer.
3009 tcp_mss(struct tcpcb *tp, int offer)
3015 struct inpcb *inp = tp->t_inpcb;
3017 struct rmxp_tao *taop;
3018 int origoffer = offer;
3020 boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3021 size_t min_protoh = isipv6 ?
3022 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
3023 sizeof(struct tcpiphdr);
3025 const boolean_t isipv6 = FALSE;
3026 const size_t min_protoh = sizeof(struct tcpiphdr);
3030 rt = tcp_rtlookup6(&inp->inp_inc);
3032 rt = tcp_rtlookup(&inp->inp_inc);
3034 tp->t_maxopd = tp->t_maxseg =
3035 (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3039 so = inp->inp_socket;
3041 taop = rmx_taop(rt->rt_rmx);
3043 * Offer == -1 means that we didn't receive SYN yet,
3044 * use cached value in that case;
3047 offer = taop->tao_mssopt;
3049 * Offer == 0 means that there was no MSS on the SYN segment,
3050 * in this case we use tcp_mssdflt.
3053 offer = (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3056 * Prevent DoS attack with too small MSS. Round up
3057 * to at least minmss.
3059 offer = max(offer, tcp_minmss);
3061 * Sanity check: make sure that maxopd will be large
3062 * enough to allow some data on segments even if
3063 * all the option space is used (40 bytes). Otherwise
3064 * funny things may happen in tcp_output.
3066 offer = max(offer, 64);
3068 taop->tao_mssopt = offer;
3071 * While we're here, check if there's an initial rtt
3072 * or rttvar. Convert from the route-table units
3073 * to scaled multiples of the slow timeout timer.
3075 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
3077 * XXX the lock bit for RTT indicates that the value
3078 * is also a minimum value; this is subject to time.
3080 if (rt->rt_rmx.rmx_locks & RTV_RTT)
3081 tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
3082 tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
3083 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3084 tcpstat.tcps_usedrtt++;
3085 if (rt->rt_rmx.rmx_rttvar) {
3086 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
3087 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
3088 tcpstat.tcps_usedrttvar++;
3090 /* default variation is +- 1 rtt */
3092 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3094 TCPT_RANGESET(tp->t_rxtcur,
3095 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3096 tp->t_rttmin, TCPTV_REXMTMAX);
3099 * if there's an mtu associated with the route, use it
3100 * else, use the link mtu.
3102 if (rt->rt_rmx.rmx_mtu)
3103 mss = rt->rt_rmx.rmx_mtu - min_protoh;
3106 mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
3107 if (!in6_localaddr(&inp->in6p_faddr))
3108 mss = min(mss, tcp_v6mssdflt);
3110 mss = ifp->if_mtu - min_protoh;
3111 if (!in_localaddr(inp->inp_faddr))
3112 mss = min(mss, tcp_mssdflt);
3115 mss = min(mss, offer);
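/*
 * Sketch with hypothetical numbers: over plain Ethernet (MTU 1500)
 * and IPv4, min_protoh is 40, so mss starts at 1460 and is then
 * clipped by the peer's offer; if timestamps are in use, the
 * TCPOLEN_TSTAMP_APPA (12) bytes subtracted below further reduce the
 * payload carried per segment.
 */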
3117 * maxopd stores the maximum length of data AND options
3118 * in a segment; maxseg is the amount of data in a normal
3119 * segment. We need to store this value (maxopd) apart
3120 * from maxseg, because now every segment carries options
3121 * and thus we normally have somewhat less data in segments.
3126 * In case of T/TCP, origoffer==-1 indicates that no segments
3127 * were received yet. In this case we just guess, otherwise
3128 * we do the same as before T/TCP.
3130 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
3132 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3133 mss -= TCPOLEN_TSTAMP_APPA;
3134 if ((tp->t_flags & (TF_REQ_CC | TF_NOOPT)) == TF_REQ_CC &&
3136 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
3137 mss -= TCPOLEN_CC_APPA;
3139 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3141 mss &= ~(MCLBYTES-1);
3144 mss = mss / MCLBYTES * MCLBYTES;
3147 * If there's a pipesize, change the socket buffer
3148 * to that size. Make the socket buffers an integral
3149 * number of mss units; if the mss is larger than
3150 * the socket buffer, decrease the mss.
3153 if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
3155 bufsize = so->so_snd.ssb_hiwat;
3159 bufsize = roundup(bufsize, mss);
3160 if (bufsize > sb_max)
3162 if (bufsize > so->so_snd.ssb_hiwat)
3163 ssb_reserve(&so->so_snd, bufsize, so, NULL);
3168 if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
3170 bufsize = so->so_rcv.ssb_hiwat;
3171 if (bufsize > mss) {
3172 bufsize = roundup(bufsize, mss);
3173 if (bufsize > sb_max)
3175 if (bufsize > so->so_rcv.ssb_hiwat)
3176 ssb_reserve(&so->so_rcv, bufsize, so, NULL);
3180 * Set the slow-start flight size depending on whether this
3181 * is a local network or not.
3184 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
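/*
 * Illustration (hypothetical mss values): for mss = 1460 this gives
 * min(5840, max(2920, 4380)) = 4380, roughly three segments; for
 * mss = 536 it gives min(2144, 4380) = 2144, i.e. four segments,
 * matching the RFC 3390 style larger initial window.
 */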
3188 if (rt->rt_rmx.rmx_ssthresh) {
3190 * There's some sort of gateway or interface
3191 * buffer limit on the path. Use this to set
3192 * the slow start threshold, but set the
3193 * threshold to no less than 2*mss.
3195 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
3196 tcpstat.tcps_usedssthresh++;
3201 * Determine the MSS option to send on an outgoing SYN.
3204 tcp_mssopt(struct tcpcb *tp)
3209 ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
3210 int min_protoh = isipv6 ?
3211 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
3212 sizeof(struct tcpiphdr);
3214 const boolean_t isipv6 = FALSE;
3215 const size_t min_protoh = sizeof(struct tcpiphdr);
3219 rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
3221 rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
3223 return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
3225 return (rt->rt_ifp->if_mtu - min_protoh);
3229 * When a partial ack arrives, force the retransmission of the
3230 * next unacknowledged segment. Do not exit Fast Recovery.
3232 * Implement the Slow-but-Steady variant of NewReno by restarting
3233 * the retransmission timer. Turn it off here so it can be restarted
3234 * later in tcp_output().
3237 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
3239 tcp_seq old_snd_nxt = tp->snd_nxt;
3240 u_long ocwnd = tp->snd_cwnd;
3242 tcp_callout_stop(tp, tp->tt_rexmt);
3244 tp->snd_nxt = th->th_ack;
3245 /* Set snd_cwnd to one segment beyond acknowledged offset. */
3246 tp->snd_cwnd = tp->t_maxseg;
3247 tp->t_flags |= TF_ACKNOW;
3249 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3250 tp->snd_nxt = old_snd_nxt;
3251 /* partial window deflation */
3253 tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
3255 tp->snd_cwnd = tp->t_maxseg;
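/*
 * Deflation sketch (hypothetical numbers): if ocwnd was 10 segments
 * and the partial ACK covered 3 of them, snd_cwnd becomes
 * ocwnd - acked + t_maxseg, i.e. about 8 segments -- enough to keep
 * the remaining data flowing plus the one forced retransmission,
 * without reopening the window that fast recovery just reduced.
 */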
3259 * In contrast to the Slow-but-Steady NewReno variant,
3260 * we do not reset the retransmission timer for SACK retransmissions,
3261 * except when retransmitting snd_una.
3264 tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th)
3266 uint32_t pipe, seglen;
3269 tcp_seq old_snd_nxt = tp->snd_nxt;
3270 u_long ocwnd = tp->snd_cwnd;
3271 int nseg = 0; /* consecutive new segments */
3272 #define MAXBURST 4 /* limit burst of new packets on partial ack */
3275 pipe = tcp_sack_compute_pipe(tp);
3276 while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg &&
3277 (!tcp_do_smartsack || nseg < MAXBURST) &&
3278 tcp_sack_nextseg(tp, &nextrexmt, &seglen, &lostdup)) {
3280 tcp_seq old_snd_max;
3283 if (nextrexmt == tp->snd_max)
3285 tp->snd_nxt = nextrexmt;
3286 tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
3287 old_snd_max = tp->snd_max;
3288 if (nextrexmt == tp->snd_una)
3289 tcp_callout_stop(tp, tp->tt_rexmt);
3290 error = tcp_output(tp);
3293 sent = tp->snd_nxt - nextrexmt;
3298 tcpstat.tcps_sndsackpack++;
3299 tcpstat.tcps_sndsackbyte += sent;
3300 if (SEQ_LT(nextrexmt, old_snd_max) &&
3301 SEQ_LT(tp->rexmt_high, tp->snd_nxt))
3302 tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
3304 if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
3305 tp->snd_nxt = old_snd_nxt;
3306 tp->snd_cwnd = ocwnd;