Commit | Line | Data |
---|---|---|
984263bc | 1 | /* |
66d6c637 JH |
2 | * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu. All rights reserved. |
3 | * Copyright (c) 2002, 2003, 2004 The DragonFly Project. All rights reserved. | |
95b22adf | 4 | * |
66d6c637 JH |
5 | * This code is derived from software contributed to The DragonFly Project |
6 | * by Jeffrey M. Hsu. | |
95b22adf | 7 | * |
66d6c637 JH |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. Neither the name of The DragonFly Project nor the names of its | |
17 | * contributors may be used to endorse or promote products derived | |
18 | * from this software without specific, prior written permission. | |
95b22adf | 19 | * |
66d6c637 JH |
20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
24 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
25 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | |
28 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |
29 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | |
30 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | */ | |
33 | ||
66d6c637 | 34 | /* |
984263bc MD |
35 | * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 |
36 | * The Regents of the University of California. All rights reserved. | |
37 | * | |
38 | * Redistribution and use in source and binary forms, with or without | |
39 | * modification, are permitted provided that the following conditions | |
40 | * are met: | |
41 | * 1. Redistributions of source code must retain the above copyright | |
42 | * notice, this list of conditions and the following disclaimer. | |
43 | * 2. Redistributions in binary form must reproduce the above copyright | |
44 | * notice, this list of conditions and the following disclaimer in the | |
45 | * documentation and/or other materials provided with the distribution. | |
46 | * 3. All advertising materials mentioning features or use of this software | |
47 | * must display the following acknowledgement: | |
48 | * This product includes software developed by the University of | |
49 | * California, Berkeley and its contributors. | |
50 | * 4. Neither the name of the University nor the names of its contributors | |
51 | * may be used to endorse or promote products derived from this software | |
52 | * without specific prior written permission. | |
53 | * | |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
64 | * SUCH DAMAGE. | |
65 | * | |
66 | * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 | |
67 | * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $ | |
68 | */ | |
69 | ||
b1992928 | 70 | #include "opt_inet.h" |
984263bc MD |
71 | #include "opt_inet6.h" |
72 | #include "opt_ipsec.h" | |
73 | #include "opt_tcpdebug.h" | |
74 | #include "opt_tcp_input.h" | |
75 | ||
76 | #include <sys/param.h> | |
77 | #include <sys/systm.h> | |
78 | #include <sys/kernel.h> | |
79 | #include <sys/sysctl.h> | |
80 | #include <sys/malloc.h> | |
81 | #include <sys/mbuf.h> | |
82 | #include <sys/proc.h> /* for proc0 declaration */ | |
83 | #include <sys/protosw.h> | |
84 | #include <sys/socket.h> | |
85 | #include <sys/socketvar.h> | |
86 | #include <sys/syslog.h> | |
3f9db7f8 | 87 | #include <sys/in_cksum.h> |
984263bc | 88 | |
6cef7136 MD |
89 | #include <sys/socketvar2.h> |
90 | ||
984263bc | 91 | #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ |
a00138cb | 92 | #include <machine/stdarg.h> |
984263bc MD |
93 | |
94 | #include <net/if.h> | |
95 | #include <net/route.h> | |
96 | ||
97 | #include <netinet/in.h> | |
98 | #include <netinet/in_systm.h> | |
99 | #include <netinet/ip.h> | |
95b22adf | 100 | #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */ |
984263bc | 101 | #include <netinet/in_var.h> |
95b22adf | 102 | #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ |
984263bc MD |
103 | #include <netinet/in_pcb.h> |
104 | #include <netinet/ip_var.h> | |
105 | #include <netinet/ip6.h> | |
106 | #include <netinet/icmp6.h> | |
107 | #include <netinet6/nd6.h> | |
108 | #include <netinet6/ip6_var.h> | |
109 | #include <netinet6/in6_pcb.h> | |
110 | #include <netinet/tcp.h> | |
111 | #include <netinet/tcp_fsm.h> | |
112 | #include <netinet/tcp_seq.h> | |
113 | #include <netinet/tcp_timer.h> | |
a48c5dd5 | 114 | #include <netinet/tcp_timer2.h> |
984263bc MD |
115 | #include <netinet/tcp_var.h> |
116 | #include <netinet6/tcp6_var.h> | |
117 | #include <netinet/tcpip.h> | |
95b22adf | 118 | |
984263bc MD |
119 | #ifdef TCPDEBUG |
120 | #include <netinet/tcp_debug.h> | |
121 | ||
95b22adf | 122 | u_char tcp_saveipgen[40]; /* must be as large as the largest IP header, currently IPv6 */
984263bc | 123 | struct tcphdr tcp_savetcp; |
95b22adf | 124 | #endif |
984263bc MD |
125 | |
126 | #ifdef FAST_IPSEC | |
bf844ffa JH |
127 | #include <netproto/ipsec/ipsec.h> |
128 | #include <netproto/ipsec/ipsec6.h> | |
984263bc MD |
129 | #endif |
130 | ||
131 | #ifdef IPSEC | |
132 | #include <netinet6/ipsec.h> | |
133 | #include <netinet6/ipsec6.h> | |
d2438d69 | 134 | #include <netproto/key/key.h> |
95b22adf | 135 | #endif |
984263bc | 136 | |
984263bc MD |
137 | MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry"); |
138 | ||
984263bc | 139 | static int log_in_vain = 0; |
d24ce1dc | 140 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, |
984263bc MD |
141 | &log_in_vain, 0, "Log all incoming TCP connections"); |
142 | ||
143 | static int blackhole = 0; | |
144 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, | |
145 | &blackhole, 0, "Do not send RST when dropping refused connections"); | |
146 | ||
147 | int tcp_delack_enabled = 1; | |
d24ce1dc JH |
148 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, |
149 | &tcp_delack_enabled, 0, | |
984263bc MD |
150 | "Delay ACK to try and piggyback it onto a data packet"); |
151 | ||
152 | #ifdef TCP_DROP_SYNFIN | |
153 | static int drop_synfin = 0; | |
154 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, | |
155 | &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); | |
156 | #endif | |
157 | ||
33abdd1c MD |
158 | static int tcp_do_limitedtransmit = 1; |
159 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW, | |
160 | &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)"); | |
161 | ||
91489f6b | 162 | static int tcp_do_early_retransmit = 1; |
8819433a JH |
163 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW, |
164 | &tcp_do_early_retransmit, 0, "Early retransmit"); | |
165 | ||
8acdb67c | 166 | int tcp_aggregate_acks = 1; |
72b37eeb MD |
167 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW, |
168 | &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack"); | |
169 | ||
efd4b327 JH |
170 | static int tcp_do_eifel_detect = 1; |
171 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW, | |
172 | &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)"); | |
173 | ||
8acdb67c JH |
174 | static int tcp_do_abc = 1; |
175 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW, | |
176 | &tcp_do_abc, 0, | |
177 | "TCP Appropriate Byte Counting (RFC 3465)"); | |
178 | ||
9de1f696 SZ |
179 | /* |
180 | * The following value actually takes range [25ms, 250ms], | |
181 | * given that most modern systems use 1ms ~ 10ms as the unit | |
182 | * of timestamp option. | |
183 | */ | |
184 | static u_int tcp_paws_tolerance = 25; | |
185 | SYSCTL_UINT(_net_inet_tcp, OID_AUTO, paws_tolerance, CTLFLAG_RW, | |
186 | &tcp_paws_tolerance, 0, "RFC1323 PAWS tolerance"); | |
187 | ||
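
As a quick check of the [25ms, 250ms] range mentioned above, the figure is simply the tolerance in peer timestamp ticks times the peer's timestamp unit; a tiny stand-alone sketch, where the 1 ms and 10 ms units are only the assumptions stated in the comment:

```c
#include <stdio.h>

int
main(void)
{
	unsigned int paws_tolerance = 25;	/* default from the sysctl above */

	/* 1 ms/tick -> 25 ms, 10 ms/tick -> 250 ms, matching [25ms, 250ms]. */
	printf("%u ms .. %u ms\n", paws_tolerance * 1, paws_tolerance * 10);
	return 0;
}
```
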
91489f6b JH |
188 | /* |
189 | * Define as tunable for easy testing with SACK on and off. | |
190 | * Warning: do not change this setting in the middle of an existing active TCP
191 | * flow, or strange things may happen to that flow.
192 | */ | |
6e5bbdda | 193 | int tcp_do_sack = 1; |
91489f6b JH |
194 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, |
195 | &tcp_do_sack, 0, "Enable SACK Algorithms"); | |
196 | ||
197 | int tcp_do_smartsack = 1; | |
198 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW, | |
199 | &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms"); | |
200 | ||
1bdfd728 SZ |
201 | int tcp_do_rescuesack = 1; |
202 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack, CTLFLAG_RW, | |
203 | &tcp_do_rescuesack, 0, "Rescue retransmission for SACK"); | |
204 | ||
c01f968f | 205 | int tcp_aggressive_rescuesack = 0; |
a098966f SZ |
206 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, rescuesack_agg, CTLFLAG_RW, |
207 | &tcp_aggressive_rescuesack, 0, "Aggressive rescue retransmission for SACK"); | |
208 | ||
ccb518ea SZ |
209 | static int tcp_force_sackrxt = 1; |
210 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, force_sackrxt, CTLFLAG_RW, | |
211 | &tcp_force_sackrxt, 0, "Allowed forced SACK retransmit burst"); | |
212 | ||
338fe0b0 | 213 | int tcp_do_rfc3517bis = 1; |
ffe35e17 SZ |
214 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis, CTLFLAG_RW, |
215 | &tcp_do_rfc3517bis, 0, "Enable RFC3517 update"); | |
216 | ||
5fd89c20 SZ |
217 | int tcp_rfc3517bis_rxt = 0; |
218 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3517bis_rxt, CTLFLAG_RW, | |
219 | &tcp_rfc3517bis_rxt, 0, "Enable RFC3517 retransmit update"); | |
220 | ||
3edf7c37 RG |
221 | SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0, |
222 | "TCP Segment Reassembly Queue"); | |
223 | ||
224 | int tcp_reass_maxseg = 0; | |
225 | SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD, | |
226 | &tcp_reass_maxseg, 0, | |
227 | "Global maximum number of TCP Segments in Reassembly Queue"); | |
228 | ||
229 | int tcp_reass_qsize = 0; | |
230 | SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD, | |
231 | &tcp_reass_qsize, 0, | |
232 | "Global number of TCP Segments currently in Reassembly Queue"); | |
233 | ||
234 | static int tcp_reass_overflows = 0; | |
235 | SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD, | |
236 | &tcp_reass_overflows, 0, | |
237 | "Global number of TCP Segment Reassembly Queue Overflows"); | |
238 | ||
5b0b9fa5 PA |
239 | int tcp_do_autorcvbuf = 1; |
240 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, | |
241 | &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing"); | |
242 | ||
243 | int tcp_autorcvbuf_inc = 16*1024; | |
244 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, | |
245 | &tcp_autorcvbuf_inc, 0, | |
246 | "Increment step size of the automatic receive buffer");
247 | ||
46e92930 | 248 | int tcp_autorcvbuf_max = 2*1024*1024; |
5b0b9fa5 PA |
249 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, |
250 | &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer"); | |
251 | ||
0df7608b SZ |
252 | int tcp_sosend_agglim = 2; |
253 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_agglim, CTLFLAG_RW, | |
254 | &tcp_sosend_agglim, 0, "TCP sosend mbuf aggregation limit"); | |
5b0b9fa5 | 255 | |
0df7608b SZ |
256 | int tcp_sosend_async = 1; |
257 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, sosend_async, CTLFLAG_RW, | |
258 | &tcp_sosend_async, 0, "TCP asynchronous pru_send");
f2a3782e | 259 | |
12e60b57 SZ |
260 | static int tcp_ignore_redun_dsack = 1; |
261 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_redun_dsack, CTLFLAG_RW, | |
262 | &tcp_ignore_redun_dsack, 0, "Ignore redundant DSACK"); | |
263 | ||
6c1bbf57 SZ |
264 | static void tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t, |
265 | tcp_seq); | |
984263bc MD |
266 | static void tcp_pulloutofband(struct socket *, |
267 | struct tcphdr *, struct mbuf *, int); | |
268 | static int tcp_reass(struct tcpcb *, struct tcphdr *, int *, | |
269 | struct mbuf *); | |
073ec6c4 | 270 | static void tcp_xmit_timer(struct tcpcb *, int, tcp_seq); |
91489f6b | 271 | static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int); |
ccb518ea | 272 | static void tcp_sack_rexmt(struct tcpcb *, boolean_t); |
ffe35e17 | 273 | static boolean_t tcp_sack_limitedxmit(struct tcpcb *); |
01d3427a | 274 | static int tcp_rmx_msl(const struct tcpcb *); |
8651f7f8 | 275 | static void tcp_established(struct tcpcb *); |
5c99b248 | 276 | static boolean_t tcp_recv_dupack(struct tcpcb *, tcp_seq, |
27f4bf33 | 277 | const struct tcpopt *); |
984263bc MD |
278 | |
279 | /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ | |
280 | #ifdef INET6 | |
281 | #define ND6_HINT(tp) \ | |
282 | do { \ | |
283 | if ((tp) && (tp)->t_inpcb && \ | |
61896e3c | 284 | ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \ |
984263bc MD |
285 | (tp)->t_inpcb->in6p_route.ro_rt) \ |
286 | nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \ | |
287 | } while (0) | |
288 | #else | |
289 | #define ND6_HINT(tp) | |
290 | #endif | |
291 | ||
292 | /* | |
293 | * Indicate whether this ack should be delayed. We can delay the ack if | |
294 | * - delayed acks are enabled and | |
295 | * - there is no delayed ack timer in progress and | |
296 | * - our last ack wasn't a 0-sized window. We never want to delay | |
297 | * the ack that opens up a 0-sized window. | |
298 | */ | |
299 | #define DELAY_ACK(tp) \ | |
a48c5dd5 | 300 | (tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \ |
61896e3c | 301 | !(tp->t_flags & TF_RXWIN0SENT)) |
984263bc | 302 | |
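
A minimal user-space sketch of the same three-part test may help; the struct and field names below are hypothetical stand-ins for the real callout and tcpcb state:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the DELAY_ACK() conditions above. */
struct delack_state {
	bool	delack_enabled;		/* net.inet.tcp.delayed_ack != 0 */
	bool	delack_callout_armed;	/* a delayed-ack timer is in progress */
	bool	last_win_was_zero;	/* last ACK advertised a 0-sized window */
};

static bool
should_delay_ack(const struct delack_state *s)
{
	return (s->delack_enabled && !s->delack_callout_armed &&
	    !s->last_win_was_zero);
}

int
main(void)
{
	struct delack_state s = { true, false, true };

	/* Never delay the ACK that opens up a 0-sized window: prints 0. */
	printf("%d\n", should_delay_ack(&s));
	return 0;
}
```
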
df9d7670 JH |
303 | #define acceptable_window_update(tp, th, tiwin) \ |
304 | (SEQ_LT(tp->snd_wl1, th->th_seq) || \ | |
305 | (tp->snd_wl1 == th->th_seq && \ | |
306 | (SEQ_LT(tp->snd_wl2, th->th_ack) || \ | |
307 | (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) | |
308 | ||
ffe35e17 SZ |
309 | #define iceildiv(n, d) (((n)+(d)-1) / (d)) |
310 | #define need_early_retransmit(tp, ownd) \ | |
311 | (tcp_do_early_retransmit && \ | |
312 | (tcp_do_eifel_detect && (tp->t_flags & TF_RCVD_TSTMP)) && \ | |
4c0f4a00 | 313 | ownd < ((tp->t_rxtthresh + 1) * tp->t_maxseg) && \ |
ffe35e17 SZ |
314 | tp->t_dupacks + 1 >= iceildiv(ownd, tp->t_maxseg) && \ |
315 | (!TCP_DO_SACK(tp) || ownd <= tp->t_maxseg || \ | |
316 | tcp_sack_has_sacked(&tp->scb, ownd - tp->t_maxseg))) | |
317 | ||
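
The dup-ACK threshold in need_early_retransmit() is driven by iceildiv(), a plain integer ceiling division; a small stand-alone example with made-up numbers:

```c
#include <stdio.h>

/* Same integer ceiling division as used by need_early_retransmit() above. */
#define iceildiv(n, d)	(((n) + (d) - 1) / (d))

int
main(void)
{
	int maxseg = 1460;		/* illustrative MSS */
	int ownd = 3 * maxseg + 100;	/* outstanding window, not a whole
					 * number of segments */

	/* 3 full segments plus a partial one count as 4: prints 4. */
	printf("%d\n", iceildiv(ownd, maxseg));
	return 0;
}
```
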
9de1f696 SZ |
318 | /* |
319 | * Returns TRUE if this segment can be merged with the last
320 | * pending segment in the reassembly queue and this segment
321 | * does not overlap with the pending segment immediately
322 | * preceding the last pending segment.
323 | */ | |
324 | static __inline boolean_t | |
325 | tcp_paws_canreasslast(const struct tcpcb *tp, const struct tcphdr *th, int tlen) | |
326 | { | |
327 | const struct tseg_qent *last, *prev; | |
328 | ||
329 | last = TAILQ_LAST(&tp->t_segq, tsegqe_head); | |
330 | if (last == NULL) | |
331 | return FALSE; | |
332 | ||
333 | /* This segment comes immediately after the last pending segment */ | |
04e6f08e SZ |
334 | if (last->tqe_th->th_seq + last->tqe_len == th->th_seq) { |
335 | if (last->tqe_th->th_flags & TH_FIN) { | |
336 | /* No segments should follow segment w/ FIN */ | |
337 | return FALSE; | |
338 | } | |
9de1f696 | 339 | return TRUE; |
04e6f08e | 340 | } |
9de1f696 SZ |
341 | |
342 | if (th->th_seq + tlen != last->tqe_th->th_seq) | |
343 | return FALSE; | |
344 | /* This segment comes immediately before the last pending segment */ | |
345 | ||
346 | prev = TAILQ_PREV(last, tsegqe_head, tqe_q); | |
347 | if (prev == NULL) { | |
348 | /* | |
349 | * No pending preceding segment; we assume this segment
350 | * could be reassembled. | |
351 | */ | |
352 | return TRUE; | |
353 | } | |
354 | ||
355 | /* This segment does not overlap with the preceding segment */
356 | if (SEQ_GEQ(th->th_seq, prev->tqe_th->th_seq + prev->tqe_len)) | |
357 | return TRUE; | |
358 | ||
359 | return FALSE; | |
360 | } | |
361 | ||
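
For readers tracing the three cases above, a simplified stand-alone model of the same decision, using plain 32-bit arithmetic instead of the kernel's SEQ_* macros and ignoring the FIN special case, looks roughly like this:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of tcp_paws_canreasslast(): a new segment [seq, seq+len)
 * may be appended right after the last pending segment, or prepended right
 * before it, provided it does not overlap the segment preceding the last one.
 */
static bool
can_merge_with_last(unsigned last_seq, unsigned last_len,
    unsigned prev_end, unsigned seq, unsigned len)
{
	if (last_seq + last_len == seq)		/* comes right after last */
		return true;
	if (seq + len != last_seq)		/* not right before last */
		return false;
	return seq >= prev_end;			/* must not overlap prev */
}

int
main(void)
{
	/* prev ends at 1000, last = [2000, 3000), new = [1000, 2000): merge. */
	printf("%d\n", can_merge_with_last(2000, 1000, 1000, 1000, 1000));
	return 0;
}
```
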
e2289e66 SZ |
362 | static __inline void |
363 | tcp_ncr_update_rxtthresh(struct tcpcb *tp) | |
364 | { | |
365 | int old_rxtthresh = tp->t_rxtthresh; | |
366 | uint32_t ownd = tp->snd_max - tp->snd_una; | |
367 | ||
368 | tp->t_rxtthresh = max(3, ((ownd / tp->t_maxseg) >> 1)); | |
369 | if (tp->t_rxtthresh != old_rxtthresh) { | |
370 | tcp_sack_update_lostseq(&tp->scb, tp->snd_una, | |
371 | tp->t_maxseg, tp->t_rxtthresh); | |
372 | } | |
373 | } | |
374 | ||
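
The rule above halves the outstanding window, measured in segments, and never drops below the classic threshold of 3 duplicate ACKs; a small sketch of that arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirror of the threshold rule above: half the outstanding window,
 * in segments, but never less than the standard 3 duplicate ACKs. */
static int
ncr_rxtthresh(uint32_t ownd, uint32_t maxseg)
{
	uint32_t t = (ownd / maxseg) >> 1;

	return (t > 3 ? (int)t : 3);
}

int
main(void)
{
	/* 20 segments outstanding -> 10; 4 outstanding -> 3.  Prints "10 3". */
	printf("%d %d\n", ncr_rxtthresh(20 * 1460, 1460),
	    ncr_rxtthresh(4 * 1460, 1460));
	return 0;
}
```
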
984263bc | 375 | static int |
95b22adf | 376 | tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m) |
984263bc MD |
377 | { |
378 | struct tseg_qent *q; | |
379 | struct tseg_qent *p = NULL; | |
984263bc MD |
380 | struct tseg_qent *te; |
381 | struct socket *so = tp->t_inpcb->inp_socket; | |
382 | int flags; | |
383 | ||
384 | /* | |
61896e3c | 385 | * Call with th == NULL after becoming established to
984263bc MD | 386 | * force pre-ESTABLISHED data up to the user socket.
387 | */ | |
61896e3c | 388 | if (th == NULL) |
984263bc MD |
389 | goto present; |
390 | ||
3edf7c37 RG |
391 | /* |
392 | * Limit the number of segments in the reassembly queue to prevent | |
393 | * holding on to too many segments (and thus running out of mbufs). | |
394 | * Make sure to let through the missing segment that caused this
395 | * queue to build up. Always keep one global queue entry spare to be able to
396 | * process the missing segment. | |
397 | */ | |
398 | if (th->th_seq != tp->rcv_nxt && | |
399 | tcp_reass_qsize + 1 >= tcp_reass_maxseg) { | |
400 | tcp_reass_overflows++; | |
401 | tcpstat.tcps_rcvmemdrop++; | |
402 | m_freem(m); | |
91489f6b JH |
403 | /* no SACK block to report */ |
404 | tp->reportblk.rblk_start = tp->reportblk.rblk_end; | |
3edf7c37 RG |
405 | return (0); |
406 | } | |
407 | ||
ba4e3dbe | 408 | /* Allocate a new queue entry. */ |
884717e1 | 409 | te = kmalloc(sizeof(struct tseg_qent), M_TSEGQ, M_INTWAIT | M_NULLOK); |
984263bc MD |
410 | if (te == NULL) { |
411 | tcpstat.tcps_rcvmemdrop++; | |
412 | m_freem(m); | |
91489f6b JH |
413 | /* no SACK block to report */ |
414 | tp->reportblk.rblk_start = tp->reportblk.rblk_end; | |
984263bc MD |
415 | return (0); |
416 | } | |
2d23a8be | 417 | atomic_add_int(&tcp_reass_qsize, 1); |
984263bc MD |
418 | |
419 | /* | |
420 | * Find a segment which begins after this one does. | |
421 | */ | |
0f9e45de | 422 | TAILQ_FOREACH(q, &tp->t_segq, tqe_q) { |
984263bc MD |
423 | if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) |
424 | break; | |
425 | p = q; | |
426 | } | |
427 | ||
428 | /* | |
429 | * If there is a preceding segment, it may provide some of | |
430 | * our data already. If so, drop the data from the incoming | |
431 | * segment. If it provides all of our data, drop us. | |
432 | */ | |
433 | if (p != NULL) { | |
76cf5e41 | 434 | tcp_seq_diff_t i; |
91489f6b | 435 | |
984263bc MD |
436 | /* conversion to int (in i) handles seq wraparound */ |
437 | i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; | |
91489f6b | 438 | if (i > 0) { /* overlaps preceding segment */ |
c7e6499a SZ |
439 | tp->sack_flags |= |
440 | (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); | |
91489f6b JH |
441 | /* enclosing block starts w/ preceding segment */ |
442 | tp->encloseblk.rblk_start = p->tqe_th->th_seq; | |
984263bc | 443 | if (i >= *tlenp) { |
53432bee SZ |
444 | if (th->th_flags & TH_FIN) |
445 | p->tqe_th->th_flags |= TH_FIN; | |
446 | ||
91489f6b | 447 | /* preceding encloses incoming segment */ |
3a5d999b SZ |
448 | tp->encloseblk.rblk_end = TCP_SACK_BLKEND( |
449 | p->tqe_th->th_seq + p->tqe_len, | |
450 | p->tqe_th->th_flags); | |
984263bc MD |
451 | tcpstat.tcps_rcvduppack++; |
452 | tcpstat.tcps_rcvdupbyte += *tlenp; | |
453 | m_freem(m); | |
efda3bd0 | 454 | kfree(te, M_TSEGQ); |
2d23a8be | 455 | atomic_add_int(&tcp_reass_qsize, -1); |
984263bc MD |
456 | /* |
457 | * Try to present any queued data | |
458 | * at the left window edge to the user. | |
459 | * This is needed after the 3-WHS | |
460 | * completes. | |
461 | */ | |
462 | goto present; /* ??? */ | |
463 | } | |
464 | m_adj(m, i); | |
465 | *tlenp -= i; | |
466 | th->th_seq += i; | |
91489f6b | 467 | /* incoming segment end is enclosing block end */ |
3a5d999b SZ |
468 | tp->encloseblk.rblk_end = TCP_SACK_BLKEND( |
469 | th->th_seq + *tlenp, th->th_flags); | |
91489f6b JH |
470 | /* trim end of reported D-SACK block */ |
471 | tp->reportblk.rblk_end = th->th_seq; | |
984263bc MD |
472 | } |
473 | } | |
474 | tcpstat.tcps_rcvoopack++; | |
475 | tcpstat.tcps_rcvoobyte += *tlenp; | |
476 | ||
477 | /* | |
478 | * While we overlap succeeding segments trim them or, | |
479 | * if they are completely covered, dequeue them. | |
480 | */ | |
481 | while (q) { | |
76cf5e41 | 482 | tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; |
91489f6b | 483 | tcp_seq qend = q->tqe_th->th_seq + q->tqe_len; |
3a5d999b | 484 | tcp_seq qend_sack = TCP_SACK_BLKEND(qend, q->tqe_th->th_flags); |
91489f6b JH |
485 | struct tseg_qent *nq; |
486 | ||
984263bc MD |
487 | if (i <= 0) |
488 | break; | |
c7e6499a SZ |
489 | if (!(tp->sack_flags & TSACK_F_DUPSEG)) { |
490 | /* first time through */ | |
491 | tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG); | |
91489f6b JH |
492 | tp->encloseblk = tp->reportblk; |
493 | /* report trailing duplicate D-SACK segment */ | |
494 | tp->reportblk.rblk_start = q->tqe_th->th_seq; | |
495 | } | |
c7e6499a | 496 | if ((tp->sack_flags & TSACK_F_ENCLOSESEG) && |
3a5d999b | 497 | SEQ_GT(qend_sack, tp->encloseblk.rblk_end)) { |
91489f6b | 498 | /* extend enclosing block if one exists */ |
3a5d999b | 499 | tp->encloseblk.rblk_end = qend_sack; |
91489f6b | 500 | } |
984263bc MD |
501 | if (i < q->tqe_len) { |
502 | q->tqe_th->th_seq += i; | |
503 | q->tqe_len -= i; | |
504 | m_adj(q->tqe_m, i); | |
505 | break; | |
506 | } | |
507 | ||
53432bee SZ |
508 | if (q->tqe_th->th_flags & TH_FIN) |
509 | th->th_flags |= TH_FIN; | |
510 | ||
0f9e45de SZ |
511 | nq = TAILQ_NEXT(q, tqe_q); |
512 | TAILQ_REMOVE(&tp->t_segq, q, tqe_q); | |
984263bc | 513 | m_freem(q->tqe_m); |
efda3bd0 | 514 | kfree(q, M_TSEGQ); |
2d23a8be | 515 | atomic_add_int(&tcp_reass_qsize, -1); |
984263bc MD |
516 | q = nq; |
517 | } | |
518 | ||
519 | /* Insert the new segment queue entry into place. */ | |
520 | te->tqe_m = m; | |
521 | te->tqe_th = th; | |
522 | te->tqe_len = *tlenp; | |
523 | ||
91489f6b JH |
524 | /* check if can coalesce with following segment */ |
525 | if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) { | |
53432bee | 526 | tcp_seq tend_sack; |
91489f6b JH |
527 | |
528 | te->tqe_len += q->tqe_len; | |
a174690a | 529 | if (q->tqe_th->th_flags & TH_FIN) |
f16f8cc3 | 530 | te->tqe_th->th_flags |= TH_FIN; |
53432bee SZ |
531 | tend_sack = TCP_SACK_BLKEND(te->tqe_th->th_seq + te->tqe_len, |
532 | te->tqe_th->th_flags); | |
533 | ||
91489f6b | 534 | m_cat(te->tqe_m, q->tqe_m); |
3a5d999b | 535 | tp->encloseblk.rblk_end = tend_sack; |
91489f6b JH |
536 | /* |
537 | * When not reporting a duplicate segment, use | |
538 | * the larger enclosing block as the SACK block. | |
539 | */ | |
c7e6499a | 540 | if (!(tp->sack_flags & TSACK_F_DUPSEG)) |
3a5d999b | 541 | tp->reportblk.rblk_end = tend_sack; |
0f9e45de | 542 | TAILQ_REMOVE(&tp->t_segq, q, tqe_q); |
efda3bd0 | 543 | kfree(q, M_TSEGQ); |
2d23a8be | 544 | atomic_add_int(&tcp_reass_qsize, -1); |
91489f6b JH |
545 | } |
546 | ||
984263bc | 547 | if (p == NULL) { |
0f9e45de | 548 | TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q); |
984263bc | 549 | } else { |
91489f6b JH |
550 | /* check if can coalesce with preceding segment */ |
551 | if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) { | |
53432bee SZ |
552 | if (te->tqe_th->th_flags & TH_FIN) |
553 | p->tqe_th->th_flags |= TH_FIN; | |
a174690a JH |
554 | p->tqe_len += te->tqe_len; |
555 | m_cat(p->tqe_m, te->tqe_m); | |
91489f6b JH |
556 | tp->encloseblk.rblk_start = p->tqe_th->th_seq; |
557 | /* | |
558 | * When not reporting a duplicate segment, use | |
559 | * the larger enclosing block as the SACK block. | |
560 | */ | |
c7e6499a | 561 | if (!(tp->sack_flags & TSACK_F_DUPSEG)) |
91489f6b | 562 | tp->reportblk.rblk_start = p->tqe_th->th_seq; |
efda3bd0 | 563 | kfree(te, M_TSEGQ); |
2d23a8be MD |
564 | atomic_add_int(&tcp_reass_qsize, -1); |
565 | } else { | |
0f9e45de | 566 | TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q); |
2d23a8be | 567 | } |
984263bc MD |
568 | } |
569 | ||
570 | present: | |
571 | /* | |
572 | * Present data to user, advancing rcv_nxt through | |
573 | * completed sequence space. | |
574 | */ | |
575 | if (!TCPS_HAVEESTABLISHED(tp->t_state)) | |
576 | return (0); | |
0f9e45de | 577 | q = TAILQ_FIRST(&tp->t_segq); |
61896e3c | 578 | if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt) |
984263bc | 579 | return (0); |
91489f6b | 580 | tp->rcv_nxt += q->tqe_len; |
c7e6499a | 581 | if (!(tp->sack_flags & TSACK_F_DUPSEG)) { |
91489f6b JH |
582 | /* no SACK block to report since ACK advanced */ |
583 | tp->reportblk.rblk_start = tp->reportblk.rblk_end; | |
584 | } | |
585 | /* no enclosing block to report since ACK advanced */ | |
c7e6499a | 586 | tp->sack_flags &= ~TSACK_F_ENCLOSESEG; |
91489f6b | 587 | flags = q->tqe_th->th_flags & TH_FIN; |
0f9e45de SZ |
588 | TAILQ_REMOVE(&tp->t_segq, q, tqe_q); |
589 | KASSERT(TAILQ_EMPTY(&tp->t_segq) || | |
590 | TAILQ_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt, | |
91489f6b | 591 | ("segment not coalesced")); |
6cef7136 | 592 | if (so->so_state & SS_CANTRCVMORE) { |
91489f6b | 593 | m_freem(q->tqe_m); |
6cef7136 MD |
594 | } else { |
595 | lwkt_gettoken(&so->so_rcv.ssb_token); | |
6d49aa6f | 596 | ssb_appendstream(&so->so_rcv, q->tqe_m); |
6cef7136 MD |
597 | lwkt_reltoken(&so->so_rcv.ssb_token); |
598 | } | |
efda3bd0 | 599 | kfree(q, M_TSEGQ); |
2d23a8be | 600 | atomic_add_int(&tcp_reass_qsize, -1); |
984263bc MD |
601 | ND6_HINT(tp); |
602 | sorwakeup(so); | |
603 | return (flags); | |
604 | } | |
605 | ||
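
tcp_reass() computes overlap as a signed difference of 32-bit sequence numbers ("conversion to int ... handles seq wraparound"); a stand-alone illustration of why that keeps working across a sequence-space wrap:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Overlap of a preceding segment [prev_seq, prev_seq + prev_len) with a new
 * segment starting at new_seq, as a signed 32-bit difference.  Unsigned
 * wraparound followed by the signed cast gives the right answer even when
 * the preceding segment wraps past the top of the sequence space.
 */
static int32_t
seq_overlap(uint32_t prev_seq, uint32_t prev_len, uint32_t new_seq)
{
	return (int32_t)(prev_seq + prev_len - new_seq);
}

int
main(void)
{
	/* Preceding segment [0xffffff00, +0x200) wraps past 0; the new
	 * segment at 0x80 is overlapped by 128 bytes despite the wrap. */
	printf("%d\n", seq_overlap(0xffffff00u, 0x200, 0x80));
	return 0;
}
```
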
606 | /* | |
607 | * TCP input routine, follows pages 65-76 of the | |
608 | * protocol specification dated September, 1981 very closely. | |
609 | */ | |
610 | #ifdef INET6 | |
611 | int | |
95b22adf | 612 | tcp6_input(struct mbuf **mp, int *offp, int proto) |
984263bc | 613 | { |
2256ba69 | 614 | struct mbuf *m = *mp; |
984263bc MD |
615 | struct in6_ifaddr *ia6; |
616 | ||
617 | IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); | |
618 | ||
619 | /* | |
620 | * draft-itojun-ipv6-tcp-to-anycast | |
621 | * better place to put this in? | |
622 | */ | |
623 | ia6 = ip6_getdstifaddr(m); | |
d24ce1dc | 624 | if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { |
984263bc MD |
625 | struct ip6_hdr *ip6; |
626 | ||
627 | ip6 = mtod(m, struct ip6_hdr *); | |
628 | icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, | |
f23061d4 | 629 | offsetof(struct ip6_hdr, ip6_dst)); |
d24ce1dc | 630 | return (IPPROTO_DONE); |
984263bc MD |
631 | } |
632 | ||
002c1265 | 633 | tcp_input(mp, offp, proto); |
d24ce1dc | 634 | return (IPPROTO_DONE); |
984263bc MD |
635 | } |
636 | #endif | |
637 | ||
002c1265 MD |
638 | int |
639 | tcp_input(struct mbuf **mp, int *offp, int proto) | |
984263bc | 640 | { |
002c1265 | 641 | int off0; |
2256ba69 RG |
642 | struct tcphdr *th; |
643 | struct ip *ip = NULL; | |
644 | struct ipovly *ipov; | |
645 | struct inpcb *inp = NULL; | |
984263bc MD |
646 | u_char *optp = NULL; |
647 | int optlen = 0; | |
b1992928 MD |
648 | int tlen, off; |
649 | int len = 0; | |
984263bc | 650 | int drop_hdrlen; |
2256ba69 RG |
651 | struct tcpcb *tp = NULL; |
652 | int thflags; | |
4090d6ff | 653 | struct socket *so = NULL; |
61896e3c | 654 | int todrop, acked; |
27f4bf33 SZ |
655 | boolean_t ourfinisacked, needoutput = FALSE, delayed_dupack = FALSE; |
656 | tcp_seq th_dupack = 0; /* XXX gcc warning */ | |
984263bc | 657 | u_long tiwin; |
61896e3c | 658 | int recvwin; |
984263bc | 659 | struct tcpopt to; /* options in this segment */ |
984263bc MD |
660 | struct sockaddr_in *next_hop = NULL; |
661 | int rstreason; /* For badport_bandlim accounting purposes */ | |
d371a63a | 662 | int cpu; |
984263bc | 663 | struct ip6_hdr *ip6 = NULL; |
002c1265 | 664 | struct mbuf *m; |
984263bc | 665 | #ifdef INET6 |
d24ce1dc | 666 | boolean_t isipv6; |
984263bc | 667 | #else |
d24ce1dc | 668 | const boolean_t isipv6 = FALSE; |
984263bc MD |
669 | #endif |
670 | #ifdef TCPDEBUG | |
671 | short ostate = 0; | |
672 | #endif | |
673 | ||
002c1265 MD |
674 | off0 = *offp; |
675 | m = *mp; | |
676 | *mp = NULL; | |
a00138cb | 677 | |
d24ce1dc JH |
678 | tcpstat.tcps_rcvtotal++; |
679 | ||
5de23090 SZ |
680 | if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) { |
681 | struct m_tag *mtag; | |
682 | ||
683 | mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); | |
684 | KKASSERT(mtag != NULL); | |
685 | next_hop = m_tag_data(mtag); | |
984263bc | 686 | } |
d24ce1dc | 687 | |
984263bc | 688 | #ifdef INET6 |
d24ce1dc | 689 | isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? TRUE : FALSE; |
984263bc | 690 | #endif |
984263bc MD |
691 | |
692 | if (isipv6) { | |
693 | /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */ | |
694 | ip6 = mtod(m, struct ip6_hdr *); | |
407e896e | 695 | tlen = (sizeof *ip6) + ntohs(ip6->ip6_plen) - off0; |
984263bc MD |
696 | if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { |
697 | tcpstat.tcps_rcvbadsum++; | |
698 | goto drop; | |
699 | } | |
700 | th = (struct tcphdr *)((caddr_t)ip6 + off0); | |
701 | ||
702 | /* | |
703 | * Be proactive about unspecified IPv6 address in source. | |
704 | * As we use all-zero to indicate unbounded/unconnected pcb, | |
705 | * unspecified IPv6 address can be used to confuse us. | |
706 | * | |
707 | * Note that packets with unspecified IPv6 destination is | |
708 | * already dropped in ip6_input. | |
709 | */ | |
710 | if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { | |
711 | /* XXX stat */ | |
712 | goto drop; | |
713 | } | |
714 | } else { | |
715 | /* | |
716 | * Get IP and TCP header together in first mbuf. | |
717 | * Note: IP leaves IP header in first mbuf. | |
718 | */ | |
719 | if (off0 > sizeof(struct ip)) { | |
bddf0751 | 720 | ip_stripoptions(m); |
984263bc MD |
721 | off0 = sizeof(struct ip); |
722 | } | |
55d829f8 JH |
723 | /* already checked and pulled up in ip_demux() */ |
724 | KASSERT(m->m_len >= sizeof(struct tcpiphdr), | |
61896e3c | 725 | ("TCP header not in one mbuf: m->m_len %d", m->m_len)); |
984263bc MD |
726 | ip = mtod(m, struct ip *); |
727 | ipov = (struct ipovly *)ip; | |
728 | th = (struct tcphdr *)((caddr_t)ip + off0); | |
729 | tlen = ip->ip_len; | |
730 | ||
731 | if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { | |
732 | if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) | |
733 | th->th_sum = m->m_pkthdr.csum_data; | |
734 | else | |
735 | th->th_sum = in_pseudo(ip->ip_src.s_addr, | |
736 | ip->ip_dst.s_addr, | |
737 | htonl(m->m_pkthdr.csum_data + | |
738 | ip->ip_len + | |
739 | IPPROTO_TCP)); | |
740 | th->th_sum ^= 0xffff; | |
741 | } else { | |
742 | /* | |
743 | * Checksum extended TCP header and data. | |
744 | */ | |
745 | len = sizeof(struct ip) + tlen; | |
407e896e | 746 | bzero(ipov->ih_x1, sizeof ipov->ih_x1); |
984263bc MD |
747 | ipov->ih_len = (u_short)tlen; |
748 | ipov->ih_len = htons(ipov->ih_len); | |
749 | th->th_sum = in_cksum(m, len); | |
750 | } | |
751 | if (th->th_sum) { | |
752 | tcpstat.tcps_rcvbadsum++; | |
753 | goto drop; | |
754 | } | |
755 | #ifdef INET6 | |
756 | /* Re-initialization for later version check */ | |
757 | ip->ip_v = IPVERSION; | |
758 | #endif | |
759 | } | |
760 | ||
761 | /* | |
762 | * Check that TCP offset makes sense, | |
763 | * pull out TCP options and adjust length. XXX | |
764 | */ | |
765 | off = th->th_off << 2; | |
55d829f8 JH |
766 | /* already checked and pulled up in ip_demux() */ |
767 | KASSERT(off >= sizeof(struct tcphdr) && off <= tlen, | |
61896e3c | 768 | ("bad TCP data offset %d (tlen %d)", off, tlen)); |
984263bc MD |
769 | tlen -= off; /* tlen is used instead of ti->ti_len */ |
770 | if (off > sizeof(struct tcphdr)) { | |
771 | if (isipv6) { | |
002c1265 | 772 | IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE); |
984263bc MD |
773 | ip6 = mtod(m, struct ip6_hdr *); |
774 | th = (struct tcphdr *)((caddr_t)ip6 + off0); | |
775 | } else { | |
55d829f8 JH |
776 | /* already pulled up in ip_demux() */ |
777 | KASSERT(m->m_len >= sizeof(struct ip) + off, | |
61896e3c JH |
778 | ("TCP header and options not in one mbuf: " |
779 | "m_len %d, off %d", m->m_len, off)); | |
984263bc MD |
780 | } |
781 | optlen = off - sizeof(struct tcphdr); | |
782 | optp = (u_char *)(th + 1); | |
783 | } | |
784 | thflags = th->th_flags; | |
785 | ||
786 | #ifdef TCP_DROP_SYNFIN | |
787 | /* | |
788 | * If the drop_synfin option is enabled, drop all packets with | |
789 | * both the SYN and FIN bits set. This prevents e.g. nmap from | |
790 | * identifying the TCP/IP stack. | |
791 | * | |
792 | * This is a violation of the TCP specification. | |
793 | */ | |
61896e3c | 794 | if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) |
984263bc MD |
795 | goto drop; |
796 | #endif | |
797 | ||
798 | /* | |
799 | * Convert TCP protocol specific fields to host format. | |
800 | */ | |
801 | th->th_seq = ntohl(th->th_seq); | |
802 | th->th_ack = ntohl(th->th_ack); | |
803 | th->th_win = ntohs(th->th_win); | |
804 | th->th_urp = ntohs(th->th_urp); | |
805 | ||
806 | /* | |
d24ce1dc | 807 | * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, |
984263bc MD |
808 | * until after ip6_savecontrol() is called and before other functions |
809 | * which don't want those proto headers. | |
810 | * Because ip6_savecontrol() is going to parse the mbuf to | |
811 | * search for data to be passed up to user-land, it wants mbuf | |
812 | * parameters to be unchanged. | |
813 | * XXX: the call of ip6_savecontrol() has been obsoleted based on | |
814 | * latest version of the advanced API (20020110). | |
815 | */ | |
816 | drop_hdrlen = off0 + off; | |
817 | ||
818 | /* | |
819 | * Locate pcb for segment. | |
820 | */ | |
821 | findpcb: | |
822 | /* IPFIREWALL_FORWARD section */ | |
d24ce1dc | 823 | if (next_hop != NULL && !isipv6) { /* IPv6 support is not there yet */ |
984263bc MD |
824 | /* |
825 | * Transparently forwarded. Pretend to be the destination. | |
d24ce1dc | 826 | * already got one like this? |
984263bc | 827 | */ |
6ca1a1cd | 828 | cpu = mycpu->gd_cpuid; |
6ca1a1cd | 829 | inp = in_pcblookup_hash(&tcbinfo[cpu], |
d371a63a | 830 | ip->ip_src, th->th_sport, |
984263bc MD |
831 | ip->ip_dst, th->th_dport, |
832 | 0, m->m_pkthdr.rcvif); | |
833 | if (!inp) { | |
83be63fe JH |
834 | /* |
835 | * It's new. Try to find the ambushing socket. | |
836 | */ | |
837 | ||
838 | /* | |
839 | * The rest of the ipfw code stores the port in | |
840 | * host order. XXX | |
841 | * (The IP address is still in network order.) | |
842 | */ | |
843 | in_port_t dport = next_hop->sin_port ? | |
844 | htons(next_hop->sin_port) : | |
845 | th->th_dport; | |
846 | ||
d371a63a | 847 | cpu = tcp_addrcpu(ip->ip_src.s_addr, th->th_sport, |
83be63fe | 848 | next_hop->sin_addr.s_addr, dport); |
d371a63a | 849 | inp = in_pcblookup_hash(&tcbinfo[cpu], |
984263bc | 850 | ip->ip_src, th->th_sport, |
83be63fe | 851 | next_hop->sin_addr, dport, |
984263bc MD |
852 | 1, m->m_pkthdr.rcvif); |
853 | } | |
854 | } else { | |
6ca1a1cd | 855 | if (isipv6) { |
d371a63a | 856 | inp = in6_pcblookup_hash(&tcbinfo[0], |
984263bc MD |
857 | &ip6->ip6_src, th->th_sport, |
858 | &ip6->ip6_dst, th->th_dport, | |
859 | 1, m->m_pkthdr.rcvif); | |
6ca1a1cd | 860 | } else { |
6ca1a1cd | 861 | cpu = mycpu->gd_cpuid; |
6ca1a1cd | 862 | inp = in_pcblookup_hash(&tcbinfo[cpu], |
984263bc MD |
863 | ip->ip_src, th->th_sport, |
864 | ip->ip_dst, th->th_dport, | |
865 | 1, m->m_pkthdr.rcvif); | |
6ca1a1cd | 866 | } |
984263bc | 867 | } |
984263bc MD |
868 | |
869 | /* | |
870 | * If the state is CLOSED (i.e., TCB does not exist) then | |
871 | * all data in the incoming segment is discarded. | |
872 | * If the TCB exists but is in CLOSED state, it is embryonic, | |
873 | * but should either do a listen or a connect soon. | |
874 | */ | |
875 | if (inp == NULL) { | |
876 | if (log_in_vain) { | |
877 | #ifdef INET6 | |
878 | char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; | |
879 | #else | |
1141eb20 JH |
880 | char dbuf[sizeof "aaa.bbb.ccc.ddd"]; |
881 | char sbuf[sizeof "aaa.bbb.ccc.ddd"]; | |
984263bc MD |
882 | #endif |
883 | if (isipv6) { | |
884 | strcpy(dbuf, "["); | |
984263bc | 885 | strcat(dbuf, ip6_sprintf(&ip6->ip6_dst)); |
984263bc | 886 | strcat(dbuf, "]"); |
95b22adf JH |
887 | strcpy(sbuf, "["); |
888 | strcat(sbuf, ip6_sprintf(&ip6->ip6_src)); | |
984263bc MD |
889 | strcat(sbuf, "]"); |
890 | } else { | |
891 | strcpy(dbuf, inet_ntoa(ip->ip_dst)); | |
892 | strcpy(sbuf, inet_ntoa(ip->ip_src)); | |
893 | } | |
894 | switch (log_in_vain) { | |
895 | case 1: | |
61896e3c | 896 | if (!(thflags & TH_SYN)) |
984263bc MD |
897 | break; |
898 | case 2: | |
899 | log(LOG_INFO, | |
900 | "Connection attempt to TCP %s:%d " | |
901 | "from %s:%d flags:0x%02x\n", | |
902 | dbuf, ntohs(th->th_dport), sbuf, | |
903 | ntohs(th->th_sport), thflags); | |
904 | break; | |
905 | default: | |
906 | break; | |
907 | } | |
908 | } | |
d24ce1dc | 909 | if (blackhole) { |
984263bc MD |
910 | switch (blackhole) { |
911 | case 1: | |
912 | if (thflags & TH_SYN) | |
913 | goto drop; | |
914 | break; | |
915 | case 2: | |
916 | goto drop; | |
917 | default: | |
918 | goto drop; | |
919 | } | |
920 | } | |
921 | rstreason = BANDLIM_RST_CLOSEDPORT; | |
922 | goto dropwithreset; | |
923 | } | |
61896e3c JH |
924 | |
925 | #ifdef IPSEC | |
926 | if (isipv6) { | |
927 | if (ipsec6_in_reject_so(m, inp->inp_socket)) { | |
928 | ipsec6stat.in_polvio++; | |
929 | goto drop; | |
930 | } | |
931 | } else { | |
932 | if (ipsec4_in_reject_so(m, inp->inp_socket)) { | |
933 | ipsecstat.in_polvio++; | |
934 | goto drop; | |
935 | } | |
936 | } | |
937 | #endif | |
938 | #ifdef FAST_IPSEC | |
939 | if (isipv6) { | |
940 | if (ipsec6_in_reject(m, inp)) | |
941 | goto drop; | |
942 | } else { | |
943 | if (ipsec4_in_reject(m, inp)) | |
944 | goto drop; | |
945 | } | |
946 | #endif | |
95926362 MD |
947 | /* Check the minimum TTL for socket. */ |
948 | #ifdef INET6 | |
949 | if ((isipv6 ? ip6->ip6_hlim : ip->ip_ttl) < inp->inp_ip_minttl) | |
950 | goto drop; | |
951 | #endif | |
61896e3c | 952 | |
984263bc MD |
953 | tp = intotcpcb(inp); |
954 | if (tp == NULL) { | |
955 | rstreason = BANDLIM_RST_CLOSEDPORT; | |
956 | goto dropwithreset; | |
957 | } | |
eb594563 | 958 | if (tp->t_state <= TCPS_CLOSED) |
984263bc MD |
959 | goto drop; |
960 | ||
984263bc | 961 | so = inp->inp_socket; |
47654766 | 962 | |
984263bc | 963 | #ifdef TCPDEBUG |
47654766 JH |
964 | if (so->so_options & SO_DEBUG) { |
965 | ostate = tp->t_state; | |
966 | if (isipv6) | |
95b22adf | 967 | bcopy(ip6, tcp_saveipgen, sizeof(*ip6)); |
47654766 | 968 | else |
95b22adf | 969 | bcopy(ip, tcp_saveipgen, sizeof(*ip)); |
47654766 JH |
970 | tcp_savetcp = *th; |
971 | } | |
984263bc | 972 | #endif |
47654766 | 973 | |
407e896e | 974 | bzero(&to, sizeof to); |
d24ce1dc | 975 | |
47654766 JH |
976 | if (so->so_options & SO_ACCEPTCONN) { |
977 | struct in_conninfo inc; | |
978 | ||
984263bc | 979 | #ifdef INET6 |
d24ce1dc | 980 | inc.inc_isipv6 = (isipv6 == TRUE); |
984263bc MD |
981 | #endif |
982 | if (isipv6) { | |
983 | inc.inc6_faddr = ip6->ip6_src; | |
984 | inc.inc6_laddr = ip6->ip6_dst; | |
985 | inc.inc6_route.ro_rt = NULL; /* XXX */ | |
986 | } else { | |
987 | inc.inc_faddr = ip->ip_src; | |
988 | inc.inc_laddr = ip->ip_dst; | |
989 | inc.inc_route.ro_rt = NULL; /* XXX */ | |
990 | } | |
991 | inc.inc_fport = th->th_sport; | |
992 | inc.inc_lport = th->th_dport; | |
993 | ||
61896e3c JH |
994 | /* |
995 | * If the state is LISTEN then ignore segment if it contains | |
984263bc MD |
996 | * a RST. If the segment contains an ACK then it is bad and |
997 | * send a RST. If it does not contain a SYN then it is not | |
998 | * interesting; drop it. | |
999 | * | |
1000 | * If the state is SYN_RECEIVED (syncache) and seg contains | |
1001 | * an ACK, but not for our SYN/ACK, send a RST. If the seg | |
1002 | * contains a RST, check the sequence number to see if it | |
1003 | * is a valid reset segment. | |
1004 | */ | |
61896e3c JH |
1005 | if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { |
1006 | if ((thflags & (TH_RST | TH_ACK | TH_SYN)) == TH_ACK) { | |
984263bc MD |
1007 | if (!syncache_expand(&inc, th, &so, m)) { |
1008 | /* | |
1009 | * No syncache entry, or ACK was not | |
1010 | * for our SYN/ACK. Send a RST. | |
1011 | */ | |
1012 | tcpstat.tcps_badsyn++; | |
1013 | rstreason = BANDLIM_RST_OPENPORT; | |
1014 | goto dropwithreset; | |
1015 | } | |
2d23a8be MD |
1016 | |
1017 | /* | |
1018 | * Could not complete 3-way handshake, | |
1019 | * connection is being closed down, and | |
1020 | * syncache will free mbuf. | |
1021 | */ | |
984263bc | 1022 | if (so == NULL) |
002c1265 | 1023 | return(IPPROTO_DONE); |
2d23a8be MD |
1024 | |
1025 | /* | |
1026 | * We must be in the correct protocol thread | |
1027 | * for this connection. | |
1028 | */ | |
1029 | KKASSERT(so->so_port == &curthread->td_msgport); | |
1030 | ||
984263bc MD |
1031 | /* |
1032 | * Socket is created in state SYN_RECEIVED. | |
1033 | * Continue processing segment. | |
1034 | */ | |
ed894f8c | 1035 | inp = so->so_pcb; |
984263bc MD |
1036 | tp = intotcpcb(inp); |
1037 | /* | |
1038 | * This is what would have happened in | |
1039 | * tcp_output() when the SYN,ACK was sent. | |
1040 | */ | |
1041 | tp->snd_up = tp->snd_una; | |
1042 | tp->snd_max = tp->snd_nxt = tp->iss + 1; | |
1043 | tp->last_ack_sent = tp->rcv_nxt; | |
df1d2774 | 1044 | |
984263bc MD |
1045 | goto after_listen; |
1046 | } | |
1047 | if (thflags & TH_RST) { | |
1048 | syncache_chkrst(&inc, th); | |
1049 | goto drop; | |
1050 | } | |
1051 | if (thflags & TH_ACK) { | |
1052 | syncache_badack(&inc); | |
1053 | tcpstat.tcps_badsyn++; | |
1054 | rstreason = BANDLIM_RST_OPENPORT; | |
1055 | goto dropwithreset; | |
1056 | } | |
1057 | goto drop; | |
1058 | } | |
1059 | ||
1060 | /* | |
61896e3c | 1061 | * Segment's flags are (SYN) or (SYN | FIN). |
984263bc MD |
1062 | */ |
1063 | #ifdef INET6 | |
1064 | /* | |
1065 | * If deprecated address is forbidden, | |
1066 | * we do not accept SYN to deprecated interface | |
1067 | * address to prevent any new inbound connection from | |
1068 | * getting established. | |
1069 | * When we do not accept SYN, we send a TCP RST, | |
1070 | * with deprecated source address (instead of dropping | |
1071 | * it). We compromise it as it is much better for peer | |
1072 | * to send a RST, and RST will be the final packet | |
1073 | * for the exchange. | |
1074 | * | |
1075 | * If we do not forbid deprecated addresses, we accept | |
1076 | * the SYN packet. RFC2462 does not suggest dropping | |
1077 | * SYN in this case. | |
1078 | * If we decipher RFC2462 5.5.4, it says like this: | |
1079 | * 1. use of deprecated addr with existing | |
1080 | * communication is okay - "SHOULD continue to be | |
1081 | * used" | |
1082 | * 2. use of it with new communication: | |
1083 | * (2a) "SHOULD NOT be used if alternate address | |
61896e3c | 1084 | * with sufficient scope is available" |
984263bc MD |
1085 | * (2b) nothing mentioned otherwise. |
1086 | * Here we fall into (2b) case as we have no choice in | |
1087 | * our source address selection - we must obey the peer. | |
1088 | * | |
1089 | * The wording in RFC2462 is confusing, and there are | |
1090 | * multiple description text for deprecated address | |
1091 | * handling - worse, they are not exactly the same. | |
1092 | * I believe 5.5.4 is the best one, so we follow 5.5.4. | |
1093 | */ | |
1094 | if (isipv6 && !ip6_use_deprecated) { | |
1095 | struct in6_ifaddr *ia6; | |
1096 | ||
1097 | if ((ia6 = ip6_getdstifaddr(m)) && | |
1098 | (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { | |
1099 | tp = NULL; | |
1100 | rstreason = BANDLIM_RST_OPENPORT; | |
1101 | goto dropwithreset; | |
1102 | } | |
1103 | } | |
1104 | #endif | |
1105 | /* | |
1106 | * If it is from this socket, drop it, it must be forged. | |
1107 | * Don't bother responding if the destination was a broadcast. | |
1108 | */ | |
1109 | if (th->th_dport == th->th_sport) { | |
1110 | if (isipv6) { | |
1111 | if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, | |
1112 | &ip6->ip6_src)) | |
1113 | goto drop; | |
1114 | } else { | |
1115 | if (ip->ip_dst.s_addr == ip->ip_src.s_addr) | |
1116 | goto drop; | |
1117 | } | |
1118 | } | |
1119 | /* | |
1120 | * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN | |
1121 | * | |
1122 | * Note that it is quite possible to receive unicast | |
1123 | * link-layer packets with a broadcast IP address. Use | |
1124 | * in_broadcast() to find them. | |
1125 | */ | |
61896e3c | 1126 | if (m->m_flags & (M_BCAST | M_MCAST)) |
984263bc MD |
1127 | goto drop; |
1128 | if (isipv6) { | |
1129 | if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || | |
1130 | IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) | |
1131 | goto drop; | |
1132 | } else { | |
1133 | if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || | |
1134 | IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || | |
1135 | ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || | |
1136 | in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) | |
1137 | goto drop; | |
1138 | } | |
1139 | /* | |
1140 | * SYN appears to be valid; create compressed TCP state | |
1141 | * for syncache, or perform t/tcp connection. | |
1142 | */ | |
1143 | if (so->so_qlen <= so->so_qlimit) { | |
6c1bbf57 | 1144 | tcp_dooptions(&to, optp, optlen, TRUE, th->th_ack); |
b09567cc | 1145 | if (!syncache_add(&inc, &to, th, so, m)) |
984263bc | 1146 | goto drop; |
2d23a8be MD |
1147 | |
1148 | /* | |
1149 | * Entry added to syncache, mbuf used to | |
1150 | * send SYN,ACK packet. | |
1151 | */ | |
b09567cc | 1152 | return(IPPROTO_DONE); |
984263bc MD |
1153 | } |
1154 | goto drop; | |
1155 | } | |
984263bc | 1156 | |
2d23a8be MD |
1157 | after_listen: |
1158 | /* | |
1159 | * Should not happen - syncache should pick up these connections. | |
1160 | * | |
1161 | * Once we are past handling listen sockets we must be in the | |
1162 | * correct protocol processing thread. | |
1163 | */ | |
61896e3c | 1164 | KASSERT(tp->t_state != TCPS_LISTEN, ("tcp_input: TCPS_LISTEN state")); |
2d23a8be | 1165 | KKASSERT(so->so_port == &curthread->td_msgport); |
984263bc | 1166 | |
df1d2774 SZ |
1167 | /* Unscale the window into a 32-bit value. */ |
1168 | if (!(thflags & TH_SYN)) | |
1169 | tiwin = th->th_win << tp->snd_scale; | |
1170 | else | |
1171 | tiwin = th->th_win; | |
1172 | ||
5b0b9fa5 PA |
1173 | /* |
1174 | * This is the second part of the MSS DoS prevention code (after | |
1175 | * minmss on the sending side) and it deals with too many too small | |
1176 | * tcp packets in a too short timeframe (1 second). | |
1177 | * | |
153d34ac MD |
1178 | * XXX Removed. This code was crap. It does not scale to network |
1179 | * speed, and default values break NFS. Gone. | |
5b0b9fa5 | 1180 | */ |
153d34ac | 1181 | /* REMOVED */ |
5b0b9fa5 | 1182 | |
984263bc MD |
1183 | /* |
1184 | * Segment received on connection. | |
efca2b8e MD |
1185 | * |
1186 | * Reset idle time and keep-alive timer. Don't waste time if less | |
0ecd93f9 | 1187 | * than a second has elapsed.
984263bc | 1188 | */ |
0ecd93f9 MD |
1189 | if ((int)(ticks - tp->t_rcvtime) > hz) |
1190 | tcp_timer_keep_activity(tp, thflags); | |
984263bc MD |
1191 | |
1192 | /* | |
1193 | * Process options. | |
1194 | * XXX this is traditional behavior, may need to be cleaned up.
1195 | */ | |
6c1bbf57 | 1196 | tcp_dooptions(&to, optp, optlen, (thflags & TH_SYN) != 0, th->th_ack); |
ad0af98b | 1197 | if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { |
df1d2774 | 1198 | if ((to.to_flags & TOF_SCALE) && (tp->t_flags & TF_REQ_SCALE)) { |
984263bc | 1199 | tp->t_flags |= TF_RCVD_SCALE; |
df1d2774 | 1200 | tp->snd_scale = to.to_requested_s_scale; |
984263bc | 1201 | } |
df1d2774 SZ |
1202 | |
1203 | /* | |
1204 | * Initial send window; will be updated upon next ACK | |
1205 | */ | |
1206 | tp->snd_wnd = th->th_win; | |
1207 | ||
984263bc MD |
1208 | if (to.to_flags & TOF_TS) { |
1209 | tp->t_flags |= TF_RCVD_TSTMP; | |
1210 | tp->ts_recent = to.to_tsval; | |
1211 | tp->ts_recent_age = ticks; | |
1212 | } | |
f34fd0f2 SZ |
1213 | if (!(to.to_flags & TOF_MSS)) |
1214 | to.to_mss = 0; | |
1215 | tcp_mss(tp, to.to_mss); | |
91489f6b JH |
1216 | /* |
1217 | * Only set the TF_SACK_PERMITTED per-connection flag | |
1218 | * if we got a SACK_PERMITTED option from the other side | |
1219 | * and the global tcp_do_sack variable is true. | |
1220 | */ | |
1221 | if (tcp_do_sack && (to.to_flags & TOF_SACK_PERMITTED)) | |
1222 | tp->t_flags |= TF_SACK_PERMITTED; | |
984263bc MD |
1223 | } |
1224 | ||
1225 | /* | |
1226 | * Header prediction: check for the two common cases | |
1227 | * of a uni-directional data xfer. If the packet has | |
1228 | * no control flags, is in-sequence, the window didn't | |
1229 | * change and we're not retransmitting, it's a | |
1230 | * candidate. If the length is zero and the ack moved | |
1231 | * forward, we're the sender side of the xfer. Just | |
1232 | * free the data acked & wake any higher level process | |
1233 | * that was blocked waiting for space. If the length | |
1234 | * is non-zero and the ack didn't move, we're the | |
1235 | * receiver side. If we're getting packets in-order | |
1236 | * (the reassembly queue is empty), add the data to | |
1237 | * the socket buffer and note that we need a delayed ack. | |
1238 | * Make sure that the hidden state-flags are also off. | |
1239 | * Since we check for TCPS_ESTABLISHED above, it can only | |
1240 | * be TH_NEEDSYN. | |
1241 | */ | |
1242 | if (tp->t_state == TCPS_ESTABLISHED && | |
1243 | (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && | |
61896e3c JH |
1244 | !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && |
1245 | (!(to.to_flags & TOF_TS) || | |
984263bc | 1246 | TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && |
984263bc | 1247 | th->th_seq == tp->rcv_nxt && |
984263bc MD |
1248 | tp->snd_nxt == tp->snd_max) { |
1249 | ||
1250 | /* | |
1251 | * If last ACK falls within this segment's sequence numbers, | |
1252 | * record the timestamp. | |
1253 | * NOTE that the test is modified according to the latest | |
1254 | * proposal of the tcplw@cray.com list (Braden 1993/04/26). | |
1255 | */ | |
61896e3c | 1256 | if ((to.to_flags & TOF_TS) && |
984263bc MD |
1257 | SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { |
1258 | tp->ts_recent_age = ticks; | |
1259 | tp->ts_recent = to.to_tsval; | |
1260 | } | |
1261 | ||
1262 | if (tlen == 0) { | |
1263 | if (SEQ_GT(th->th_ack, tp->snd_una) && | |
1264 | SEQ_LEQ(th->th_ack, tp->snd_max) && | |
1265 | tp->snd_cwnd >= tp->snd_wnd && | |
95b22adf | 1266 | !IN_FASTRECOVERY(tp)) { |
984263bc | 1267 | /* |
be23faf1 | 1268 | * This is a pure ack for outstanding data. |
984263bc MD |
1269 | */ |
1270 | ++tcpstat.tcps_predack; | |
1271 | /* | |
1272 | * "bad retransmit" recovery | |
bfdb979e JH |
1273 | * |
1274 | * If Eifel detection applies, then | |
1275 | * it is deterministic, so use it | |
1276 | * unconditionally over the old heuristic. | |
1277 | * Otherwise, fall back to the old heuristic. | |
984263bc | 1278 | */ |
bfdb979e JH |
1279 | if (tcp_do_eifel_detect && |
1280 | (to.to_flags & TOF_TS) && to.to_tsecr && | |
1c8b7a61 | 1281 | (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { |
bfdb979e JH |
1282 | /* Eifel detection applicable. */ |
1283 | if (to.to_tsecr < tp->t_rexmtTS) { | |
1284 | tcp_revert_congestion_state(tp); | |
1285 | ++tcpstat.tcps_eifeldetected; | |
928c3291 SZ |
1286 | if (tp->t_rxtshift != 1 || |
1287 | ticks >= tp->t_badrxtwin) | |
1288 | ++tcpstat.tcps_rttcantdetect; | |
bfdb979e JH |
1289 | } |
1290 | } else if (tp->t_rxtshift == 1 && | |
1291 | ticks < tp->t_badrxtwin) { | |
1292 | tcp_revert_congestion_state(tp); | |
1293 | ++tcpstat.tcps_rttdetected; | |
984263bc | 1294 | } |
1c8b7a61 SZ |
1295 | tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | |
1296 | TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); | |
984263bc MD |
1297 | /* |
1298 | * Recalculate the retransmit timer / rtt. | |
1299 | * | |
d24ce1dc | 1300 | * Some machines (certain windows boxes) |
984263bc MD |
1301 | * send broken timestamp replies during the |
1302 | * SYN+ACK phase, ignore timestamps of 0. | |
1303 | */ | |
95b22adf | 1304 | if ((to.to_flags & TOF_TS) && to.to_tsecr) { |
984263bc | 1305 | tcp_xmit_timer(tp, |
073ec6c4 SZ |
1306 | ticks - to.to_tsecr + 1, |
1307 | th->th_ack); | |
984263bc | 1308 | } else if (tp->t_rtttime && |
95b22adf | 1309 | SEQ_GT(th->th_ack, tp->t_rtseq)) { |
984263bc | 1310 | tcp_xmit_timer(tp, |
073ec6c4 SZ |
1311 | ticks - tp->t_rtttime, |
1312 | th->th_ack); | |
984263bc MD |
1313 | } |
1314 | tcp_xmit_bandwidth_limit(tp, th->th_ack); | |
1315 | acked = th->th_ack - tp->snd_una; | |
1316 | tcpstat.tcps_rcvackpack++; | |
1317 | tcpstat.tcps_rcvackbyte += acked; | |
6d49aa6f | 1318 | sbdrop(&so->so_snd.sb, acked); |
cfb3f3f4 | 1319 | tp->snd_recover = th->th_ack - 1; |
9845754e | 1320 | tp->snd_una = th->th_ack; |
984263bc | 1321 | tp->t_dupacks = 0; |
df9d7670 JH |
1322 | /* |
1323 | * Update window information. | |
1324 | */ | |
1325 | if (tiwin != tp->snd_wnd && | |
1326 | acceptable_window_update(tp, th, tiwin)) { | |
1327 | /* keep track of pure window updates */ | |
1328 | if (tp->snd_wl2 == th->th_ack && | |
1329 | tiwin > tp->snd_wnd) | |
1330 | tcpstat.tcps_rcvwinupd++; | |
1331 | tp->snd_wnd = tiwin; | |
1332 | tp->snd_wl1 = th->th_seq; | |
1333 | tp->snd_wl2 = th->th_ack; | |
1334 | if (tp->snd_wnd > tp->max_sndwnd) | |
1335 | tp->max_sndwnd = tp->snd_wnd; | |
1336 | } | |
984263bc MD |
1337 | m_freem(m); |
1338 | ND6_HINT(tp); /* some progress has been done */ | |
984263bc MD |
1339 | /* |
1340 | * If all outstanding data are acked, stop | |
1341 | * retransmit timer, otherwise restart timer | |
1342 | * using current (possibly backed-off) value. | |
1343 | * If process is waiting for space, | |
1344 | * wakeup/selwakeup/signal. If data | |
1345 | * are ready to send, let tcp_output | |
1346 | * decide between more output or persist. | |
1347 | */ | |
a48c5dd5 SZ |
1348 | if (tp->snd_una == tp->snd_max) { |
1349 | tcp_callout_stop(tp, tp->tt_rexmt); | |
1350 | } else if (!tcp_callout_active(tp, | |
1351 | tp->tt_persist)) { | |
1352 | tcp_callout_reset(tp, tp->tt_rexmt, | |
1353 | tp->t_rxtcur, tcp_timer_rexmt); | |
1354 | } | |
984263bc | 1355 | sowwakeup(so); |
6d49aa6f | 1356 | if (so->so_snd.ssb_cc > 0) |
f23061d4 | 1357 | tcp_output(tp); |
002c1265 | 1358 | return(IPPROTO_DONE); |
984263bc | 1359 | } |
df9d7670 JH |
1360 | } else if (tiwin == tp->snd_wnd && |
1361 | th->th_ack == tp->snd_una && | |
0f9e45de | 1362 | TAILQ_EMPTY(&tp->t_segq) && |
6d49aa6f | 1363 | tlen <= ssb_space(&so->so_rcv)) { |
46e92930 | 1364 | u_long newsize = 0; /* automatic sockbuf scaling */ |
984263bc | 1365 | /* |
be23faf1 | 1366 | * This is a pure, in-sequence data packet |
984263bc MD |
1367 | * with nothing on the reassembly queue and |
1368 | * we have enough buffer space to take it. | |
1369 | */ | |
1370 | ++tcpstat.tcps_preddat; | |
1371 | tp->rcv_nxt += tlen; | |
1372 | tcpstat.tcps_rcvpack++; | |
1373 | tcpstat.tcps_rcvbyte += tlen; | |
1374 | ND6_HINT(tp); /* some progress has been done */ | |
5b0b9fa5 PA |
1375 | /* |
1376 | * Automatic sizing of receive socket buffer. Often the send | |
1377 | * buffer size is not optimally adjusted to the actual network | |
1378 | * conditions at hand (delay bandwidth product). Setting the | |
1379 | * buffer size too small limits throughput on links with high | |
1380 | * bandwidth and high delay (e.g. trans-continental/oceanic links). | |
1381 | * | |
1382 | * On the receive side the socket buffer memory is only rarely | |
1383 | * used to any significant extent. This allows us to be much | |
1384 | * more aggressive in scaling the receive socket buffer. For | |
1385 | * the case that the buffer space is actually used to a large | |
1386 | * extent and we run out of kernel memory we can simply drop | |
1387 | * the new segments; TCP on the sender will just retransmit them | |
1388 | * later. Setting the buffer size too big may only consume too | |
1389 | * much kernel memory if the application doesn't read() from | |
1390 | * the socket or packet loss or reordering makes use of the | |
1391 | * reassembly queue. | |
1392 | * | |
1393 | * The criteria to step up the receive buffer one notch are: | |
1394 | * 1. the number of bytes received during the time it takes | |
1395 | * one timestamp to be reflected back to us (the RTT); | |
1396 | * 2. received bytes per RTT is within seven eighths of the | |
1397 | * current socket buffer size; | |
1398 | * 3. receive buffer size has not hit maximal automatic size; | |
1399 | * | |
1400 | * This algorithm does one step per RTT at most and only if | |
1401 | * we receive a bulk stream w/o packet losses or reorderings. | |
1402 | * Shrinking the buffer during idle times is not necessary as | |
1403 | * it doesn't consume any memory when idle. | |
1404 | * | |
1405 | * TODO: Only step up if the application is actually serving | |
1406 | * the buffer to better manage the socket buffer resources. | |
1407 | */ | |
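/*
 * Worked example of the step-up test below: with ssb_hiwat at 65536
 * bytes, the buffer is grown only once more than 57344 bytes (7/8 of
 * the current size) have arrived within one reflected-timestamp
 * interval (less than hz ticks), and then only by tcp_autorcvbuf_inc,
 * capped at tcp_autorcvbuf_max.
 */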
1408 | if (tcp_do_autorcvbuf && | |
1409 | to.to_tsecr && | |
1410 | (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) { | |
1411 | if (to.to_tsecr > tp->rfbuf_ts && | |
1412 | to.to_tsecr - tp->rfbuf_ts < hz) { | |
1413 | if (tp->rfbuf_cnt > | |
1414 | (so->so_rcv.ssb_hiwat / 8 * 7) && | |
1415 | so->so_rcv.ssb_hiwat < | |
1416 | tcp_autorcvbuf_max) { | |
1417 | newsize = | |
46e92930 MD |
1418 | ulmin(so->so_rcv.ssb_hiwat + |
1419 | tcp_autorcvbuf_inc, | |
1420 | tcp_autorcvbuf_max); | |
5b0b9fa5 PA |
1421 | } |
1422 | /* Start over with next RTT. */ | |
1423 | tp->rfbuf_ts = 0; | |
1424 | tp->rfbuf_cnt = 0; | |
1425 | } else | |
1426 | tp->rfbuf_cnt += tlen; /* add up */ | |
1427 | } | |
984263bc MD |
1428 | /* |
1429 | * Add data to socket buffer. | |
1430 | */ | |
1431 | if (so->so_state & SS_CANTRCVMORE) { | |
1432 | m_freem(m); | |
1433 | } else { | |
5b0b9fa5 | 1434 | /* |
46e92930 MD |
1435 | * Set new socket buffer size, give up when |
1436 | * limit is reached. | |
1437 | * | |
1438 | * Adjusting the size can mess up ACK | |
1439 | * sequencing when pure window updates are | |
1440 | * being avoided (which is the default), | |
1441 | * so force an ack. | |
5b0b9fa5 | 1442 | */ |
6cef7136 | 1443 | lwkt_gettoken(&so->so_rcv.ssb_token); |
46e92930 MD |
1444 | if (newsize) { |
1445 | tp->t_flags |= TF_RXRESIZED; | |
5b0b9fa5 | 1446 | if (!ssb_reserve(&so->so_rcv, newsize, |
46e92930 | 1447 | so, NULL)) { |
14343ad3 | 1448 | atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); |
46e92930 MD |
1449 | } |
1450 | if (newsize >= | |
1451 | (TCP_MAXWIN << tp->rcv_scale)) { | |
14343ad3 | 1452 | atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); |
46e92930 MD |
1453 | } |
1454 | } | |
09cf73e0 | 1455 | m_adj(m, drop_hdrlen); /* delayed header drop */ |
6d49aa6f | 1456 | ssb_appendstream(&so->so_rcv, m); |
6cef7136 | 1457 | lwkt_reltoken(&so->so_rcv.ssb_token); |
984263bc MD |
1458 | } |
1459 | sorwakeup(so); | |
2b1ce38a MD |
1460 | /* |
1461 | * This code is responsible for most of the ACKs | |
1462 | * the TCP stack sends back after receiving a data | |
1463 | * packet. Note that the DELAY_ACK check fails if | |
1464 | * the delack timer is already running, which results | |
1465 | * in an ack being sent every other packet (which is | |
1466 | * what we want). | |
72b37eeb MD |
1467 | * |
1468 | * We then further aggregate acks by not actually | |
1469 | * sending one until the protocol thread has completed | |
1470 | * processing the current backlog of packets. This | |
1471 | * does not delay the ack any further, but allows us | |
1472 | * to take advantage of the packet aggregation that | |
1473 | * high speed NICs do (usually blocks of 8-10 packets) | |
1474 | * to send a single ack rather than four or five acks, | |
1475 | * greatly reducing the ack rate, the return channel | |
1476 | * bandwidth, and the protocol overhead on both ends. | |
1477 | * | |
1478 | * Since this also has the effect of slowing down | |
1479 | * the exponential slow-start ramp-up, systems with | |
1480 | * very large bandwidth-delay products might want | |
1481 | * to turn the feature off. | |
2b1ce38a | 1482 | */ |
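/*
 * Concretely, the three branches below: if the delack timer is idle,
 * arm it (this may be the first segment of a pair); else, if ack
 * aggregation is enabled, set TF_ACKNOW but only queue the tcpcb on
 * the per-cpu output queue so the ack goes out once the protocol
 * thread has drained its input backlog; otherwise ack immediately
 * via tcp_output().
 */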
984263bc | 1483 | if (DELAY_ACK(tp)) { |
a48c5dd5 SZ |
1484 | tcp_callout_reset(tp, tp->tt_delack, |
1485 | tcp_delacktime, tcp_timer_delack); | |
72b37eeb | 1486 | } else if (tcp_aggregate_acks) { |
984263bc | 1487 | tp->t_flags |= TF_ACKNOW; |
61896e3c | 1488 | if (!(tp->t_flags & TF_ONOUTPUTQ)) { |
2b1ce38a MD |
1489 | tp->t_flags |= TF_ONOUTPUTQ; |
1490 | tp->tt_cpu = mycpu->gd_cpuid; | |
1491 | TAILQ_INSERT_TAIL( | |
1492 | &tcpcbackq[tp->tt_cpu], | |
1493 | tp, t_outputq); | |
1494 | } | |
72b37eeb MD |
1495 | } else { |
1496 | tp->t_flags |= TF_ACKNOW; | |
1497 | tcp_output(tp); | |
984263bc | 1498 | } |
002c1265 | 1499 | return(IPPROTO_DONE); |
984263bc MD |
1500 | } |
1501 | } | |
1502 | ||
1503 | /* | |
1504 | * Calculate amount of space in receive window, | |
1505 | * and then do TCP input processing. | |
1506 | * Receive window is amount of space in rcv queue, | |
1507 | * but not less than advertised window. | |
1508 | */ | |
6d49aa6f | 1509 | recvwin = ssb_space(&so->so_rcv); |
61896e3c JH |
1510 | if (recvwin < 0) |
1511 | recvwin = 0; | |
1512 | tp->rcv_wnd = imax(recvwin, (int)(tp->rcv_adv - tp->rcv_nxt)); | |
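/*
 * E.g. if only 8192 bytes are free in the receive buffer but we have
 * already advertised 16384 bytes (rcv_adv - rcv_nxt), rcv_wnd stays
 * at 16384: the window previously offered to the peer is never
 * retracted.
 */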
984263bc | 1513 | |
5b0b9fa5 PA |
1514 | /* Reset receive buffer auto scaling when not in bulk receive mode. */ |
1515 | tp->rfbuf_ts = 0; | |
1516 | tp->rfbuf_cnt = 0; | |
1517 | ||
984263bc | 1518 | switch (tp->t_state) { |
984263bc MD |
1519 | /* |
1520 | * If the state is SYN_RECEIVED: | |
1521 | * if seg contains an ACK, but not for our SYN/ACK, send a RST. | |
1522 | */ | |
1523 | case TCPS_SYN_RECEIVED: | |
1524 | if ((thflags & TH_ACK) && | |
1525 | (SEQ_LEQ(th->th_ack, tp->snd_una) || | |
1526 | SEQ_GT(th->th_ack, tp->snd_max))) { | |
1527 | rstreason = BANDLIM_RST_OPENPORT; | |
1528 | goto dropwithreset; | |
1529 | } | |
1530 | break; | |
1531 | ||
1532 | /* | |
1533 | * If the state is SYN_SENT: | |
1534 | * if seg contains an ACK, but not for our SYN, drop the input. | |
1535 | * if seg contains a RST, then drop the connection. | |
1536 | * if seg does not contain SYN, then drop it. | |
1537 | * Otherwise this is an acceptable SYN segment | |
1538 | * initialize tp->rcv_nxt and tp->irs | |
1539 | * if seg contains ack then advance tp->snd_una | |
1540 | * if SYN has been acked change to ESTABLISHED else SYN_RCVD state | |
1541 | * arrange for segment to be acked (eventually) | |
1542 | * continue processing rest of data/controls, beginning with URG | |
1543 | */ | |
1544 | case TCPS_SYN_SENT: | |
984263bc MD |
1545 | if ((thflags & TH_ACK) && |
1546 | (SEQ_LEQ(th->th_ack, tp->iss) || | |
1547 | SEQ_GT(th->th_ack, tp->snd_max))) { | |
27b8aee3 AE |
1548 | rstreason = BANDLIM_UNLIMITED; |
1549 | goto dropwithreset; | |
984263bc MD |
1550 | } |
1551 | if (thflags & TH_RST) { | |
1552 | if (thflags & TH_ACK) | |
1553 | tp = tcp_drop(tp, ECONNREFUSED); | |
1554 | goto drop; | |
1555 | } | |
61896e3c | 1556 | if (!(thflags & TH_SYN)) |
984263bc | 1557 | goto drop; |
984263bc MD |
1558 | |
1559 | tp->irs = th->th_seq; | |
1560 | tcp_rcvseqinit(tp); | |
1561 | if (thflags & TH_ACK) { | |
27b8aee3 | 1562 | /* Our SYN was acked. */ |
984263bc MD |
1563 | tcpstat.tcps_connects++; |
1564 | soisconnected(so); | |
1565 | /* Do window scaling on this connection? */ | |
61896e3c | 1566 | if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == |
df1d2774 | 1567 | (TF_RCVD_SCALE | TF_REQ_SCALE)) |
984263bc | 1568 | tp->rcv_scale = tp->request_r_scale; |
984263bc MD |
1569 | tp->rcv_adv += tp->rcv_wnd; |
1570 | tp->snd_una++; /* SYN is acked */ | |
a48c5dd5 | 1571 | tcp_callout_stop(tp, tp->tt_rexmt); |
984263bc MD |
1572 | /* |
1573 | * If there's data, delay ACK; if there's also a FIN | |
1574 | * ACKNOW will be turned on later. | |
1575 | */ | |
a48c5dd5 SZ |
1576 | if (DELAY_ACK(tp) && tlen != 0) { |
1577 | tcp_callout_reset(tp, tp->tt_delack, | |
1578 | tcp_delacktime, tcp_timer_delack); | |
1579 | } else { | |
984263bc | 1580 | tp->t_flags |= TF_ACKNOW; |
a48c5dd5 | 1581 | } |
984263bc MD |
1582 | /* |
1583 | * Received <SYN,ACK> in SYN_SENT[*] state. | |
1584 | * Transitions: | |
1585 | * SYN_SENT --> ESTABLISHED | |
1586 | * SYN_SENT* --> FIN_WAIT_1 | |
1587 | */ | |
1588 | tp->t_starttime = ticks; | |
1589 | if (tp->t_flags & TF_NEEDFIN) { | |
1590 | tp->t_state = TCPS_FIN_WAIT_1; | |
1591 | tp->t_flags &= ~TF_NEEDFIN; | |
1592 | thflags &= ~TH_SYN; | |
1593 | } else { | |
8651f7f8 | 1594 | tcp_established(tp); |
984263bc MD |
1595 | } |
1596 | } else { | |
1597 | /* | |
95b22adf | 1598 | * Received initial SYN in SYN-SENT[*] state => |
27b8aee3 AE |
1599 | * simultaneous open. |
1600 | * Do 3-way handshake: | |
61896e3c JH |
1601 | * SYN-SENT -> SYN-RECEIVED |
1602 | * SYN-SENT* -> SYN-RECEIVED* | |
95b22adf | 1603 | */ |
984263bc | 1604 | tp->t_flags |= TF_ACKNOW; |
a48c5dd5 | 1605 | tcp_callout_stop(tp, tp->tt_rexmt); |
27b8aee3 | 1606 | tp->t_state = TCPS_SYN_RECEIVED; |
984263bc MD |
1607 | } |
1608 | ||
984263bc MD |
1609 | /* |
1610 | * Advance th->th_seq to correspond to first data byte. | |
1611 | * If data, trim to stay within window, | |
1612 | * dropping FIN if necessary. | |
1613 | */ | |
1614 | th->th_seq++; | |
1615 | if (tlen > tp->rcv_wnd) { | |
1616 | todrop = tlen - tp->rcv_wnd; | |
1617 | m_adj(m, -todrop); | |
1618 | tlen = tp->rcv_wnd; | |
1619 | thflags &= ~TH_FIN; | |
1620 | tcpstat.tcps_rcvpackafterwin++; | |
1621 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
1622 | } | |
1623 | tp->snd_wl1 = th->th_seq - 1; | |
1624 | tp->rcv_up = th->th_seq; | |
1625 | /* | |
1626 | * Client side of transaction: already sent SYN and data. | |
1627 | * If the remote host used T/TCP to validate the SYN, | |
1628 | * our data will be ACK'd; if so, enter normal data segment | |
1629 | * processing in the middle of step 5, ack processing. | |
1630 | * Otherwise, goto step 6. | |
1631 | */ | |
95b22adf | 1632 | if (thflags & TH_ACK) |
984263bc MD |
1633 | goto process_ACK; |
1634 | ||
1635 | goto step6; | |
1636 | ||
1637 | /* | |
1638 | * If the state is LAST_ACK or CLOSING or TIME_WAIT: | |
27b8aee3 | 1639 | * do normal processing (we no longer bother with T/TCP). |
984263bc MD |
1640 | */ |
1641 | case TCPS_LAST_ACK: | |
1642 | case TCPS_CLOSING: | |
1643 | case TCPS_TIME_WAIT: | |
95b22adf | 1644 | break; /* continue normal processing */ |
984263bc MD |
1645 | } |
1646 | ||
1647 | /* | |
1648 | * States other than LISTEN or SYN_SENT. | |
1649 | * First check the RST flag and sequence number since reset segments | |
1650 | * are exempt from the timestamp and connection count tests. This | |
1651 | * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix | |
1652 | * below which allowed reset segments in half the sequence space | |
1653 | * to fall through and be processed (which gives forged reset | |
1654 | * segments with a random sequence number a 50 percent chance of | |
1655 | * killing a connection). | |
1656 | * Then check timestamp, if present. | |
1657 | * Then check the connection count, if present. | |
1658 | * Then check that at least some bytes of segment are within | |
1659 | * receive window. If segment begins before rcv_nxt, | |
1660 | * drop leading data (and SYN); if nothing left, just ack. | |
1661 | * | |
1662 | * | |
1663 | * If the RST bit is set, check the sequence number to see | |
1664 | * if this is a valid reset segment. | |
1665 | * RFC 793 page 37: | |
1666 | * In all states except SYN-SENT, all reset (RST) segments | |
1667 | * are validated by checking their SEQ-fields. A reset is | |
1668 | * valid if its sequence number is in the window. | |
1669 | * Note: this does not take into account delayed ACKs, so | |
1670 | * we should test against last_ack_sent instead of rcv_nxt. | |
1671 | * The sequence number in the reset segment is normally an | |
91489f6b | 1672 | * echo of our outgoing acknowledgement numbers, but some hosts |
984263bc MD |
1673 | * send a reset with the sequence number at the rightmost edge |
1674 | * of our receive window, and we have to handle this case. | |
1675 | * If we have multiple segments in flight, the initial reset | |
1676 | * segment sequence numbers will be to the left of last_ack_sent, | |
1677 | * but they will eventually catch up. | |
1678 | * In any case, it never made sense to trim reset segments to | |
1679 | * fit the receive window since RFC 1122 says: | |
1680 | * 4.2.2.12 RST Segment: RFC-793 Section 3.4 | |
1681 | * | |
1682 | * A TCP SHOULD allow a received RST segment to include data. | |
1683 | * | |
1684 | * DISCUSSION | |
61896e3c JH |
1685 | * It has been suggested that a RST segment could contain |
1686 | * ASCII text that encoded and explained the cause of the | |
91489f6b | 1687 | * RST. No standard has yet been established for such |
61896e3c | 1688 | * data. |
984263bc MD |
1689 | * |
1690 | * If the reset segment passes the sequence number test examine | |
1691 | * the state: | |
1692 | * SYN_RECEIVED STATE: | |
1693 | * If passive open, return to LISTEN state. | |
1694 | * If active open, inform user that connection was refused. | |
1695 | * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: | |
1696 | * Inform user that connection was reset, and close tcb. | |
1697 | * CLOSING, LAST_ACK STATES: | |
1698 | * Close the tcb. | |
1699 | * TIME_WAIT STATE: | |
1700 | * Drop the segment - see Stevens, vol. 2, p. 964 and | |
61896e3c | 1701 | * RFC 1337. |
984263bc MD |
1702 | */ |
1703 | if (thflags & TH_RST) { | |
1704 | if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) && | |
d4dbb5be | 1705 | SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { |
984263bc MD |
1706 | switch (tp->t_state) { |
1707 | ||
1708 | case TCPS_SYN_RECEIVED: | |
1709 | so->so_error = ECONNREFUSED; | |
1710 | goto close; | |
1711 | ||
1712 | case TCPS_ESTABLISHED: | |
1713 | case TCPS_FIN_WAIT_1: | |
1714 | case TCPS_FIN_WAIT_2: | |
1715 | case TCPS_CLOSE_WAIT: | |
1716 | so->so_error = ECONNRESET; | |
1717 | close: | |
1718 | tp->t_state = TCPS_CLOSED; | |
1719 | tcpstat.tcps_drops++; | |
1720 | tp = tcp_close(tp); | |
1721 | break; | |
1722 | ||
1723 | case TCPS_CLOSING: | |
1724 | case TCPS_LAST_ACK: | |
1725 | tp = tcp_close(tp); | |
1726 | break; | |
1727 | ||
1728 | case TCPS_TIME_WAIT: | |
1729 | break; | |
1730 | } | |
1731 | } | |
1732 | goto drop; | |
1733 | } | |
1734 | ||
1735 | /* | |
1736 | * RFC 1323 PAWS: If we have a timestamp reply on this segment | |
1737 | * and it's less than ts_recent, drop it. | |
1738 | */ | |
61896e3c | 1739 | if ((to.to_flags & TOF_TS) && tp->ts_recent != 0 && |
984263bc | 1740 | TSTMP_LT(to.to_tsval, tp->ts_recent)) { |
984263bc MD |
1741 | /* Check to see if ts_recent is over 24 days old. */ |
1742 | if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { | |
1743 | /* | |
1744 | * Invalidate ts_recent. If this segment updates | |
1745 | * ts_recent, the age will be reset later and ts_recent | |
1746 | * will get a valid value. If it does not, setting | |
1747 | * ts_recent to zero will at least satisfy the | |
1748 | * requirement that zero be placed in the timestamp | |
1749 | * echo reply when ts_recent isn't valid. The | |
1750 | * age isn't reset until we get a valid ts_recent | |
1751 | * because we don't want out-of-order segments to be | |
1752 | * dropped when ts_recent is old. | |
1753 | */ | |
1754 | tp->ts_recent = 0; | |
9de1f696 SZ |
1755 | } else if (tcp_paws_tolerance && tlen != 0 && |
1756 | tp->t_state == TCPS_ESTABLISHED && | |
1757 | (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK&& | |
1758 | !(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) && | |
1759 | th->th_ack == tp->snd_una && | |
1760 | tiwin == tp->snd_wnd && | |
1761 | TSTMP_GEQ(to.to_tsval + tcp_paws_tolerance, tp->ts_recent)&& | |
1762 | (th->th_seq == tp->rcv_nxt || | |
1763 | (SEQ_GT(th->th_seq, tp->rcv_nxt) && | |
1764 | tcp_paws_canreasslast(tp, th, tlen)))) { | |
1765 | /* | |
1766 | * This tends to prevent valid new segments from being | |
1767 | * dropped due to the reordered segments sent by the fast | |
1768 | * retransmission algorithm on the sending side, i.e. | |
1769 | * the fast retransmitted segment w/ larger timestamp | |
1770 | * arrives earlier than the previously sent new segments | |
1771 | * w/ smaller timestamp. | |
1772 | * | |
1773 | * If following conditions are met, the segment is | |
1774 | * accepted: | |
1775 | * - The segment contains data | |
1776 | * - The connection is established | |
1777 | * - The header does not contain important flags | |
1778 | * - SYN or FIN is not needed | |
1779 | * - It does not acknowledge new data | |
1780 | * - Receive window is not changed | |
1781 | * - The timestamp is within "acceptable" range | |
1782 | * - The new segment is what we are expecting or | |
1783 | * the new segment could be merged w/ the last | |
1784 | * pending segment on the reassembly queue | |
1785 | */ | |
1786 | tcpstat.tcps_pawsaccept++; | |
1787 | tcpstat.tcps_pawsdrop++; | |
984263bc MD |
1788 | } else { |
1789 | tcpstat.tcps_rcvduppack++; | |
1790 | tcpstat.tcps_rcvdupbyte += tlen; | |
1791 | tcpstat.tcps_pawsdrop++; | |
1792 | if (tlen) | |
1793 | goto dropafterack; | |
1794 | goto drop; | |
1795 | } | |
1796 | } | |
1797 | ||
984263bc MD |
1798 | /* |
1799 | * In the SYN-RECEIVED state, validate that the packet belongs to | |
1800 | * this connection before trimming the data to fit the receive | |
1801 | * window. Check the sequence number versus IRS since we know | |
1802 | * the sequence numbers haven't wrapped. This is a partial fix | |
1803 | * for the "LAND" DoS attack. | |
1804 | */ | |
1805 | if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { | |
1806 | rstreason = BANDLIM_RST_OPENPORT; | |
1807 | goto dropwithreset; | |
1808 | } | |
1809 | ||
1810 | todrop = tp->rcv_nxt - th->th_seq; | |
1811 | if (todrop > 0) { | |
91489f6b JH |
1812 | if (TCP_DO_SACK(tp)) { |
1813 | /* Report duplicate segment at head of packet. */ | |
1814 | tp->reportblk.rblk_start = th->th_seq; | |
3a5d999b SZ |
1815 | tp->reportblk.rblk_end = TCP_SACK_BLKEND( |
1816 | th->th_seq + tlen, thflags); | |
91489f6b JH |
1817 | if (SEQ_GT(tp->reportblk.rblk_end, tp->rcv_nxt)) |
1818 | tp->reportblk.rblk_end = tp->rcv_nxt; | |
c7e6499a SZ |
1819 | tp->sack_flags |= (TSACK_F_DUPSEG | TSACK_F_SACKLEFT); |
1820 | tp->t_flags |= TF_ACKNOW; | |
91489f6b | 1821 | } |
984263bc MD |
1822 | if (thflags & TH_SYN) { |
1823 | thflags &= ~TH_SYN; | |
1824 | th->th_seq++; | |
1825 | if (th->th_urp > 1) | |
1826 | th->th_urp--; | |
1827 | else | |
1828 | thflags &= ~TH_URG; | |
1829 | todrop--; | |
1830 | } | |
1831 | /* | |
1832 | * Following if statement from Stevens, vol. 2, p. 960. | |
1833 | */ | |
61896e3c JH |
1834 | if (todrop > tlen || |
1835 | (todrop == tlen && !(thflags & TH_FIN))) { | |
984263bc MD |
1836 | /* |
1837 | * Any valid FIN must be to the left of the window. | |
1838 | * At this point the FIN must be a duplicate or out | |
1839 | * of sequence; drop it. | |
1840 | */ | |
1841 | thflags &= ~TH_FIN; | |
1842 | ||
1843 | /* | |
1844 | * Send an ACK to resynchronize and drop any data. | |
1845 | * But keep on processing for RST or ACK. | |
1846 | */ | |
1847 | tp->t_flags |= TF_ACKNOW; | |
1848 | todrop = tlen; | |
1849 | tcpstat.tcps_rcvduppack++; | |
1850 | tcpstat.tcps_rcvdupbyte += todrop; | |
1851 | } else { | |
1852 | tcpstat.tcps_rcvpartduppack++; | |
1853 | tcpstat.tcps_rcvpartdupbyte += todrop; | |
1854 | } | |
1855 | drop_hdrlen += todrop; /* drop from the top afterwards */ | |
1856 | th->th_seq += todrop; | |
1857 | tlen -= todrop; | |
1858 | if (th->th_urp > todrop) | |
1859 | th->th_urp -= todrop; | |
1860 | else { | |
1861 | thflags &= ~TH_URG; | |
1862 | th->th_urp = 0; | |
1863 | } | |
1864 | } | |
1865 | ||
1866 | /* | |
1867 | * If new data are received on a connection after the | |
1868 | * user processes are gone, then RST the other end. | |
1869 | */ | |
1870 | if ((so->so_state & SS_NOFDREF) && | |
1871 | tp->t_state > TCPS_CLOSE_WAIT && tlen) { | |
1872 | tp = tcp_close(tp); | |
1873 | tcpstat.tcps_rcvafterclose++; | |
1874 | rstreason = BANDLIM_UNLIMITED; | |
1875 | goto dropwithreset; | |
1876 | } | |
1877 | ||
1878 | /* | |
1879 | * If segment ends after window, drop trailing data | |
1880 | * (and PUSH and FIN); if nothing left, just ACK. | |
1881 | */ | |
61896e3c | 1882 | todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); |
984263bc MD |
1883 | if (todrop > 0) { |
1884 | tcpstat.tcps_rcvpackafterwin++; | |
1885 | if (todrop >= tlen) { | |
1886 | tcpstat.tcps_rcvbyteafterwin += tlen; | |
1887 | /* | |
1888 | * If a new connection request is received | |
1889 | * while in TIME_WAIT, drop the old connection | |
1890 | * and start over if the sequence numbers | |
1891 | * are above the previous ones. | |
1892 | */ | |
1893 | if (thflags & TH_SYN && | |
1894 | tp->t_state == TCPS_TIME_WAIT && | |
1895 | SEQ_GT(th->th_seq, tp->rcv_nxt)) { | |
1896 | tp = tcp_close(tp); | |
1897 | goto findpcb; | |
1898 | } | |
1899 | /* | |
1900 | * If window is closed can only take segments at | |
1901 | * window edge, and have to drop data and PUSH from | |
1902 | * incoming segments. Continue processing, but | |
1903 | * remember to ack. Otherwise, drop segment | |
1904 | * and ack. | |
1905 | */ | |
1906 | if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { | |
1907 | tp->t_flags |= TF_ACKNOW; | |
1908 | tcpstat.tcps_rcvwinprobe++; | |
1909 | } else | |
1910 | goto dropafterack; | |
1911 | } else | |
1912 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
1913 | m_adj(m, -todrop); | |
1914 | tlen -= todrop; | |
61896e3c | 1915 | thflags &= ~(TH_PUSH | TH_FIN); |
984263bc MD |
1916 | } |
1917 | ||
1918 | /* | |
1919 | * If last ACK falls within this segment's sequence numbers, | |
1920 | * record its timestamp. | |
ad0af98b ND |
1921 | * NOTE: |
1922 | * 1) That the test incorporates suggestions from the latest | |
1923 | * proposal of the tcplw@cray.com list (Braden 1993/04/26). | |
1924 | * 2) That updating only on newer timestamps interferes with | |
1925 | * our earlier PAWS tests, so this check should be solely | |
1926 | * predicated on the sequence space of this segment. | |
1927 | * 3) That we modify the segment boundary check to be | |
1928 | * Last.ACK.Sent <= SEG.SEQ + SEG.LEN | |
1929 | * instead of RFC1323's | |
1930 | * Last.ACK.Sent < SEG.SEQ + SEG.LEN. | |
1931 | * This modified check allows us to overcome RFC1323's | |
1932 | * limitations as described in Stevens TCP/IP Illustrated | |
1933 | * Vol. 2 p.869. In such cases, we can still calculate the | |
1934 | * RTT correctly when RCV.NXT == Last.ACK.Sent. | |
984263bc | 1935 | */ |
ad0af98b ND |
1936 | if ((to.to_flags & TOF_TS) && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && |
1937 | SEQ_LEQ(tp->last_ack_sent, (th->th_seq + tlen | |
1938 | + ((thflags & TH_SYN) != 0) | |
1939 | + ((thflags & TH_FIN) != 0)))) { | |
984263bc MD |
1940 | tp->ts_recent_age = ticks; |
1941 | tp->ts_recent = to.to_tsval; | |
1942 | } | |
1943 | ||
1944 | /* | |
1945 | * If a SYN is in the window, then this is an | |
1946 | * error and we send an RST and drop the connection. | |
1947 | */ | |
1948 | if (thflags & TH_SYN) { | |
1949 | tp = tcp_drop(tp, ECONNRESET); | |
1950 | rstreason = BANDLIM_UNLIMITED; | |
1951 | goto dropwithreset; | |
1952 | } | |
1953 | ||
1954 | /* | |
1955 | * If the ACK bit is off: if in SYN-RECEIVED state or the TF_NEEDSYN | |
1956 | * flag is on (half-synchronized state), then queue data for | |
1957 | * later processing; else drop segment and return. | |
1958 | */ | |
61896e3c | 1959 | if (!(thflags & TH_ACK)) { |
984263bc MD |
1960 | if (tp->t_state == TCPS_SYN_RECEIVED || |
1961 | (tp->t_flags & TF_NEEDSYN)) | |
1962 | goto step6; | |
1963 | else | |
1964 | goto drop; | |
1965 | } | |
1966 | ||
1967 | /* | |
1968 | * Ack processing. | |
1969 | */ | |
1970 | switch (tp->t_state) { | |
984263bc | 1971 | /* |
91489f6b | 1972 | * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter |
984263bc MD |
1973 | * ESTABLISHED state and continue processing. |
1974 | * The ACK was checked above. | |
1975 | */ | |
1976 | case TCPS_SYN_RECEIVED: | |
1977 | ||
1978 | tcpstat.tcps_connects++; | |
1979 | soisconnected(so); | |
1980 | /* Do window scaling? */ | |
61896e3c | 1981 | if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == |
df1d2774 | 1982 | (TF_RCVD_SCALE | TF_REQ_SCALE)) |
984263bc | 1983 | tp->rcv_scale = tp->request_r_scale; |
984263bc MD |
1984 | /* |
1985 | * Make transitions: | |
1986 | * SYN-RECEIVED -> ESTABLISHED | |
1987 | * SYN-RECEIVED* -> FIN-WAIT-1 | |
1988 | */ | |
1989 | tp->t_starttime = ticks; | |
1990 | if (tp->t_flags & TF_NEEDFIN) { | |
1991 | tp->t_state = TCPS_FIN_WAIT_1; | |
1992 | tp->t_flags &= ~TF_NEEDFIN; | |
1993 | } else { | |
8651f7f8 | 1994 | tcp_established(tp); |
984263bc MD |
1995 | } |
1996 | /* | |
1997 | * If segment contains data or ACK, will call tcp_reass() | |
1998 | * later; if not, do so now to pass queued data to user. | |
1999 | */ | |
61896e3c | 2000 | if (tlen == 0 && !(thflags & TH_FIN)) |
f23061d4 | 2001 | tcp_reass(tp, NULL, NULL, NULL); |
984263bc MD |
2002 | /* fall into ... */ |
2003 | ||
2004 | /* | |
2005 | * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range | |
2006 | * ACKs. If the ack is in the range | |
2007 | * tp->snd_una < th->th_ack <= tp->snd_max | |
2008 | * then advance tp->snd_una to th->th_ack and drop | |
2009 | * data from the retransmission queue. If this ACK reflects | |
2010 | * more up-to-date window information, we update our window information. | |
2011 | */ | |
2012 | case TCPS_ESTABLISHED: | |
2013 | case TCPS_FIN_WAIT_1: | |
2014 | case TCPS_FIN_WAIT_2: | |
2015 | case TCPS_CLOSE_WAIT: | |
2016 | case TCPS_CLOSING: | |
2017 | case TCPS_LAST_ACK: | |
2018 | case TCPS_TIME_WAIT: | |
2019 | ||
2020 | if (SEQ_LEQ(th->th_ack, tp->snd_una)) { | |
91489f6b JH |
2021 | if (TCP_DO_SACK(tp)) |
2022 | tcp_sack_update_scoreboard(tp, &to); | |
a48c5dd5 | 2023 | if (!tcp_callout_active(tp, tp->tt_rexmt) || |
91489f6b | 2024 | th->th_ack != tp->snd_una) { |
52f9ffcf SZ |
2025 | if (tlen == 0 && tiwin == tp->snd_wnd) |
2026 | tcpstat.tcps_rcvdupack++; | |
91489f6b JH |
2027 | tp->t_dupacks = 0; |
2028 | break; | |
2029 | } | |
ffe35e17 SZ |
2030 | if (tlen != 0 || tiwin != tp->snd_wnd) { |
2031 | if (!tcp_do_rfc3517bis || | |
2032 | !TCP_DO_SACK(tp) || | |
2033 | (to.to_flags & | |
2034 | (TOF_SACK | TOF_SACK_REDUNDANT)) | |
2035 | != TOF_SACK) { | |
2036 | tp->t_dupacks = 0; | |
91489f6b | 2037 | } else { |
27f4bf33 SZ |
2038 | delayed_dupack = TRUE; |
2039 | th_dupack = th->th_ack; | |
91489f6b | 2040 | } |
95b22adf | 2041 | break; |
95b22adf | 2042 | } |
5c99b248 | 2043 | if (tcp_recv_dupack(tp, th->th_ack, &to)) |
ffe35e17 | 2044 | goto drop; |
27f4bf33 SZ |
2045 | else |
2046 | break; | |
984263bc MD |
2047 | } |
2048 | ||
2049 | KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una")); | |
984263bc MD |
2050 | tp->t_dupacks = 0; |
2051 | if (SEQ_GT(th->th_ack, tp->snd_max)) { | |
5a274421 JH |
2052 | /* |
2053 | * Detected optimistic ACK attack. | |
2054 | * Force slow-start to de-synchronize attack. | |
2055 | */ | |
2056 | tp->snd_cwnd = tp->t_maxseg; | |
8acdb67c | 2057 | tp->snd_wacked = 0; |
5a274421 | 2058 | |
984263bc MD |
2059 | tcpstat.tcps_rcvacktoomuch++; |
2060 | goto dropafterack; | |
2061 | } | |
2062 | /* | |
2063 | * If we reach this point, ACK is not a duplicate, | |
2064 | * i.e., it ACKs something we sent. | |
2065 | */ | |
2066 | if (tp->t_flags & TF_NEEDSYN) { | |
2067 | /* | |
2068 | * T/TCP: Connection was half-synchronized, and our | |
2069 | * SYN has been ACK'd (so connection is now fully | |
2070 | * synchronized). Go to non-starred state, | |
2071 | * increment snd_una for ACK of SYN, and check if | |
2072 | * we can do window scaling. | |
2073 | */ | |
2074 | tp->t_flags &= ~TF_NEEDSYN; | |
2075 | tp->snd_una++; | |
2076 | /* Do window scaling? */ | |
61896e3c | 2077 | if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == |
df1d2774 | 2078 | (TF_RCVD_SCALE | TF_REQ_SCALE)) |
984263bc | 2079 | tp->rcv_scale = tp->request_r_scale; |
984263bc MD |
2080 | } |
2081 | ||
2082 | process_ACK: | |
2083 | acked = th->th_ack - tp->snd_una; | |
2084 | tcpstat.tcps_rcvackpack++; | |
2085 | tcpstat.tcps_rcvackbyte += acked; | |
2086 | ||
b5572302 | 2087 | if (tcp_do_eifel_detect && acked > 0 && |
95b22adf | 2088 | (to.to_flags & TOF_TS) && (to.to_tsecr != 0) && |
1c8b7a61 | 2089 | (tp->rxt_flags & TRXT_F_FIRSTACCACK)) { |
bfdb979e JH |
2090 | /* Eifel detection applicable. */ |
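/*
 * If the echoed timestamp predates the timestamp recorded at the
 * first retransmission (t_rexmtTS), this ACK was triggered by the
 * original transmission, so the retransmission was spurious; revert
 * the congestion state (cwnd/ssthresh) that the retransmit timeout
 * clobbered.
 */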
2091 | if (to.to_tsecr < tp->t_rexmtTS) { | |
bfdb979e | 2092 | ++tcpstat.tcps_eifeldetected; |
8819433a | 2093 | tcp_revert_congestion_state(tp); |
928c3291 | 2094 | if (tp->t_rxtshift != 1 || |
8819433a JH |
2095 | ticks >= tp->t_badrxtwin) |
2096 | ++tcpstat.tcps_rttcantdetect; | |
bfdb979e JH |
2097 | } |
2098 | } else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { | |
b5572302 JH |
2099 | /* |
2100 | * If we just performed our first retransmit, | |
2101 | * and the ACK arrives within our recovery window, | |
2102 | * then it was a mistake to do the retransmit | |
91489f6b | 2103 | * in the first place. Recover our original cwnd |
b5572302 JH |
2104 | * and ssthresh, and proceed to transmit where we |
2105 | * left off. | |
2106 | */ | |
bfdb979e JH |
2107 | tcp_revert_congestion_state(tp); |
2108 | ++tcpstat.tcps_rttdetected; | |
984263bc MD |
2109 | } |
2110 | ||
2111 | /* | |
2112 | * If we have a timestamp reply, update smoothed | |
2113 | * round trip time. If no timestamp is present but | |
2114 | * transmit timer is running and timed sequence | |
2115 | * number was acked, update smoothed round trip time. | |
2116 | * Since we now have an rtt measurement, cancel the | |
2117 | * timer backoff (cf., Phil Karn's retransmit alg.). | |
2118 | * Recompute the initial retransmit timer. | |
2119 | * | |
2120 | * Some machines (certain Windows boxes) send broken | |
d24ce1dc | 2121 | * timestamp replies during the SYN+ACK phase; ignore |
984263bc MD |
2122 | * timestamps of 0. |
2123 | */ | |
95b22adf | 2124 | if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) |
073ec6c4 | 2125 | tcp_xmit_timer(tp, ticks - to.to_tsecr + 1, th->th_ack); |
95b22adf | 2126 | else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) |
073ec6c4 | 2127 | tcp_xmit_timer(tp, ticks - tp->t_rtttime, th->th_ack); |
984263bc MD |
2128 | tcp_xmit_bandwidth_limit(tp, th->th_ack); |
2129 | ||
984263bc MD |
2130 | /* |
2131 | * If no data (only SYN) was ACK'd, | |
2132 | * skip rest of ACK processing. | |
2133 | */ | |
2134 | if (acked == 0) | |
2135 | goto step6; | |
2136 | ||
efd4b327 | 2137 | /* Stop looking for an acceptable ACK since one was received. */ |
1c8b7a61 SZ |
2138 | tp->rxt_flags &= ~(TRXT_F_FIRSTACCACK | |
2139 | TRXT_F_FASTREXMT | TRXT_F_EARLYREXMT); | |
efd4b327 | 2140 | |
6d49aa6f MD |
2141 | if (acked > so->so_snd.ssb_cc) { |
2142 | tp->snd_wnd -= so->so_snd.ssb_cc; | |
2143 | sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc); | |
61896e3c | 2144 | ourfinisacked = TRUE; |
984263bc | 2145 | } else { |
6d49aa6f | 2146 | sbdrop(&so->so_snd.sb, acked); |
984263bc | 2147 | tp->snd_wnd -= acked; |
61896e3c | 2148 | ourfinisacked = FALSE; |
984263bc MD |
2149 | } |
2150 | sowwakeup(so); | |
91489f6b JH |
2151 | |
2152 | /* | |
2153 | * Update window information. | |
91489f6b | 2154 | */ |
e126661b | 2155 | if (acceptable_window_update(tp, th, tiwin)) { |
91489f6b JH |
2156 | /* keep track of pure window updates */ |
2157 | if (tlen == 0 && tp->snd_wl2 == th->th_ack && | |
2158 | tiwin > tp->snd_wnd) | |
2159 | tcpstat.tcps_rcvwinupd++; | |
2160 | tp->snd_wnd = tiwin; | |
2161 | tp->snd_wl1 = th->th_seq; | |
2162 | tp->snd_wl2 = th->th_ack; | |
2163 | if (tp->snd_wnd > tp->max_sndwnd) | |
2164 | tp->max_sndwnd = tp->snd_wnd; | |
2165 | needoutput = TRUE; | |
2166 | } | |
2167 | ||
2168 | tp->snd_una = th->th_ack; | |
2169 | if (TCP_DO_SACK(tp)) | |
2170 | tcp_sack_update_scoreboard(tp, &to); | |
95b22adf | 2171 | if (IN_FASTRECOVERY(tp)) { |
91489f6b | 2172 | if (SEQ_GEQ(th->th_ack, tp->snd_recover)) { |
95b22adf | 2173 | EXIT_FASTRECOVERY(tp); |
91489f6b JH |
2174 | needoutput = TRUE; |
2175 | /* | |
2176 | * If the congestion window was inflated | |
2177 | * to account for the other side's | |
2178 | * cached packets, retract it. | |
8acdb67c JH |
2179 | */ |
2180 | if (!TCP_DO_SACK(tp)) | |
2181 | tp->snd_cwnd = tp->snd_ssthresh; | |
2182 | ||
2183 | /* | |
91489f6b JH |
2184 | * Window inflation should have left us |
2185 | * with approximately snd_ssthresh outstanding | |
2186 | * data. But, in case we would be inclined | |
2187 | * to send a burst, better do it using | |
2188 | * slow start. | |
2189 | */ | |
91489f6b JH |
2190 | if (SEQ_GT(th->th_ack + tp->snd_cwnd, |
2191 | tp->snd_max + 2 * tp->t_maxseg)) | |
2192 | tp->snd_cwnd = | |
2193 | (tp->snd_max - tp->snd_una) + | |
2194 | 2 * tp->t_maxseg; | |
8acdb67c JH |
2195 | |
2196 | tp->snd_wacked = 0; | |
91489f6b JH |
2197 | } else { |
2198 | if (TCP_DO_SACK(tp)) { | |
2199 | tp->snd_max_rexmt = tp->snd_max; | |
ccb518ea SZ |
2200 | tcp_sack_rexmt(tp, |
2201 | tp->snd_una == tp->rexmt_high); | |
91489f6b JH |
2202 | } else { |
2203 | tcp_newreno_partial_ack(tp, th, acked); | |
2204 | } | |
2205 | needoutput = FALSE; | |
2206 | } | |
95b22adf | 2207 | } else { |
91489f6b | 2208 | /* |
8acdb67c JH |
2209 | * Open the congestion window. When in slow-start, |
2210 | * open exponentially: maxseg per packet. Otherwise, | |
2211 | * open linearly: maxseg per window. | |
91489f6b | 2212 | */ |
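/*
 * E.g. with t_maxseg = 1460: in slow-start each ACK grows cwnd by
 * min(acked, abc_sslimit) bytes (or by one maxseg when ABC is off),
 * roughly doubling cwnd every RTT; in congestion avoidance
 * snd_wacked accumulates acked bytes and cwnd grows by one maxseg
 * only after a full cwnd's worth has been acked, i.e. about one
 * maxseg per RTT.
 */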
8acdb67c JH |
2213 | if (tp->snd_cwnd <= tp->snd_ssthresh) { |
2214 | u_int abc_sslimit = | |
2215 | (SEQ_LT(tp->snd_nxt, tp->snd_max) ? | |
2216 | tp->t_maxseg : 2 * tp->t_maxseg); | |
2217 | ||
2218 | /* slow-start */ | |
2219 | tp->snd_cwnd += tcp_do_abc ? | |
2220 | min(acked, abc_sslimit) : tp->t_maxseg; | |
2221 | } else { | |
2222 | /* linear increase */ | |
2223 | tp->snd_wacked += tcp_do_abc ? acked : | |
2224 | tp->t_maxseg; | |
2225 | if (tp->snd_wacked >= tp->snd_cwnd) { | |
2226 | tp->snd_wacked -= tp->snd_cwnd; | |
2227 | tp->snd_cwnd += tp->t_maxseg; | |
2228 | } | |
2229 | } | |
2230 | tp->snd_cwnd = min(tp->snd_cwnd, | |
2231 | TCP_MAXWIN << tp->snd_scale); | |
95b22adf | 2232 | tp->snd_recover = th->th_ack - 1; |
cfb3f3f4 | 2233 | } |
984263bc MD |
2234 | if (SEQ_LT(tp->snd_nxt, tp->snd_una)) |
2235 | tp->snd_nxt = tp->snd_una; | |
2236 | ||
91489f6b JH |
2237 | /* |
2238 | * If all outstanding data is acked, stop retransmit | |
2239 | * timer and remember to restart (more output or persist). | |
2240 | * If there is more data to be acked, restart retransmit | |
2241 | * timer, using current (possibly backed-off) value. | |
2242 | */ | |
2243 | if (th->th_ack == tp->snd_max) { | |
a48c5dd5 | 2244 | tcp_callout_stop(tp, tp->tt_rexmt); |
91489f6b | 2245 | needoutput = TRUE; |
a48c5dd5 SZ |
2246 | } else if (!tcp_callout_active(tp, tp->tt_persist)) { |
2247 | tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur, | |
2248 | tcp_timer_rexmt); | |
2249 | } | |
91489f6b | 2250 | |
984263bc | 2251 | switch (tp->t_state) { |
984263bc MD |
2252 | /* |
2253 | * In FIN_WAIT_1 STATE in addition to the processing | |
2254 | * for the ESTABLISHED state if our FIN is now acknowledged | |
2255 | * then enter FIN_WAIT_2. | |
2256 | */ | |
2257 | case TCPS_FIN_WAIT_1: | |
2258 | if (ourfinisacked) { | |
2259 | /* | |
2260 | * If we can't receive any more | |
2261 | * data, then closing user can proceed. | |
2262 | * Starting the timer is contrary to the | |
2263 | * specification, but if we don't get a FIN | |
2264 | * we'll hang forever. | |
2265 | */ | |
2266 | if (so->so_state & SS_CANTRCVMORE) { | |
2267 | soisdisconnected(so); | |
a48c5dd5 | 2268 | tcp_callout_reset(tp, tp->tt_2msl, |
5d61ded3 | 2269 | tp->t_maxidle, tcp_timer_2msl); |
984263bc MD |
2270 | } |
2271 | tp->t_state = TCPS_FIN_WAIT_2; | |
2272 | } | |
2273 | break; | |
2274 | ||
95b22adf | 2275 | /* |
984263bc MD |
2276 | * In CLOSING STATE in addition to the processing for |
2277 | * the ESTABLISHED state if the ACK acknowledges our FIN | |
2278 | * then enter the TIME-WAIT state, otherwise ignore | |
2279 | * the segment. | |
2280 | */ | |
2281 | case TCPS_CLOSING: | |
2282 | if (ourfinisacked) { | |
2283 | tp->t_state = TCPS_TIME_WAIT; | |
2284 | tcp_canceltimers(tp); | |
27b8aee3 | 2285 | tcp_callout_reset(tp, tp->tt_2msl, |
01d3427a SZ |
2286 | 2 * tcp_rmx_msl(tp), |
2287 | tcp_timer_2msl); | |
984263bc MD |
2288 | soisdisconnected(so); |
2289 | } | |
2290 | break; | |
2291 | ||
2292 | /* | |
2293 | * In LAST_ACK, we may still be waiting for data to drain | |
2294 | * and/or to be acked, as well as for the ack of our FIN. | |
2295 | * If our FIN is now acknowledged, delete the TCB, | |
2296 | * enter the closed state and return. | |
2297 | */ | |
2298 | case TCPS_LAST_ACK: | |
2299 | if (ourfinisacked) { | |
2300 | tp = tcp_close(tp); | |
2301 | goto drop; | |
2302 | } | |
2303 | break; | |
2304 | ||
2305 | /* | |
2306 | * In TIME_WAIT state the only thing that should arrive | |
2307 | * is a retransmission of the remote FIN. Acknowledge | |
2308 | * it and restart the finack timer. | |
2309 | */ | |
2310 | case TCPS_TIME_WAIT: | |
01d3427a | 2311 | tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), |
a48c5dd5 | 2312 | tcp_timer_2msl); |
984263bc MD |
2313 | goto dropafterack; |
2314 | } | |
2315 | } | |
2316 | ||
2317 | step6: | |
2318 | /* | |
2319 | * Update window information. | |
2320 | * Don't look at window if no ACK: TAC's send garbage on first SYN. | |
2321 | */ | |
2322 | if ((thflags & TH_ACK) && | |
df9d7670 | 2323 | acceptable_window_update(tp, th, tiwin)) { |
984263bc | 2324 | /* keep track of pure window updates */ |
b5572302 JH |
2325 | if (tlen == 0 && tp->snd_wl2 == th->th_ack && |
2326 | tiwin > tp->snd_wnd) | |
984263bc MD |
2327 | tcpstat.tcps_rcvwinupd++; |
2328 | tp->snd_wnd = tiwin; | |
2329 | tp->snd_wl1 = th->th_seq; | |
2330 | tp->snd_wl2 = th->th_ack; | |
2331 | if (tp->snd_wnd > tp->max_sndwnd) | |
2332 | tp->max_sndwnd = tp->snd_wnd; | |
61896e3c | 2333 | needoutput = TRUE; |
984263bc MD |
2334 | } |
2335 | ||
2336 | /* | |
2337 | * Process segments with URG. | |
2338 | */ | |
2339 | if ((thflags & TH_URG) && th->th_urp && | |
95b22adf | 2340 | !TCPS_HAVERCVDFIN(tp->t_state)) { |
984263bc MD |
2341 | /* |
2342 | * This is a kludge, but if we receive and accept | |
2343 | * random urgent pointers, we'll crash in | |
2344 | * soreceive. It's hard to imagine someone | |
2345 | * actually wanting to send this much urgent data. | |
2346 | */ | |
6d49aa6f | 2347 | if (th->th_urp + so->so_rcv.ssb_cc > sb_max) { |
984263bc MD |
2348 | th->th_urp = 0; /* XXX */ |
2349 | thflags &= ~TH_URG; /* XXX */ | |
2350 | goto dodata; /* XXX */ | |
2351 | } | |
2352 | /* | |
2353 | * If this segment advances the known urgent pointer, | |
2354 | * then mark the data stream. This should not happen | |
2355 | * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since | |
2356 | * a FIN has been received from the remote side. | |
2357 | * In these states we ignore the URG. | |
2358 | * | |
2359 | * According to RFC961 (Assigned Protocols), | |
2360 | * the urgent pointer points to the last octet | |
2361 | * of urgent data. We continue, however, | |
2362 | * to consider it to indicate the first octet | |
2363 | * of data past the urgent section as the original | |
2364 | * spec states (in one of two places). | |
2365 | */ | |
61896e3c | 2366 | if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { |
984263bc | 2367 | tp->rcv_up = th->th_seq + th->th_urp; |
6d49aa6f | 2368 | so->so_oobmark = so->so_rcv.ssb_cc + |
984263bc MD |
2369 | (tp->rcv_up - tp->rcv_nxt) - 1; |
2370 | if (so->so_oobmark == 0) | |
6cef7136 | 2371 | sosetstate(so, SS_RCVATMARK); |
984263bc MD |
2372 | sohasoutofband(so); |
2373 | tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); | |
2374 | } | |
2375 | /* | |
2376 | * Remove out of band data so doesn't get presented to user. | |
2377 | * This can happen independent of advancing the URG pointer, | |
2378 | * but if two URG's are pending at once, some out-of-band | |
2379 | * data may creep in... ick. | |
2380 | */ | |
61896e3c JH |
2381 | if (th->th_urp <= (u_long)tlen && |
2382 | !(so->so_options & SO_OOBINLINE)) { | |
2383 | /* hdr drop is delayed */ | |
2384 | tcp_pulloutofband(so, th, m, drop_hdrlen); | |
2385 | } | |
984263bc MD |
2386 | } else { |
2387 | /* | |
2388 | * If no out of band data is expected, | |
2389 | * pull receive urgent pointer along | |
2390 | * with the receive window. | |
2391 | */ | |
2392 | if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) | |
2393 | tp->rcv_up = tp->rcv_nxt; | |
2394 | } | |
984263bc | 2395 | |
61896e3c | 2396 | dodata: /* XXX */ |
984263bc MD |
2397 | /* |
2398 | * Process the segment text, merging it into the TCP sequencing queue, | |
2399 | * and arranging for acknowledgment of receipt if necessary. | |
2400 | * This process logically involves adjusting tp->rcv_wnd as data | |
2401 | * is presented to the user (this happens in tcp_usrreq.c, | |
2402 | * case PRU_RCVD). If a FIN has already been received on this | |
2403 | * connection then we just ignore the text. | |
2404 | */ | |
95b22adf | 2405 | if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) { |
984263bc MD |
2406 | m_adj(m, drop_hdrlen); /* delayed header drop */ |
2407 | /* | |
2408 | * Insert segment which includes th into TCP reassembly queue | |
2409 | * with control block tp. Set thflags to whether reassembly now | |
2410 | * includes a segment with FIN. This handles the common case | |
2411 | * inline (segment is the next to be received on an established | |
2412 | * connection, and the queue is empty), avoiding linkage into | |
2413 | * and removal from the queue and repetition of various | |
2414 | * conversions. | |
2415 | * Set DELACK for segments received in order, but ack | |
2416 | * immediately when segments are out of order (so | |
2417 | * fast retransmit can work). | |
2418 | */ | |
2419 | if (th->th_seq == tp->rcv_nxt && | |
0f9e45de | 2420 | TAILQ_EMPTY(&tp->t_segq) && |
984263bc | 2421 | TCPS_HAVEESTABLISHED(tp->t_state)) { |
a48c5dd5 SZ |
2422 | if (DELAY_ACK(tp)) { |
2423 | tcp_callout_reset(tp, tp->tt_delack, | |
2424 | tcp_delacktime, tcp_timer_delack); | |
2425 | } else { | |
984263bc | 2426 | tp->t_flags |= TF_ACKNOW; |
a48c5dd5 | 2427 | } |
984263bc MD |
2428 | tp->rcv_nxt += tlen; |
2429 | thflags = th->th_flags & TH_FIN; | |
2430 | tcpstat.tcps_rcvpack++; | |
2431 | tcpstat.tcps_rcvbyte += tlen; | |
2432 | ND6_HINT(tp); | |
6cef7136 | 2433 | if (so->so_state & SS_CANTRCVMORE) { |
984263bc | 2434 | m_freem(m); |
6cef7136 MD |
2435 | } else { |
2436 | lwkt_gettoken(&so->so_rcv.ssb_token); | |
6d49aa6f | 2437 | ssb_appendstream(&so->so_rcv, m); |
6cef7136 MD |
2438 | lwkt_reltoken(&so->so_rcv.ssb_token); |
2439 | } | |
984263bc MD |
2440 | sorwakeup(so); |
2441 | } else { | |
c7e6499a | 2442 | if (!(tp->sack_flags & TSACK_F_DUPSEG)) { |
91489f6b JH |
2443 | /* Initialize SACK report block. */ |
2444 | tp->reportblk.rblk_start = th->th_seq; | |
3a5d999b SZ |
2445 | tp->reportblk.rblk_end = TCP_SACK_BLKEND( |
2446 | th->th_seq + tlen, thflags); | |
91489f6b | 2447 | } |
984263bc MD |
2448 | thflags = tcp_reass(tp, th, &tlen, m); |
2449 | tp->t_flags |= TF_ACKNOW; | |
2450 | } | |
2451 | ||
2452 | /* | |
2453 | * Note the amount of data that peer has sent into | |
2454 | * our window, in order to estimate the sender's | |
2455 | * buffer size. | |
2456 | */ | |
6d49aa6f | 2457 | len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt); |
984263bc MD |
2458 | } else { |
2459 | m_freem(m); | |
2460 | thflags &= ~TH_FIN; | |
2461 | } | |
2462 | ||
2463 | /* | |
2464 | * If FIN is received ACK the FIN and let the user know | |
2465 | * that the connection is closing. | |
2466 | */ | |
2467 | if (thflags & TH_FIN) { | |
95b22adf | 2468 | if (!TCPS_HAVERCVDFIN(tp->t_state)) { |
984263bc MD |
2469 | socantrcvmore(so); |
2470 | /* | |
2471 | * If connection is half-synchronized | |
2472 | * (ie NEEDSYN flag on) then delay ACK, | |
2473 | * so it may be piggybacked when SYN is sent. | |
2474 | * Otherwise, since we received a FIN then no | |
2475 | * more input can be expected, send ACK now. | |
2476 | */ | |
a48c5dd5 SZ |
2477 | if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) { |
2478 | tcp_callout_reset(tp, tp->tt_delack, | |
2479 | tcp_delacktime, tcp_timer_delack); | |
2480 | } else { | |
984263bc | 2481 | tp->t_flags |= TF_ACKNOW; |
a48c5dd5 | 2482 | } |
984263bc MD |
2483 | tp->rcv_nxt++; |
2484 | } | |
984263bc | 2485 | |
61896e3c | 2486 | switch (tp->t_state) { |
95b22adf | 2487 | /* |
984263bc MD |
2488 | * In SYN_RECEIVED and ESTABLISHED STATES |
2489 | * enter the CLOSE_WAIT state. | |
2490 | */ | |
2491 | case TCPS_SYN_RECEIVED: | |
2492 | tp->t_starttime = ticks; | |
2493 | /*FALLTHROUGH*/ | |
2494 | case TCPS_ESTABLISHED: | |
2495 | tp->t_state = TCPS_CLOSE_WAIT; | |
2496 | break; | |
2497 | ||
95b22adf | 2498 | /* |
984263bc MD |
2499 | * If still in FIN_WAIT_1 STATE FIN has not been acked so |
2500 | * enter the CLOSING state. | |
2501 | */ | |
2502 | case TCPS_FIN_WAIT_1: | |
2503 | tp->t_state = TCPS_CLOSING; | |
2504 | break; | |
2505 | ||
95b22adf | 2506 | /* |
984263bc MD |
2507 | * In FIN_WAIT_2 state enter the TIME_WAIT state, |
2508 | * starting the time-wait timer, turning off the other | |
2509 | * standard timers. | |
2510 | */ | |
2511 | case TCPS_FIN_WAIT_2: | |
2512 | tp->t_state = TCPS_TIME_WAIT; | |
2513 | tcp_canceltimers(tp); | |
01d3427a | 2514 | tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), |
a48c5dd5 | 2515 | tcp_timer_2msl); |
984263bc MD |
2516 | soisdisconnected(so); |
2517 | break; | |
2518 | ||
2519 | /* | |
2520 | * In TIME_WAIT state restart the 2 MSL time_wait timer. | |
2521 | */ | |
2522 | case TCPS_TIME_WAIT: | |
01d3427a | 2523 | tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_rmx_msl(tp), |
a48c5dd5 | 2524 | tcp_timer_2msl); |
984263bc MD |
2525 | break; |
2526 | } | |
2527 | } | |
61896e3c | 2528 | |
984263bc MD |
2529 | #ifdef TCPDEBUG |
2530 | if (so->so_options & SO_DEBUG) | |
f23061d4 | 2531 | tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); |
984263bc MD |
2532 | #endif |
2533 | ||
27f4bf33 SZ |
2534 | /* |
2535 | * Delayed duplicated ACK processing | |
2536 | */ | |
5c99b248 | 2537 | if (delayed_dupack && tcp_recv_dupack(tp, th_dupack, &to)) |
27f4bf33 SZ |
2538 | needoutput = FALSE; |
2539 | ||
984263bc MD |
2540 | /* |
2541 | * Return any desired output. | |
2542 | */ | |
2543 | if (needoutput || (tp->t_flags & TF_ACKNOW)) | |
f23061d4 | 2544 | tcp_output(tp); |
d58ca578 | 2545 | tcp_sack_report_cleanup(tp); |
002c1265 | 2546 | return(IPPROTO_DONE); |
984263bc MD |
2547 | |
2548 | dropafterack: | |
2549 | /* | |
2550 | * Generate an ACK dropping incoming segment if it occupies | |
2551 | * sequence space, where the ACK reflects our state. | |
2552 | * | |
2553 | * We can now skip the test for the RST flag since all | |
2554 | * paths to this code happen after packets containing | |
2555 | * RST have been dropped. | |
2556 | * | |
2557 | * In the SYN-RECEIVED state, don't send an ACK unless the | |
2558 | * segment we received passes the SYN-RECEIVED ACK test. | |
2559 | * If it fails send a RST. This breaks the loop in the | |
2560 | * "LAND" DoS attack, and also prevents an ACK storm | |
2561 | * between two listening ports that have been sent forged | |
2562 | * SYN segments, each with the source address of the other. | |
2563 | */ | |
2564 | if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && | |
2565 | (SEQ_GT(tp->snd_una, th->th_ack) || | |
2566 | SEQ_GT(th->th_ack, tp->snd_max)) ) { | |
2567 | rstreason = BANDLIM_RST_OPENPORT; | |
2568 | goto dropwithreset; | |
2569 | } | |
2570 | #ifdef TCPDEBUG | |
2571 | if (so->so_options & SO_DEBUG) | |
f23061d4 | 2572 | tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); |
984263bc MD |
2573 | #endif |
2574 | m_freem(m); | |
2575 | tp->t_flags |= TF_ACKNOW; | |
f23061d4 | 2576 | tcp_output(tp); |
d58ca578 | 2577 | tcp_sack_report_cleanup(tp); |
002c1265 | 2578 | return(IPPROTO_DONE); |
984263bc MD |
2579 | |
2580 | dropwithreset: | |
2581 | /* | |
2582 | * Generate a RST, dropping incoming segment. | |
2583 | * Make ACK acceptable to originator of segment. | |
2584 | * Don't bother to respond if destination was broadcast/multicast. | |
2585 | */ | |
61896e3c | 2586 | if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) |
984263bc MD |
2587 | goto drop; |
2588 | if (isipv6) { | |
2589 | if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || | |
2590 | IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) | |
2591 | goto drop; | |
2592 | } else { | |
2593 | if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || | |
2594 | IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || | |
95b22adf JH |
2595 | ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || |
2596 | in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) | |
984263bc MD |
2597 | goto drop; |
2598 | } | |
2599 | /* IPv6 anycast check is done at tcp6_input() */ | |
2600 | ||
2601 | /* | |
2602 | * Perform bandwidth limiting. | |
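 * badport_bandlim() rate-limits the RSTs we generate, so a flood of
 * forged segments cannot turn this host into a RST reflector; a
 * negative return means the quota is exhausted and the reply is
 * silently dropped.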
2603 | */ | |
2604 | #ifdef ICMP_BANDLIM | |
2605 | if (badport_bandlim(rstreason) < 0) | |
2606 | goto drop; | |
2607 | #endif | |
2608 | ||
2609 | #ifdef TCPDEBUG | |
2610 | if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) | |
f23061d4 | 2611 | tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); |
984263bc MD |
2612 | #endif |
2613 | if (thflags & TH_ACK) | |
2614 | /* mtod() below is safe as long as hdr dropping is delayed */ | |
2615 | tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, | |
2616 | TH_RST); | |
2617 | else { | |
2618 | if (thflags & TH_SYN) | |
2619 | tlen++; | |
2620 | /* mtod() below is safe as long as hdr dropping is delayed */ | |
61896e3c JH |
2621 | tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen, |
2622 | (tcp_seq)0, TH_RST | TH_ACK); | |
984263bc | 2623 | } |
d58ca578 SZ |
2624 | if (tp != NULL) |
2625 | tcp_sack_report_cleanup(tp); | |
002c1265 | 2626 | return(IPPROTO_DONE); |
984263bc MD |
2627 | |
2628 | drop: | |
2629 | /* | |
2630 | * Drop space held by incoming segment and return. | |
2631 | */ | |
2632 | #ifdef TCPDEBUG | |
2633 | if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) | |
f23061d4 | 2634 | tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0); |
984263bc MD |
2635 | #endif |
2636 | m_freem(m); | |
d58ca578 SZ |
2637 | if (tp != NULL) |
2638 | tcp_sack_report_cleanup(tp); | |
002c1265 | 2639 | return(IPPROTO_DONE); |
984263bc MD |
2640 | } |
2641 | ||
2642 | /* | |
2643 | * Parse TCP options and place in tcpopt. | |
2644 | */ | |
2645 | static void | |
6c1bbf57 SZ |
2646 | tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn, |
2647 | tcp_seq ack) | |
984263bc | 2648 | { |
91489f6b | 2649 | int opt, optlen, i; |
984263bc MD |
2650 | |
2651 | to->to_flags = 0; | |
2652 | for (; cnt > 0; cnt -= optlen, cp += optlen) { | |
2653 | opt = cp[0]; | |
2654 | if (opt == TCPOPT_EOL) | |
2655 | break; | |
2656 | if (opt == TCPOPT_NOP) | |
2657 | optlen = 1; | |
2658 | else { | |
2659 | if (cnt < 2) | |
2660 | break; | |
2661 | optlen = cp[1]; | |
2662 | if (optlen < 2 || optlen > cnt) | |
2663 | break; | |
2664 | } | |
2665 | switch (opt) { | |
2666 | case TCPOPT_MAXSEG: | |
2667 | if (optlen != TCPOLEN_MAXSEG) | |
2668 | continue; | |
2669 | if (!is_syn) | |
2670 | continue; | |
2671 | to->to_flags |= TOF_MSS; | |
407e896e | 2672 | bcopy(cp + 2, &to->to_mss, sizeof to->to_mss); |
984263bc MD |
2673 | to->to_mss = ntohs(to->to_mss); |
2674 | break; | |
2675 | case TCPOPT_WINDOW: | |
2676 | if (optlen != TCPOLEN_WINDOW) | |
2677 | continue; | |
95b22adf | 2678 | if (!is_syn) |
984263bc MD |
2679 | continue; |
2680 | to->to_flags |= TOF_SCALE; | |
2681 | to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); | |
2682 | break; | |
2683 | case TCPOPT_TIMESTAMP: | |
2684 | if (optlen != TCPOLEN_TIMESTAMP) | |
2685 | continue; | |
2686 | to->to_flags |= TOF_TS; | |
407e896e | 2687 | bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval); |
984263bc | 2688 | to->to_tsval = ntohl(to->to_tsval); |
407e896e | 2689 | bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr); |
984263bc | 2690 | to->to_tsecr = ntohl(to->to_tsecr); |
ad0af98b ND |
2691 | /* |
2692 | * If echoed timestamp is later than the current time, | |
2693 | * fall back to non RFC1323 RTT calculation. | |
2694 | */ | |
2695 | if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks)) | |
2696 | to->to_tsecr = 0; | |
984263bc | 2697 | break; |
91489f6b JH |
2698 | case TCPOPT_SACK_PERMITTED: |
2699 | if (optlen != TCPOLEN_SACK_PERMITTED) | |
2700 | continue; | |
2701 | if (!is_syn) | |
2702 | continue; | |
2703 | to->to_flags |= TOF_SACK_PERMITTED; | |
2704 | break; | |
2705 | case TCPOPT_SACK: | |
2706 | if ((optlen - 2) & 0x07) /* not multiple of 8 */ | |
2707 | continue; | |
2708 | to->to_nsackblocks = (optlen - 2) / 8; | |
2709 | to->to_sackblocks = (struct raw_sackblock *) (cp + 2); | |
2710 | to->to_flags |= TOF_SACK; | |
2711 | for (i = 0; i < to->to_nsackblocks; i++) { | |
2712 | struct raw_sackblock *r = &to->to_sackblocks[i]; | |
2713 | ||
2714 | r->rblk_start = ntohl(r->rblk_start); | |
2715 | r->rblk_end = ntohl(r->rblk_end); | |
865b0477 SZ |
2716 | |
2717 | if (SEQ_LEQ(r->rblk_end, r->rblk_start)) { | |
2718 | /* | |
2719 | * Invalid SACK block; discard all | |
2720 | * SACK blocks | |
2721 | */ | |
02cc2f35 | 2722 | tcpstat.tcps_rcvbadsackopt++; |
865b0477 SZ |
2723 | to->to_nsackblocks = 0; |
2724 | to->to_sackblocks = NULL; | |
2725 | to->to_flags &= ~TOF_SACK; | |
2726 | break; | |
2727 | } | |
91489f6b | 2728 | } |
6c1bbf57 SZ |
2729 | if ((to->to_flags & TOF_SACK) && |
2730 | tcp_sack_ndsack_blocks(to->to_sackblocks, | |
2731 | to->to_nsackblocks, ack)) | |
2732 | to->to_flags |= TOF_DSACK; | |
91489f6b | 2733 | break; |
b1992928 MD |
2734 | #ifdef TCP_SIGNATURE |
2735 | /* | |
2736 | * XXX In order to reply to a host which has set the | |
2737 | * TCP_SIGNATURE option in its initial SYN, we have to | |
2738 | * record the fact that the option was observed here | |
2739 | * for the syncache code to perform the correct response. | |
2740 | */ | |
2741 | case TCPOPT_SIGNATURE: | |
2742 | if (optlen != TCPOLEN_SIGNATURE) | |
2743 | continue; | |
2744 | to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN); | |
2745 | break; | |
2746 | #endif /* TCP_SIGNATURE */ | |
984263bc MD |
2747 | default: |
2748 | continue; | |
2749 | } | |
2750 | } | |
2751 | } | |
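/*
 * Illustrative userland sketch (not part of this file, hence the #if 0):
 * the same kind/length option walk that tcp_dooptions() performs above,
 * run over a hand-built buffer holding two NOPs and a timestamp option.
 * opt_walk_example() and main() are made-up names for the illustration
 * and only libc calls (memcpy, ntohl, printf) are used.
 */
#if 0
#include <arpa/inet.h>		/* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
opt_walk_example(const unsigned char *cp, int cnt)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == 0)			/* EOL ends the list */
			break;
		if (opt == 1) {			/* NOP is a single pad byte */
			optlen = 1;
		} else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;		/* malformed length */
		}
		if (opt == 8 && optlen == 10) {	/* timestamp option */
			uint32_t tsval, tsecr;

			memcpy(&tsval, cp + 2, sizeof(tsval));
			memcpy(&tsecr, cp + 6, sizeof(tsecr));
			printf("TSval %u TSecr %u\n",
			    ntohl(tsval), ntohl(tsecr));
		}
	}
}

int
main(void)
{
	/* NOP, NOP, kind 8, len 10, TSval 1000, TSecr 42 (network order) */
	unsigned char opts[] = {
		1, 1, 8, 10, 0x00, 0x00, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x2a
	};

	opt_walk_example(opts, (int)sizeof(opts));
	return (0);
}
#endif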
2752 | ||
2753 | /* | |
2754 | * Pull out of band byte out of a segment so | |
2755 | * it doesn't appear in the user's data queue. | |
2756 | * It is still reflected in the segment length for | |
2757 | * sequencing purposes. | |
95b22adf | 2758 | * "off" is the delayed to be dropped hdrlen. |
984263bc MD |
2759 | */ |
2760 | static void | |
95b22adf | 2761 | tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off) |
984263bc MD |
2762 | { |
2763 | int cnt = off + th->th_urp - 1; | |
2764 | ||
2765 | while (cnt >= 0) { | |
2766 | if (m->m_len > cnt) { | |
2767 | char *cp = mtod(m, caddr_t) + cnt; | |
2768 | struct tcpcb *tp = sototcpcb(so); | |
2769 | ||
2770 | tp->t_iobc = *cp; | |
2771 | tp->t_oobflags |= TCPOOB_HAVEDATA; | |
95b22adf | 2772 | bcopy(cp + 1, cp, m->m_len - cnt - 1); |
984263bc MD |
2773 | m->m_len--; |
2774 | if (m->m_flags & M_PKTHDR) | |
2775 | m->m_pkthdr.len--; | |
2776 | return; | |
2777 | } | |
2778 | cnt -= m->m_len; | |
2779 | m = m->m_next; | |
4090d6ff | 2780 | if (m == NULL) |
984263bc MD |
2781 | break; |
2782 | } | |
2783 | panic("tcp_pulloutofband"); | |
2784 | } | |
2785 | ||
2786 | /* | |
2787 | * Collect new round-trip time estimate | |
2788 | * and update averages and current timeout. | |
2789 | */ | |
2790 | static void | |
073ec6c4 | 2791 | tcp_xmit_timer(struct tcpcb *tp, int rtt, tcp_seq ack) |
984263bc | 2792 | { |
073ec6c4 | 2793 | int rebaserto = 0; |
984263bc MD |
2794 | |
2795 | tcpstat.tcps_rttupdated++; | |
2796 | tp->t_rttupdated++; | |
1c8b7a61 SZ |
2797 | if ((tp->rxt_flags & TRXT_F_REBASERTO) && |
2798 | SEQ_GT(ack, tp->snd_max_prev)) { | |
073ec6c4 SZ |
2799 | #ifdef DEBUG_EIFEL_RESPONSE |
2800 | kprintf("srtt/rttvar, prev %d/%d, cur %d/%d, ", | |
2801 | tp->t_srtt_prev, tp->t_rttvar_prev, | |
2802 | tp->t_srtt, tp->t_rttvar); | |
2803 | #endif | |
2804 | ||
2805 | tcpstat.tcps_eifelresponse++; | |
2806 | rebaserto = 1; | |
1c8b7a61 | 2807 | tp->rxt_flags &= ~TRXT_F_REBASERTO; |
073ec6c4 SZ |
2808 | tp->t_srtt = max(tp->t_srtt_prev, (rtt << TCP_RTT_SHIFT)); |
2809 | tp->t_rttvar = max(tp->t_rttvar_prev, | |
2810 | (rtt << (TCP_RTTVAR_SHIFT - 1))); | |
2811 | if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) | |
2812 | tp->t_rttbest = tp->t_srtt + tp->t_rttvar; | |
2813 | ||
2814 | #ifdef DEBUG_EIFEL_RESPONSE | |
2815 | kprintf("new %d/%d ", tp->t_srtt, tp->t_rttvar); | |
2816 | #endif | |
2817 | } else if (tp->t_srtt != 0) { | |
2818 | int delta; | |
2819 | ||
984263bc MD |
2820 | /* |
2821 | * srtt is stored as fixed point with 5 bits after the | |
2822 | * binary point (i.e., scaled by 32). The following magic | |
2823 | * is equivalent to the smoothing algorithm in rfc793 with | |
2824 | * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed | |
2825 | * point). Adjust rtt to origin 0. | |
2826 | */ | |
2827 | delta = ((rtt - 1) << TCP_DELTA_SHIFT) | |
2828 | - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); | |
2829 | ||
2830 | if ((tp->t_srtt += delta) <= 0) | |
2831 | tp->t_srtt = 1; | |
2832 | ||
2833 | /* | |
2834 | * We accumulate a smoothed rtt variance (actually, a | |
2835 | * smoothed mean difference), then set the retransmit | |
2836 | * timer to smoothed rtt + 4 times the smoothed variance. | |
2837 | * rttvar is stored as fixed point with 4 bits after the | |
2838 | * binary point (scaled by 16). The following is | |
2839 | * equivalent to rfc793 smoothing with an alpha of .75 | |
2840 | * (rttvar = rttvar*3/4 + |delta| / 4). This replaces | |
2841 | * rfc793's wired-in beta. | |
2842 | */ | |
2843 | if (delta < 0) | |
2844 | delta = -delta; | |
2845 | delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); | |
2846 | if ((tp->t_rttvar += delta) <= 0) | |
2847 | tp->t_rttvar = 1; | |
2848 | if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) | |
2849 | tp->t_rttbest = tp->t_srtt + tp->t_rttvar; | |
2850 | } else { | |
2851 | /* | |
2852 | * No rtt measurement yet - use the unsmoothed rtt. | |
2853 | * Set the variance to half the rtt (so our first | |
2854 | * retransmit happens at 3*rtt). | |
2855 | */ | |
2856 | tp->t_srtt = rtt << TCP_RTT_SHIFT; | |
2857 | tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); | |
2858 | tp->t_rttbest = tp->t_srtt + tp->t_rttvar; | |
2859 | } | |
2860 | tp->t_rtttime = 0; | |
2861 | tp->t_rxtshift = 0; | |
2862 | ||
073ec6c4 SZ |
2863 | #ifdef DEBUG_EIFEL_RESPONSE |
2864 | if (rebaserto) { | |
2865 | kprintf("| rxtcur prev %d, old %d, ", | |
2866 | tp->t_rxtcur_prev, tp->t_rxtcur); | |
2867 | } | |
2868 | #endif | |
2869 | ||
984263bc MD |
2870 | /* |
2871 | * the retransmit should happen at rtt + 4 * rttvar. | |
2872 | * Because of the way we do the smoothing, srtt and rttvar | |
2873 | * will each average +1/2 tick of bias. When we compute | |
2874 | * the retransmit timer, we want 1/2 tick of rounding and | |
2875 | * 1 extra tick because of +-1/2 tick uncertainty in the | |
2876 | * firing of the timer. The bias will give us exactly the | |
2877 | * 1.5 tick we need. But, because the bias is | |
2878 | * statistical, we have to test that we don't drop below | |
2879 | * the minimum feasible timer (which is 2 ticks). | |
2880 | */ | |
2881 | TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), | |
2882 | max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); | |
2883 | ||
073ec6c4 SZ |
2884 | if (rebaserto) { |
2885 | if (tp->t_rxtcur < tp->t_rxtcur_prev + tcp_eifel_rtoinc) { | |
2886 | /* | |
2887 | * RFC4015 requires that the new RTO is at least | |
2888 | * 2*G (tcp_eifel_rtoinc) greater than the RTO | |
2889 | * (t_rxtcur_prev) when the spurious retransmit | |
2890 | * timeout happens. | |
2891 | * | |
2892 | * The above condition could be true, if the SRTT | |
2893 | * and RTTVAR used to calculate t_rxtcur_prev | |
2894 | * resulted in a value less than t_rttmin. So | |
2895 | * simply increasing SRTT by tcp_eifel_rtoinc when | |
e1dd9e15 | 2896 | * preparing for the Eifel response cannot ensure |
073ec6c4 SZ |
2897 | * that the new RTO will be tcp_eifel_rtoinc greater than | |
2898 | * t_rxtcur_prev. | |
2899 | */ | |
2900 | tp->t_rxtcur = tp->t_rxtcur_prev + tcp_eifel_rtoinc; | |
2901 | } | |
2902 | #ifdef DEBUG_EIFEL_RESPONSE | |
2903 | kprintf("new %d\n", tp->t_rxtcur); | |
2904 | #endif | |
2905 | } | |
2906 | ||
984263bc MD |
2907 | /* |
2908 | * We received an ack for a packet that wasn't retransmitted; | |
2909 | * it is probably safe to discard any error indications we've | |
2910 | * received recently. This isn't quite right, but close enough | |
2911 | * for now (a route might have failed after we sent a segment, | |
2912 | * and the return path might not be symmetrical). | |
2913 | */ | |
2914 | tp->t_softerror = 0; | |
2915 | } | |
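/*
 * Illustrative sketch (not part of this file): a user-space version of the
 * fixed-point smoothing above.  srtt keeps 5 fractional bits (scaled by 32),
 * rttvar keeps 4 (scaled by 16), and the error term "delta" keeps 2 (scaled
 * by 4), so adding delta straight into srtt/rttvar yields gains of 1/8 and
 * 1/4, i.e. alpha = 7/8 and beta = 3/4.  The names and constants here are
 * illustrative, not the kernel's macros.
 */
#define EX_RTT_SHIFT	5	/* srtt: 5 fractional bits */
#define EX_RTTVAR_SHIFT	4	/* rttvar: 4 fractional bits */
#define EX_DELTA_SHIFT	2	/* delta: 2 fractional bits */

struct ex_rtt {
	int srtt;		/* scaled by 1 << EX_RTT_SHIFT */
	int rttvar;		/* scaled by 1 << EX_RTTVAR_SHIFT */
};

/* rtt, rttmin and rexmtmax are in ticks; returns the new RTO in ticks. */
static int
ex_xmit_timer(struct ex_rtt *e, int rtt, int rttmin, int rexmtmax)
{
	int delta, rto;

	if (e->srtt != 0) {
		delta = (rtt << EX_DELTA_SHIFT) -
		    (e->srtt >> (EX_RTT_SHIFT - EX_DELTA_SHIFT));
		if ((e->srtt += delta) <= 0)		/* gain 1/8 */
			e->srtt = 1;
		if (delta < 0)
			delta = -delta;
		delta -= e->rttvar >> (EX_RTTVAR_SHIFT - EX_DELTA_SHIFT);
		if ((e->rttvar += delta) <= 0)		/* gain 1/4 */
			e->rttvar = 1;
	} else {
		/* First sample: set the variance to half the rtt. */
		e->srtt = rtt << EX_RTT_SHIFT;
		e->rttvar = rtt << (EX_RTTVAR_SHIFT - 1);
	}
	/* RTO = srtt + 4 * rttvar, descaled to ticks and clamped. */
	rto = (e->srtt >> EX_RTT_SHIFT) +
	    (e->rttvar >> (EX_RTTVAR_SHIFT - 2));
	if (rto < rttmin)
		rto = rttmin;
	else if (rto > rexmtmax)
		rto = rexmtmax;
	return rto;
}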
2916 | ||
2917 | /* | |
2918 | * Determine a reasonable value for maxseg size. | |
2919 | * If the route is known, check route for mtu. | |
2920 | * If none, use an mss that can be handled on the outgoing | |
2921 | * interface without forcing IP to fragment; if bigger than | |
2922 | * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES | |
2923 | * to utilize large mbufs. If no route is found, route has no mtu, | |
2924 | * or the destination isn't local, use a default, hopefully conservative | |
2925 | * size (usually 512 or the default IP max size, but no more than the mtu | |
2926 | * of the interface), as we can't discover anything about intervening | |
2927 | * gateways or networks. We also initialize the congestion/slow start | |
2928 | * window to be a single segment if the destination isn't local. | |
2929 | * While looking at the routing entry, we also initialize other path-dependent | |
2930 | * parameters from pre-set or cached values in the routing entry. | |
2931 | * | |
2932 | * Also take into account the space needed for options that we | |
2933 | * send regularly. Make maxseg shorter by that amount to assure | |
2934 | * that we can send maxseg amount of data even when the options | |
2935 | * are present. Store the upper limit of the length of options plus | |
2936 | * data in maxopd. | |
2937 | * | |
2938 | * NOTE that this routine is only called when we process an incoming | |
2939 | * segment, for outgoing segments only tcp_mssopt is called. | |
984263bc MD |
2940 | */ |
2941 | void | |
95b22adf | 2942 | tcp_mss(struct tcpcb *tp, int offer) |
984263bc | 2943 | { |
2256ba69 | 2944 | struct rtentry *rt; |
984263bc | 2945 | struct ifnet *ifp; |
2256ba69 | 2946 | int rtt, mss; |
984263bc MD |
2947 | u_long bufsize; |
2948 | struct inpcb *inp = tp->t_inpcb; | |
2949 | struct socket *so; | |
984263bc | 2950 | #ifdef INET6 |
d24ce1dc | 2951 | boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); |
984263bc MD |
2952 | size_t min_protoh = isipv6 ? |
2953 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : | |
2954 | sizeof(struct tcpiphdr); | |
2955 | #else | |
d24ce1dc | 2956 | const boolean_t isipv6 = FALSE; |
984263bc MD |
2957 | const size_t min_protoh = sizeof(struct tcpiphdr); |
2958 | #endif | |
2959 | ||
2960 | if (isipv6) | |
2961 | rt = tcp_rtlookup6(&inp->inp_inc); | |
2962 | else | |
2963 | rt = tcp_rtlookup(&inp->inp_inc); | |
2964 | if (rt == NULL) { | |
2965 | tp->t_maxopd = tp->t_maxseg = | |
d24ce1dc | 2966 | (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); |
984263bc MD |
2967 | return; |
2968 | } | |
2969 | ifp = rt->rt_ifp; | |
2970 | so = inp->inp_socket; | |
2971 | ||
984263bc MD |
2972 | /* |
2973 | * Offer == 0 means that there was no MSS on the SYN segment, | |
b235ad6d MD |
2974 | * in this case we use either the interface mtu or tcp_mssdflt. |
2975 | * | |
2976 | * An offer which is too large will be cut down later. | |
984263bc | 2977 | */ |
5b0b9fa5 | 2978 | if (offer == 0) { |
b235ad6d MD |
2979 | if (isipv6) { |
2980 | if (in6_localaddr(&inp->in6p_faddr)) { | |
2981 | offer = ND_IFINFO(rt->rt_ifp)->linkmtu - | |
2982 | min_protoh; | |
2983 | } else { | |
2984 | offer = tcp_v6mssdflt; | |
2985 | } | |
2986 | } else { | |
2987 | if (in_localaddr(inp->inp_faddr)) | |
2988 | offer = ifp->if_mtu - min_protoh; | |
2989 | else | |
2990 | offer = tcp_mssdflt; | |
2991 | } | |
5b0b9fa5 | 2992 | } |
b235ad6d MD |
2993 | |
2994 | /* | |
2995 | * Prevent DoS attack with too small MSS. Round up | |
2996 | * to at least minmss. | |
2997 | * | |
2998 | * Sanity check: make sure that maxopd will be large | |
2999 | * enough to allow some data on segments even if all | |
3000 | * the option space is used (40 bytes). Otherwise | |
3001 | * funny things may happen in tcp_output. | |
3002 | */ | |
3003 | offer = max(offer, tcp_minmss); | |
3004 | offer = max(offer, 64); | |
3005 | ||
27b8aee3 | 3006 | rt->rt_rmx.rmx_mssopt = offer; |
984263bc MD |
3007 | |
3008 | /* | |
3009 | * While we're here, check if there's an initial rtt | |
3010 | * or rttvar. Convert from the route-table units | |
3011 | * to scaled multiples of the slow timeout timer. | |
3012 | */ | |
3013 | if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { | |
3014 | /* | |
3015 | * XXX the lock bit for RTT indicates that the value | |
3016 | * is also a minimum value; this is subject to time. | |
3017 | */ | |
3018 | if (rt->rt_rmx.rmx_locks & RTV_RTT) | |
3019 | tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); | |
3020 | tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); | |
3021 | tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; | |
3022 | tcpstat.tcps_usedrtt++; | |
3023 | if (rt->rt_rmx.rmx_rttvar) { | |
3024 | tp->t_rttvar = rt->rt_rmx.rmx_rttvar / | |
3025 | (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); | |
3026 | tcpstat.tcps_usedrttvar++; | |
3027 | } else { | |
3028 | /* default variation is +- 1 rtt */ | |
3029 | tp->t_rttvar = | |
3030 | tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; | |
3031 | } | |
3032 | TCPT_RANGESET(tp->t_rxtcur, | |
3033 | ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, | |
3034 | tp->t_rttmin, TCPTV_REXMTMAX); | |
3035 | } | |
b235ad6d | 3036 | |
984263bc MD |
3037 | /* |
3038 | * if there's an mtu associated with the route, use it | |
b235ad6d MD |
3039 | * else, use the link mtu. Take the smaller of mss or offer |
3040 | * as our final mss. | |
984263bc | 3041 | */ |
b235ad6d | 3042 | if (rt->rt_rmx.rmx_mtu) { |
984263bc | 3043 | mss = rt->rt_rmx.rmx_mtu - min_protoh; |
b235ad6d MD |
3044 | } else { |
3045 | if (isipv6) | |
698ac46c | 3046 | mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh; |
b235ad6d | 3047 | else |
984263bc | 3048 | mss = ifp->if_mtu - min_protoh; |
984263bc MD |
3049 | } |
3050 | mss = min(mss, offer); | |
b235ad6d | 3051 | |
984263bc MD |
3052 | /* |
3053 | * maxopd stores the maximum length of data AND options | |
3054 | * in a segment; maxseg is the amount of data in a normal | |
3055 | * segment. We need to store this value (maxopd) apart | |
3056 | * from maxseg, because now every segment carries options | |
3057 | * and thus we normally have somewhat less data in segments. | |
3058 | */ | |
3059 | tp->t_maxopd = mss; | |
3060 | ||
61896e3c | 3061 | if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && |
27b8aee3 | 3062 | ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) |
984263bc | 3063 | mss -= TCPOLEN_TSTAMP_APPA; |
984263bc MD |
3064 | |
3065 | #if (MCLBYTES & (MCLBYTES - 1)) == 0 | |
f99dae5e SW |
3066 | if (mss > MCLBYTES) |
3067 | mss &= ~(MCLBYTES-1); | |
984263bc | 3068 | #else |
f99dae5e SW |
3069 | if (mss > MCLBYTES) |
3070 | mss = mss / MCLBYTES * MCLBYTES; | |
984263bc MD |
3071 | #endif |
3072 | /* | |
3073 | * If there's a pipesize, change the socket buffer | |
3074 | * to that size. Make the socket buffers an integral | |
3075 | * number of mss units; if the mss is larger than | |
3076 | * the socket buffer, decrease the mss. | |
3077 | */ | |
3078 | #ifdef RTV_SPIPE | |
3079 | if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) | |
3080 | #endif | |
6d49aa6f | 3081 | bufsize = so->so_snd.ssb_hiwat; |
984263bc MD |
3082 | if (bufsize < mss) |
3083 | mss = bufsize; | |
3084 | else { | |
3085 | bufsize = roundup(bufsize, mss); | |
3086 | if (bufsize > sb_max) | |
3087 | bufsize = sb_max; | |
6d49aa6f MD |
3088 | if (bufsize > so->so_snd.ssb_hiwat) |
3089 | ssb_reserve(&so->so_snd, bufsize, so, NULL); | |
984263bc MD |
3090 | } |
3091 | tp->t_maxseg = mss; | |
3092 | ||
3093 | #ifdef RTV_RPIPE | |
3094 | if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) | |
3095 | #endif | |
6d49aa6f | 3096 | bufsize = so->so_rcv.ssb_hiwat; |
984263bc MD |
3097 | if (bufsize > mss) { |
3098 | bufsize = roundup(bufsize, mss); | |
3099 | if (bufsize > sb_max) | |
3100 | bufsize = sb_max; | |
6cef7136 MD |
3101 | if (bufsize > so->so_rcv.ssb_hiwat) { |
3102 | lwkt_gettoken(&so->so_rcv.ssb_token); | |
6d49aa6f | 3103 | ssb_reserve(&so->so_rcv, bufsize, so, NULL); |
6cef7136 MD |
3104 | lwkt_reltoken(&so->so_rcv.ssb_token); |
3105 | } | |
984263bc MD |
3106 | } |
3107 | ||
3108 | /* | |
697020f5 SZ |
3109 | * Set the slow-start flight size |
3110 | * | |
3111 | * NOTE: t_maxseg must have been configured! | |
984263bc | 3112 | */ |
697020f5 | 3113 | tp->snd_cwnd = tcp_initial_window(tp); |
984263bc MD |
3114 | |
3115 | if (rt->rt_rmx.rmx_ssthresh) { | |
3116 | /* | |
3117 | * There's some sort of gateway or interface | |
3118 | * buffer limit on the path. Use this to set | |
3119 | * the slow start threshold, but set the | |
3120 | * threshold to no less than 2*mss. | |
3121 | */ | |
3122 | tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); | |
3123 | tcpstat.tcps_usedssthresh++; | |
3124 | } | |
3125 | } | |
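/*
 * Illustrative sketch (not part of this file): the MSS clamping done above,
 * reduced to plain arithmetic.  Assumed, illustrative inputs: "mtu" is the
 * path or interface MTU, "offer" the peer's MSS option (0 if absent),
 * "hdrlen" the IP+TCP header size, and the constants stand in for
 * tcp_minmss, MCLBYTES, TCPOLEN_TSTAMP_APPA and the send buffer size.
 */
#define EX_MINMSS	216	/* anti-DoS floor, like tcp_minmss */
#define EX_MCLBYTES	2048	/* mbuf cluster size */
#define EX_TSTAMP_LEN	12	/* timestamp option + padding */

static int
ex_calc_mss(int mtu, int offer, int hdrlen, int use_tstamp, int sndbuf)
{
	int mss;

	if (offer == 0)
		offer = mtu - hdrlen;	/* no MSS option on the SYN */
	if (offer < EX_MINMSS)		/* round tiny offers up */
		offer = EX_MINMSS;

	mss = mtu - hdrlen;
	if (mss > offer)		/* take the smaller of mss/offer */
		mss = offer;
	if (use_tstamp)			/* leave room for the options */
		mss -= EX_TSTAMP_LEN;
	if (mss > EX_MCLBYTES)		/* round down to whole clusters */
		mss -= mss % EX_MCLBYTES;
	if (mss > sndbuf)		/* never exceed the send buffer */
		mss = sndbuf;
	return mss;
}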
3126 | ||
3127 | /* | |
3128 | * Determine the MSS option to send on an outgoing SYN. | |
3129 | */ | |
3130 | int | |
95b22adf | 3131 | tcp_mssopt(struct tcpcb *tp) |
984263bc MD |
3132 | { |
3133 | struct rtentry *rt; | |
3134 | #ifdef INET6 | |
d24ce1dc JH |
3135 | boolean_t isipv6 = |
3136 | ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE); | |
984263bc MD |
3137 | int min_protoh = isipv6 ? |
3138 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : | |
3139 | sizeof(struct tcpiphdr); | |
3140 | #else | |
d24ce1dc | 3141 | const boolean_t isipv6 = FALSE; |
984263bc MD |
3142 | const size_t min_protoh = sizeof(struct tcpiphdr); |
3143 | #endif | |
3144 | ||
3145 | if (isipv6) | |
3146 | rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc); | |
3147 | else | |
3148 | rt = tcp_rtlookup(&tp->t_inpcb->inp_inc); | |
3149 | if (rt == NULL) | |
3150 | return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt); | |
3151 | ||
3152 | return (rt->rt_ifp->if_mtu - min_protoh); | |
3153 | } | |
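/*
 * Illustrative sketch (not part of this file): the header overhead used
 * above.  min_protoh is 40 bytes for IPv4 (20-byte IP + 20-byte TCP header)
 * and 60 bytes for IPv6 (40 + 20), so e.g. a 1500-byte Ethernet MTU yields
 * an advertised MSS of 1460 (IPv4) or 1440 (IPv6).  Names are illustrative.
 */
static int
ex_mssopt(int mtu, int isipv6)
{
	int min_protoh = isipv6 ? 40 + 20 : 20 + 20;

	return mtu - min_protoh;
}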
3154 | ||
984263bc MD |
3155 | /* |
3156 | * When a partial ack arrives, force the retransmission of the | |
91489f6b JH |
3157 | * next unacknowledged segment. Do not exit Fast Recovery. |
3158 | * | |
3159 | * Implement the Slow-but-Steady variant of NewReno by restarting | |
3160 | * the retransmission timer. Turn it off here so it can be restarted | |
3161 | * later in tcp_output(). | |
984263bc MD |
3162 | */ |
3163 | static void | |
91489f6b | 3164 | tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked) |
984263bc | 3165 | { |
91489f6b JH |
3166 | tcp_seq old_snd_nxt = tp->snd_nxt; |
3167 | u_long ocwnd = tp->snd_cwnd; | |
984263bc | 3168 | |
a48c5dd5 | 3169 | tcp_callout_stop(tp, tp->tt_rexmt); |
984263bc MD |
3170 | tp->t_rtttime = 0; |
3171 | tp->snd_nxt = th->th_ack; | |
91489f6b JH |
3172 | /* Set snd_cwnd to one segment beyond acknowledged offset. */ |
3173 | tp->snd_cwnd = tp->t_maxseg; | |
984263bc | 3174 | tp->t_flags |= TF_ACKNOW; |
f23061d4 | 3175 | tcp_output(tp); |
91489f6b JH |
3176 | if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) |
3177 | tp->snd_nxt = old_snd_nxt; | |
3178 | /* partial window deflation */ | |
68947d12 ND |
3179 | if (ocwnd > acked) |
3180 | tp->snd_cwnd = ocwnd - acked + tp->t_maxseg; | |
3181 | else | |
3182 | tp->snd_cwnd = tp->t_maxseg; | |
91489f6b JH |
3183 | } |
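/*
 * Illustrative sketch (not part of this file): the partial window deflation
 * arithmetic above.  On a partial ACK the old congestion window is deflated
 * by the amount newly acknowledged and then inflated by one segment for the
 * retransmission that was just forced out.  Names are illustrative.
 */
static unsigned long
ex_partial_ack_cwnd(unsigned long ocwnd, unsigned long acked,
    unsigned long maxseg)
{
	if (ocwnd > acked)
		return ocwnd - acked + maxseg;
	return maxseg;		/* never deflate below one segment */
}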
3184 | ||
3185 | /* | |
3186 | * In contrast to the Slow-but-Steady NewReno variant, | |
3187 | * we do not reset the retransmission timer for SACK retransmissions, | |
3188 | * except when retransmitting snd_una. | |
3189 | */ | |
3190 | static void | |
ccb518ea | 3191 | tcp_sack_rexmt(struct tcpcb *tp, boolean_t force) |
91489f6b | 3192 | { |
91489f6b JH |
3193 | tcp_seq old_snd_nxt = tp->snd_nxt; |
3194 | u_long ocwnd = tp->snd_cwnd; | |
1bdfd728 | 3195 | uint32_t pipe; |
91489f6b | 3196 | int nseg = 0; /* consecutive new segments */ |
a098966f | 3197 | int nseg_rexmt = 0; /* retransmitted segments */ |
ccb518ea | 3198 | int maxrexmt = 0; |
91489f6b JH |
3199 | #define MAXBURST 4 /* limit burst of new packets on partial ack */ |
3200 | ||
ccb518ea SZ |
3201 | if (force) { |
3202 | uint32_t unsacked = tcp_sack_first_unsacked_len(tp); | |
3203 | ||
3204 | /* | |
3205 | * Try to fill the first hole in the receiver's | |
3206 | * reassembly queue. | |
3207 | */ | |
3208 | maxrexmt = howmany(unsacked, tp->t_maxseg); | |
3209 | if (maxrexmt > tcp_force_sackrxt) | |
3210 | maxrexmt = tcp_force_sackrxt; | |
3211 | } | |
3212 | ||
91489f6b JH |
3213 | tp->t_rtttime = 0; |
3214 | pipe = tcp_sack_compute_pipe(tp); | |
ccb518ea SZ |
3215 | while (((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg |
3216 | || (force && nseg_rexmt < maxrexmt && nseg == 0)) && | |
1bdfd728 SZ |
3217 | (!tcp_do_smartsack || nseg < MAXBURST)) { |
3218 | tcp_seq old_snd_max, old_rexmt_high, nextrexmt; | |
3219 | uint32_t sent, seglen; | |
3220 | boolean_t rescue; | |
91489f6b JH |
3221 | int error; |
3222 | ||
1bdfd728 | 3223 | old_rexmt_high = tp->rexmt_high; |
64debfd6 | 3224 | if (!tcp_sack_nextseg(tp, &nextrexmt, &seglen, &rescue)) { |
1bdfd728 SZ |
3225 | tp->rexmt_high = old_rexmt_high; |
3226 | break; | |
3227 | } | |
3228 | ||
a098966f SZ |
3229 | /* |
3230 | * If the next transmission is a rescue retransmission, | |
3231 | * we check whether we have already sent some data | |
3232 | * (either new segments or retransmitted segments) | |
3233 | * into the network or not. Since the idea of rescue | |
3234 | * retransmission is to sustain ACK clock, as long as | |
3235 | * some segments are in the network, ACK clock will be | |
3236 | * kept ticking. | |
3237 | */ | |
3238 | if (rescue && (nseg_rexmt > 0 || nseg > 0)) { | |
3239 | tp->rexmt_high = old_rexmt_high; | |
3240 | break; | |
3241 | } | |
3242 | ||
98cb2337 MD |
3243 | if (nextrexmt == tp->snd_max) |
3244 | ++nseg; | |
a098966f SZ |
3245 | else |
3246 | ++nseg_rexmt; | |
91489f6b JH |
3247 | tp->snd_nxt = nextrexmt; |
3248 | tp->snd_cwnd = nextrexmt - tp->snd_una + seglen; | |
3249 | old_snd_max = tp->snd_max; | |
3250 | if (nextrexmt == tp->snd_una) | |
a48c5dd5 | 3251 | tcp_callout_stop(tp, tp->tt_rexmt); |
91489f6b | 3252 | error = tcp_output(tp); |
1bdfd728 SZ |
3253 | if (error != 0) { |
3254 | tp->rexmt_high = old_rexmt_high; | |
91489f6b | 3255 | break; |
1bdfd728 | 3256 | } |
91489f6b | 3257 | sent = tp->snd_nxt - nextrexmt; |
1bdfd728 SZ |
3258 | if (sent <= 0) { |
3259 | tp->rexmt_high = old_rexmt_high; | |
91489f6b | 3260 | break; |
1bdfd728 SZ |
3261 | } |
3262 | pipe += sent; | |
91489f6b JH |
3263 | tcpstat.tcps_sndsackpack++; |
3264 | tcpstat.tcps_sndsackbyte += sent; | |
1bdfd728 SZ |
3265 | |
3266 | if (rescue) { | |
a098966f | 3267 | tcpstat.tcps_sackrescue++; |
1bdfd728 | 3268 | tp->rexmt_rescue = tp->snd_nxt; |
c7e6499a | 3269 | tp->sack_flags |= TSACK_F_SACKRESCUED; |
1bdfd728 SZ |
3270 | break; |
3271 | } | |
91489f6b | 3272 | if (SEQ_LT(nextrexmt, old_snd_max) && |
a098966f | 3273 | SEQ_LT(tp->rexmt_high, tp->snd_nxt)) { |
91489f6b | 3274 | tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max); |
21f337ca | 3275 | if (tcp_aggressive_rescuesack && |
c7e6499a | 3276 | (tp->sack_flags & TSACK_F_SACKRESCUED) && |
a098966f SZ |
3277 | SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) { |
3278 | /* Drag RescueRxt along with HighRxt */ | |
3279 | tp->rexmt_rescue = tp->rexmt_high; | |
3280 | } | |
3281 | } | |
91489f6b JH |
3282 | } |
3283 | if (SEQ_GT(old_snd_nxt, tp->snd_nxt)) | |
3284 | tp->snd_nxt = old_snd_nxt; | |
984263bc | 3285 | tp->snd_cwnd = ocwnd; |
984263bc | 3286 | } |
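/*
 * Illustrative sketch (not part of this file): the loop above keeps sending
 * while "pipe" (an estimate of the bytes currently in flight) stays at least
 * one segment below the congestion window.  A common scoreboard-style
 * estimate, in the spirit of RFC 3517 and not necessarily identical to
 * tcp_sack_compute_pipe(), is outstanding bytes minus bytes the receiver has
 * SACKed plus bytes we have retransmitted.  Names are illustrative.
 */
typedef unsigned int ex_seq;

static unsigned int
ex_compute_pipe(ex_seq snd_una, ex_seq snd_max,
    unsigned int sacked_bytes, unsigned int rexmt_bytes)
{
	unsigned int outstanding = snd_max - snd_una;

	if (sacked_bytes > outstanding)		/* defensive clamp */
		sacked_bytes = outstanding;
	return outstanding - sacked_bytes + rexmt_bytes;
}

/* The sender may (re)transmit while pipe + maxseg <= cwnd. */
static int
ex_may_transmit(unsigned int pipe, unsigned int cwnd, unsigned int maxseg)
{
	return cwnd >= pipe && cwnd - pipe >= maxseg;
}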
0ecd93f9 | 3287 | |
e2289e66 SZ |
3288 | /* |
3289 | * Return TRUE, if some new segments are sent | |
3290 | */ | |
ffe35e17 SZ |
3291 | static boolean_t |
3292 | tcp_sack_limitedxmit(struct tcpcb *tp) | |
3293 | { | |
3294 | tcp_seq oldsndnxt = tp->snd_nxt; | |
3295 | tcp_seq oldsndmax = tp->snd_max; | |
3296 | u_long ocwnd = tp->snd_cwnd; | |
db421eef | 3297 | uint32_t pipe, sent; |
ffe35e17 | 3298 | boolean_t ret = FALSE; |
db421eef SZ |
3299 | tcp_seq_diff_t cwnd_left; |
3300 | tcp_seq next; | |
ffe35e17 SZ |
3301 | |
3302 | tp->rexmt_high = tp->snd_una - 1; | |
3303 | pipe = tcp_sack_compute_pipe(tp); | |
db421eef SZ |
3304 | cwnd_left = (tcp_seq_diff_t)(ocwnd - pipe); |
3305 | if (cwnd_left < (tcp_seq_diff_t)tp->t_maxseg) | |
3306 | return FALSE; | |
ffe35e17 | 3307 | |
db421eef SZ |
3308 | next = tp->snd_nxt = tp->snd_max; |
3309 | tp->snd_cwnd = tp->snd_nxt - tp->snd_una + | |
3310 | rounddown(cwnd_left, tp->t_maxseg); | |
ffe35e17 | 3311 | |
db421eef | 3312 | tcp_output(tp); |
ffe35e17 | 3313 | |
db421eef SZ |
3314 | sent = tp->snd_nxt - next; |
3315 | if (sent > 0) { | |
3316 | tcpstat.tcps_sndlimited += howmany(sent, tp->t_maxseg); | |
ffe35e17 SZ |
3317 | ret = TRUE; |
3318 | } | |
3319 | ||
3320 | if (SEQ_LT(oldsndnxt, oldsndmax)) { | |
3321 | KASSERT(SEQ_GEQ(oldsndnxt, tp->snd_una), | |
3322 | ("snd_una moved in other threads")); | |
3323 | tp->snd_nxt = oldsndnxt; | |
3324 | } | |
3325 | tp->snd_cwnd = ocwnd; | |
3326 | ||
e2289e66 SZ |
3327 | if (ret && TCP_DO_NCR(tp)) |
3328 | tcp_ncr_update_rxtthresh(tp); | |
3329 | ||
ffe35e17 SZ |
3330 | return ret; |
3331 | } | |
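/*
 * Illustrative sketch (not part of this file): the limited-transmit budget
 * computed above.  New data is sent only if the congestion window exceeds
 * the estimated pipe by at least one full segment, and the amount sent is
 * rounded down to a whole number of segments.  Names are illustrative.
 */
static unsigned int
ex_limitedxmit_budget(unsigned int cwnd, unsigned int pipe,
    unsigned int maxseg)
{
	unsigned int left;

	if (cwnd <= pipe)
		return 0;
	left = cwnd - pipe;
	if (left < maxseg)
		return 0;		/* less than one segment free */
	return left - (left % maxseg);	/* rounddown(left, maxseg) */
}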
3332 | ||
0ecd93f9 MD |
3333 | /* |
3334 | * Reset idle time and keep-alive timer, typically called when a valid | |
3335 | * tcp packet is received but may also be called when FASTKEEP is set | |
3336 | * to prevent the previous long-timeout from calculating to a drop. | |
3337 | * | |
3338 | * Only update t_rcvtime for non-SYN packets. | |
3339 | * | |
3340 | * Handle the case where one side thinks the connection is established | |
3341 | * but the other side has, say, rebooted without cleaning out the | |
3342 | * connection. The SYNs could be construed as an attack and wind | |
3343 | * up ignored, but in case it isn't an attack we can validate the | |
3344 | * connection by forcing a keepalive. | |
3345 | */ | |
3346 | void | |
3347 | tcp_timer_keep_activity(struct tcpcb *tp, int thflags) | |
3348 | { | |
3349 | if (TCPS_HAVEESTABLISHED(tp->t_state)) { | |
3350 | if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN) { | |
3351 | tp->t_flags |= TF_KEEPALIVE; | |
3352 | tcp_callout_reset(tp, tp->tt_keep, hz / 2, | |
3353 | tcp_timer_keep); | |
3354 | } else { | |
3355 | tp->t_rcvtime = ticks; | |
3356 | tp->t_flags &= ~TF_KEEPALIVE; | |
3357 | tcp_callout_reset(tp, tp->tt_keep, | |
fb8d5c6d | 3358 | tp->t_keepidle, |
0ecd93f9 MD |
3359 | tcp_timer_keep); |
3360 | } | |
3361 | } | |
3362 | } | |
01d3427a SZ |
3363 | |
3364 | static int | |
3365 | tcp_rmx_msl(const struct tcpcb *tp) | |
3366 | { | |
3367 | struct rtentry *rt; | |
3368 | struct inpcb *inp = tp->t_inpcb; | |
9855a4ef | 3369 | int msl; |
01d3427a SZ |
3370 | #ifdef INET6 |
3371 | boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE); | |
3372 | #else | |
3373 | const boolean_t isipv6 = FALSE; | |
3374 | #endif | |
3375 | ||
3376 | if (isipv6) | |
3377 | rt = tcp_rtlookup6(&inp->inp_inc); | |
3378 | else | |
3379 | rt = tcp_rtlookup(&inp->inp_inc); | |
3380 | if (rt == NULL || rt->rt_rmx.rmx_msl == 0) | |
3381 | return tcp_msl; | |
3382 | ||
9855a4ef SZ |
3383 | msl = (rt->rt_rmx.rmx_msl * hz) / 1000; |
3384 | if (msl == 0) | |
3385 | msl = 1; | |
3386 | ||
3387 | return msl; | |
01d3427a | 3388 | } |
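/*
 * Illustrative sketch (not part of this file): the millisecond-to-tick
 * conversion above.  A per-route MSL is kept in milliseconds and converted
 * with hz ticks per second, falling back to a default when the route has
 * none and never returning less than one tick.  Names are illustrative.
 */
static int
ex_msl_ticks(int rmx_msl_ms, int hz, int default_ticks)
{
	int msl;

	if (rmx_msl_ms == 0)
		return default_ticks;	/* no per-route override */
	msl = (rmx_msl_ms * hz) / 1000;
	if (msl == 0)
		msl = 1;		/* sub-tick values round up to 1 */
	return msl;
}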
8651f7f8 SZ |
3389 | |
3390 | static void | |
3391 | tcp_established(struct tcpcb *tp) | |
3392 | { | |
3393 | tp->t_state = TCPS_ESTABLISHED; | |
fb8d5c6d | 3394 | tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep); |
be34e534 | 3395 | |
d5082e3d | 3396 | if (tp->t_rxtsyn > 0) { |
48a4676d SZ |
3397 | /* |
3398 | * RFC6298: | |
3399 | * "If the timer expires awaiting the ACK of a SYN segment | |
3400 | * and the TCP implementation is using an RTO less than 3 | |
3401 | * seconds, the RTO MUST be re-initialized to 3 seconds | |
3402 | * when data transmission begins" | |
3403 | */ | |
3404 | if (tp->t_rxtcur < TCPTV_RTOBASE3) | |
3405 | tp->t_rxtcur = TCPTV_RTOBASE3; | |
be34e534 | 3406 | } |
8651f7f8 | 3407 | } |
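/*
 * Illustrative sketch (not part of this file): the RFC 6298 adjustment
 * above.  If the SYN had to be retransmitted and the measured RTO is below
 * three seconds, the RTO is raised to three seconds (in ticks) once the
 * connection is established.  Names are illustrative.
 */
static int
ex_established_rto(int rxtcur, int rxtsyn, int hz)
{
	int rtobase3 = 3 * hz;		/* three seconds, in ticks */

	if (rxtsyn > 0 && rxtcur < rtobase3)
		return rtobase3;
	return rxtcur;
}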
27f4bf33 SZ |
3408 | |
3409 | /* | |
3410 | * Returns TRUE, if the ACK should be dropped | |
3411 | */ | |
3412 | static boolean_t | |
5c99b248 | 3413 | tcp_recv_dupack(struct tcpcb *tp, tcp_seq th_ack, const struct tcpopt *to) |
27f4bf33 | 3414 | { |
ba0d6f99 SZ |
3415 | boolean_t fast_sack_rexmt = TRUE; |
3416 | ||
27f4bf33 SZ |
3417 | tcpstat.tcps_rcvdupack++; |
3418 | ||
3419 | /* | |
3420 | * We have outstanding data (other than a window probe), | |
3421 | * this is a completely duplicate ack (ie, window info | |
3422 | * didn't change), the ack is the biggest we've seen and | |
3423 | * we've seen exactly our rexmt threshold of them, so | |
3424 | * assume a packet has been dropped and retransmit it. | |
3425 | * Kludge snd_nxt & the congestion window so we send only | |
3426 | * this one packet. | |
3427 | */ | |
3428 | if (IN_FASTRECOVERY(tp)) { | |
3429 | if (TCP_DO_SACK(tp)) { | |
ccb518ea SZ |
3430 | boolean_t force = FALSE; |
3431 | ||
3432 | if (tp->snd_una == tp->rexmt_high && | |
3433 | (to->to_flags & (TOF_SACK | TOF_SACK_REDUNDANT)) == | |
3434 | TOF_SACK) { | |
3435 | /* | |
3436 | * New segments got SACKed and | |
3437 | * no retransmit yet. | |
3438 | */ | |
3439 | force = TRUE; | |
3440 | } | |
3441 | ||
27f4bf33 | 3442 | /* No artificial cwnd inflation. */ |
ccb518ea | 3443 | tcp_sack_rexmt(tp, force); |
27f4bf33 SZ |
3444 | } else { |
3445 | /* | |
3446 | * Dup acks mean that packets have left | |
3447 | * the network (they're now cached at the | |
3448 | * receiver) so bump cwnd by the amount in | |
3449 | * the receiver to keep a constant cwnd | |
3450 | * packets in the network. | |
3451 | */ | |
3452 | tp->snd_cwnd += tp->t_maxseg; | |
3453 | tcp_output(tp); | |
3454 | } | |
e2289e66 | 3455 | return TRUE; |
27f4bf33 SZ |
3456 | } else if (SEQ_LT(th_ack, tp->snd_recover)) { |
3457 | tp->t_dupacks = 0; | |
3458 | return FALSE; | |
3459 | } else if (tcp_ignore_redun_dsack && TCP_DO_SACK(tp) && | |
3460 | (to->to_flags & (TOF_DSACK | TOF_SACK_REDUNDANT)) == | |
3461 | (TOF_DSACK | TOF_SACK_REDUNDANT)) { | |
3462 | /* | |
3463 | * If the ACK carries DSACK and other SACK blocks | |
3464 | * carry information that we have already known, | |
3465 | * don't count this ACK as duplicate ACK. This | |
3466 | * prevents spurious early retransmit and fast | |
3467 | * retransmit. This also meets the requirement of | |
3468 | * RFC3042 that new segments should not be sent if | |
3469 | * t |