| 1 | /* |
| 2 | * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved. |
| 3 | * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved. |
| 4 | * |
| 5 | * This code is derived from software contributed to The DragonFly Project |
| 6 | * by Jeffrey M. Hsu. |
| 7 | * |
| 8 | * Redistribution and use in source and binary forms, with or without |
| 9 | * modification, are permitted provided that the following conditions |
| 10 | * are met: |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in the |
| 15 | * documentation and/or other materials provided with the distribution. |
| 16 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived |
| 18 | * from this software without specific, prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 28 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 29 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 30 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 31 | * SUCH DAMAGE. |
| 32 | */ |
| 33 | |
| 34 | /* |
| 35 | * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 |
| 36 | * The Regents of the University of California. All rights reserved. |
| 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without |
| 39 | * modification, are permitted provided that the following conditions |
| 40 | * are met: |
| 41 | * 1. Redistributions of source code must retain the above copyright |
| 42 | * notice, this list of conditions and the following disclaimer. |
| 43 | * 2. Redistributions in binary form must reproduce the above copyright |
| 44 | * notice, this list of conditions and the following disclaimer in the |
| 45 | * documentation and/or other materials provided with the distribution. |
| 46 | * 3. All advertising materials mentioning features or use of this software |
| 47 | * must display the following acknowledgement: |
| 48 | * This product includes software developed by the University of |
| 49 | * California, Berkeley and its contributors. |
| 50 | * 4. Neither the name of the University nor the names of its contributors |
| 51 | * may be used to endorse or promote products derived from this software |
| 52 | * without specific prior written permission. |
| 53 | * |
| 54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 64 | * SUCH DAMAGE. |
| 65 | * |
| 66 | * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 |
| 67 | * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $ |
| 68 | */ |
| 69 | |
| 70 | #include "opt_compat.h" |
| 71 | #include "opt_inet.h" |
| 72 | #include "opt_inet6.h" |
| 73 | #include "opt_ipsec.h" |
| 74 | #include "opt_tcpdebug.h" |
| 75 | |
| 76 | #include <sys/param.h> |
| 77 | #include <sys/systm.h> |
| 78 | #include <sys/callout.h> |
| 79 | #include <sys/kernel.h> |
| 80 | #include <sys/sysctl.h> |
| 81 | #include <sys/malloc.h> |
| 82 | #include <sys/mpipe.h> |
| 83 | #include <sys/mbuf.h> |
| 84 | #ifdef INET6 |
| 85 | #include <sys/domain.h> |
| 86 | #endif |
| 87 | #include <sys/proc.h> |
| 88 | #include <sys/priv.h> |
| 89 | #include <sys/socket.h> |
| 90 | #include <sys/socketvar.h> |
| 91 | #include <sys/protosw.h> |
| 92 | #include <sys/random.h> |
| 93 | #include <sys/in_cksum.h> |
| 94 | #include <sys/ktr.h> |
| 95 | |
| 96 | #include <net/route.h> |
| 97 | #include <net/if.h> |
| 98 | #include <net/netisr.h> |
| 99 | |
| 100 | #define _IP_VHL |
| 101 | #include <netinet/in.h> |
| 102 | #include <netinet/in_systm.h> |
| 103 | #include <netinet/ip.h> |
| 104 | #include <netinet/ip6.h> |
| 105 | #include <netinet/in_pcb.h> |
| 106 | #include <netinet6/in6_pcb.h> |
| 107 | #include <netinet/in_var.h> |
| 108 | #include <netinet/ip_var.h> |
| 109 | #include <netinet6/ip6_var.h> |
| 110 | #include <netinet/ip_icmp.h> |
| 111 | #ifdef INET6 |
| 112 | #include <netinet/icmp6.h> |
| 113 | #endif |
| 114 | #include <netinet/tcp.h> |
| 115 | #include <netinet/tcp_fsm.h> |
| 116 | #include <netinet/tcp_seq.h> |
| 117 | #include <netinet/tcp_timer.h> |
| 118 | #include <netinet/tcp_timer2.h> |
| 119 | #include <netinet/tcp_var.h> |
| 120 | #include <netinet6/tcp6_var.h> |
| 121 | #include <netinet/tcpip.h> |
| 122 | #ifdef TCPDEBUG |
| 123 | #include <netinet/tcp_debug.h> |
| 124 | #endif |
| 125 | #include <netinet6/ip6protosw.h> |
| 126 | |
| 127 | #ifdef IPSEC |
| 128 | #include <netinet6/ipsec.h> |
| 129 | #include <netproto/key/key.h> |
| 130 | #ifdef INET6 |
| 131 | #include <netinet6/ipsec6.h> |
| 132 | #endif |
| 133 | #endif |
| 134 | |
| 135 | #ifdef FAST_IPSEC |
| 136 | #include <netproto/ipsec/ipsec.h> |
| 137 | #ifdef INET6 |
| 138 | #include <netproto/ipsec/ipsec6.h> |
| 139 | #endif |
| 140 | #define IPSEC |
| 141 | #endif |
| 142 | |
| 143 | #include <sys/md5.h> |
| 144 | #include <machine/smp.h> |
| 145 | |
| 146 | #include <sys/msgport2.h> |
| 147 | #include <sys/mplock2.h> |
| 148 | #include <net/netmsg2.h> |
| 149 | |
| 150 | #if !defined(KTR_TCP) |
| 151 | #define KTR_TCP KTR_ALL |
| 152 | #endif |
| 153 | /* |
| 154 | KTR_INFO_MASTER(tcp); |
| 155 | KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0); |
| 156 | KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0); |
| 157 | KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0); |
| 158 | #define logtcp(name) KTR_LOG(tcp_ ## name) |
| 159 | */ |
| 160 | |
| 161 | struct inpcbinfo tcbinfo[MAXCPU]; |
| 162 | struct tcpcbackqhead tcpcbackq[MAXCPU]; |
| 163 | |
| 164 | static struct lwkt_token tcp_port_token = |
| 165 | LWKT_TOKEN_INITIALIZER(tcp_port_token); |
| 166 | |
| 167 | int tcp_mssdflt = TCP_MSS; |
| 168 | SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW, |
| 169 | &tcp_mssdflt, 0, "Default TCP Maximum Segment Size"); |
| 170 | |
| 171 | #ifdef INET6 |
| 172 | int tcp_v6mssdflt = TCP6_MSS; |
| 173 | SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW, |
| 174 | &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6"); |
| 175 | #endif |
| 176 | |
| 177 | /* |
| 178 | * Minimum MSS we accept and use. This prevents DoS attacks where |
| 179 | * we are forced to a ridiculously low MSS like 20 and send hundreds |
| 180 | * of packets instead of one. The effect scales with the available |
| 181 | * bandwidth and quickly saturates the CPU and network interface |
| 182 | * with packet generation and sending. Set to zero to disable MINMSS |
| 183 | * checking. This setting prevents us from sending packets that are too small. |
| 184 | */ |
| 185 | int tcp_minmss = TCP_MINMSS; |
| 186 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW, |
| 187 | &tcp_minmss, 0, "Minimum TCP Maximum Segment Size"); |
| 188 | |
| 189 | #if 0 |
| 190 | static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ; |
| 191 | SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW, |
| 192 | &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time"); |
| 193 | #endif |
| 194 | |
| 195 | int tcp_do_rfc1323 = 1; |
| 196 | SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW, |
| 197 | &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions"); |
| 198 | |
| 199 | static int tcp_tcbhashsize = 0; |
| 200 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD, |
| 201 | &tcp_tcbhashsize, 0, "Size of TCP control block hashtable"); |
| 202 | |
| 203 | static int do_tcpdrain = 1; |
| 204 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0, |
| 205 | "Enable tcp_drain routine for extra help when low on mbufs"); |
| 206 | |
| 207 | static int icmp_may_rst = 1; |
| 208 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0, |
| 209 | "Certain ICMP unreachable messages may abort connections in SYN_SENT"); |
| 210 | |
| 211 | static int tcp_isn_reseed_interval = 0; |
| 212 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW, |
| 213 | &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret"); |
| 214 | |
| 215 | /* |
| 216 | * TCP bandwidth limiting sysctls. The inflight limiter is now turned on |
| 217 | * by default, but with generous values which should allow maximal |
| 218 | * bandwidth. In particular, the slop defaults to 50 (5 packets). |
| 219 | * |
| 220 | * The reason for doing this is that the limiter is the only mechanism we |
| 221 | * have which seems to do a really good job preventing receiver RX rings |
| 222 | * on network interfaces from getting blown out. Even though GigE/10GigE |
| 223 | * is supposed to provide flow control, it looks like either it doesn't |
| 224 | * actually do so or open source drivers do not properly enable it. |
| 225 | * |
| 226 | * People using the limiter to reduce bottlenecks on slower WAN connections |
| 227 | * should set the slop to 20 (2 packets). |
| 228 | */ |
| 229 | static int tcp_inflight_enable = 1; |
| 230 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW, |
| 231 | &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting"); |
| 232 | |
| 233 | static int tcp_inflight_debug = 0; |
| 234 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW, |
| 235 | &tcp_inflight_debug, 0, "Debug TCP inflight calculations"); |
| 236 | |
| 237 | static int tcp_inflight_min = 6144; |
| 238 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW, |
| 239 | &tcp_inflight_min, 0, "Lower bound for TCP inflight window"); |
| 240 | |
| 241 | static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT; |
| 242 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW, |
| 243 | &tcp_inflight_max, 0, "Upper bound for TCP inflight window"); |
| 244 | |
| 245 | static int tcp_inflight_stab = 50; |
| 246 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW, |
| 247 | &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)"); |
| 248 | |
| 249 | static int tcp_do_rfc3390 = 1; |
| 250 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, |
| 251 | &tcp_do_rfc3390, 0, |
| 252 | "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); |
| 253 | |
| 254 | static u_long tcp_iw_maxsegs = 4; |
| 255 | SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW, |
| 256 | &tcp_iw_maxsegs, 0, "TCP IW segments max"); |
| 257 | |
| 258 | static u_long tcp_iw_capsegs = 3; |
| 259 | SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW, |
| 260 | &tcp_iw_capsegs, 0, "TCP IW segments"); |
| 261 | |
| 262 | int tcp_low_rtobase = 1; |
| 263 | SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW, |
| 264 | &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)"); |
| 265 | |
| 266 | static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives"); |
| 267 | static struct malloc_pipe tcptemp_mpipe; |
| 268 | |
| 269 | static void tcp_willblock(void); |
| 270 | static void tcp_notify (struct inpcb *, int); |
| 271 | |
| 272 | struct tcp_stats tcpstats_percpu[MAXCPU]; |
| 273 | #ifdef SMP |
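| | /* |
| | * With SMP the TCP statistics are kept per cpu.  This handler copies |
| | * each cpu's tcp_stats block out to userland; on a write the user |
| | * buffer is copied back in, which allows the counters to be cleared. |
| | * Userland is expected to sum the per-cpu blocks itself. |
| | */ |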
| 274 | static int |
| 275 | sysctl_tcpstats(SYSCTL_HANDLER_ARGS) |
| 276 | { |
| 277 | int cpu, error = 0; |
| 278 | |
| 279 | for (cpu = 0; cpu < ncpus; ++cpu) { |
| 280 | if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu], |
| 281 | sizeof(struct tcp_stats)))) |
| 282 | break; |
| 283 | if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu], |
| 284 | sizeof(struct tcp_stats)))) |
| 285 | break; |
| 286 | } |
| 287 | |
| 288 | return (error); |
| 289 | } |
| 290 | SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW), |
| 291 | 0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics"); |
| 292 | #else |
| 293 | SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, |
| 294 | &tcpstat, tcp_stats, "TCP statistics"); |
| 295 | #endif |
| 296 | |
| 297 | /* |
| 298 | * Target size of TCP PCB hash tables. Must be a power of two. |
| 299 | * |
| 300 | * Note that this can be overridden by the kernel environment |
| 301 | * variable net.inet.tcp.tcbhashsize |
| 302 | */ |
| 303 | #ifndef TCBHASHSIZE |
| 304 | #define TCBHASHSIZE 512 |
| 305 | #endif |
| 306 | |
| 307 | /* |
| 308 | * This is the actual shape of what we allocate using the zone |
| 309 | * allocator. Doing it this way allows us to protect both structures |
| 310 | * using the same generation count, and also eliminates the overhead |
| 311 | * of allocating tcpcbs separately. By hiding the structure here, |
| 312 | * we avoid changing most of the rest of the code (although it needs |
| 313 | * to be changed, eventually, for greater efficiency). |
| 314 | */ |
| 315 | #define ALIGNMENT 32 |
| 316 | #define ALIGNM1 (ALIGNMENT - 1) |
| 317 | struct inp_tp { |
| 318 | union { |
| 319 | struct inpcb inp; |
| 320 | char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1]; |
| 321 | } inp_tp_u; |
| 322 | struct tcpcb tcb; |
| 323 | struct tcp_callout inp_tp_rexmt; |
| 324 | struct tcp_callout inp_tp_persist; |
| 325 | struct tcp_callout inp_tp_keep; |
| 326 | struct tcp_callout inp_tp_2msl; |
| 327 | struct tcp_callout inp_tp_delack; |
| 328 | struct netmsg_tcp_timer inp_tp_timermsg; |
| 329 | }; |
| 330 | #undef ALIGNMENT |
| 331 | #undef ALIGNM1 |
| 332 | |
| 333 | /* |
| 334 | * Tcp initialization |
| 335 | */ |
| 336 | void |
| 337 | tcp_init(void) |
| 338 | { |
| 339 | struct inpcbporthead *porthashbase; |
| 340 | struct inpcbinfo *ticb; |
| 341 | u_long porthashmask; |
| 342 | int hashsize = TCBHASHSIZE; |
| 343 | int cpu; |
| 344 | |
| 345 | /* |
| 346 | * note: tcptemp is used for keepalives, and it is ok for an |
| 347 | * allocation to fail so do not specify MPF_INT. |
| 348 | */ |
| 349 | mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp), |
| 350 | 25, -1, 0, NULL, NULL, NULL); |
| 351 | |
| 352 | tcp_delacktime = TCPTV_DELACK; |
| 353 | tcp_keepinit = TCPTV_KEEP_INIT; |
| 354 | tcp_keepidle = TCPTV_KEEP_IDLE; |
| 355 | tcp_keepintvl = TCPTV_KEEPINTVL; |
| 356 | tcp_maxpersistidle = TCPTV_KEEP_IDLE; |
| 357 | tcp_msl = TCPTV_MSL; |
| 358 | tcp_rexmit_min = TCPTV_MIN; |
| 359 | tcp_rexmit_slop = TCPTV_CPU_VAR; |
| 360 | |
| 361 | TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize); |
| 362 | if (!powerof2(hashsize)) { |
| 363 | kprintf("WARNING: TCB hash size not a power of 2\n"); |
| 364 | hashsize = 512; /* safe default */ |
| 365 | } |
| 366 | tcp_tcbhashsize = hashsize; |
| 367 | porthashbase = hashinit(hashsize, M_PCB, &porthashmask); |
| 368 | |
| 369 | for (cpu = 0; cpu < ncpus2; cpu++) { |
| 370 | ticb = &tcbinfo[cpu]; |
| 371 | in_pcbinfo_init(ticb); |
| 372 | ticb->cpu = cpu; |
| 373 | ticb->hashbase = hashinit(hashsize, M_PCB, |
| 374 | &ticb->hashmask); |
| 375 | ticb->porthashbase = porthashbase; |
| 376 | ticb->porthashmask = porthashmask; |
| 377 | ticb->porttoken = &tcp_port_token; |
| 378 | #if 0 |
| 379 | ticb->porthashbase = hashinit(hashsize, M_PCB, |
| 380 | &ticb->porthashmask); |
| 381 | #endif |
| 382 | ticb->wildcardhashbase = hashinit(hashsize, M_PCB, |
| 383 | &ticb->wildcardhashmask); |
| 384 | ticb->ipi_size = sizeof(struct inp_tp); |
| 385 | TAILQ_INIT(&tcpcbackq[cpu]); |
| 386 | } |
| 387 | |
| 388 | tcp_reass_maxseg = nmbclusters / 16; |
| 389 | TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg); |
| 390 | |
| 391 | #ifdef INET6 |
| 392 | #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) |
| 393 | #else |
| 394 | #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) |
| 395 | #endif |
| 396 | if (max_protohdr < TCP_MINPROTOHDR) |
| 397 | max_protohdr = TCP_MINPROTOHDR; |
| 398 | if (max_linkhdr + TCP_MINPROTOHDR > MHLEN) |
| 399 | panic("tcp_init"); |
| 400 | #undef TCP_MINPROTOHDR |
| 401 | |
| 402 | /* |
| 403 | * Initialize TCP statistics counters for each CPU. |
| 404 | */ |
| 405 | #ifdef SMP |
| 406 | for (cpu = 0; cpu < ncpus; ++cpu) { |
| 407 | bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats)); |
| 408 | } |
| 409 | #else |
| 410 | bzero(&tcpstat, sizeof(struct tcp_stats)); |
| 411 | #endif |
| 412 | |
| 413 | syncache_init(); |
| 414 | netisr_register_rollup(tcp_willblock); |
| 415 | } |
| 416 | |
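| | /* |
| | * Rollup function registered with the netisr.  Before the protocol |
| | * thread blocks, run tcp_output() for any tcpcbs that were queued on |
| | * this cpu's output backlog (TF_ONOUTPUTQ). |
| | */ |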
| 417 | static void |
| 418 | tcp_willblock(void) |
| 419 | { |
| 420 | struct tcpcb *tp; |
| 421 | int cpu = mycpu->gd_cpuid; |
| 422 | |
| 423 | while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) { |
| 424 | KKASSERT(tp->t_flags & TF_ONOUTPUTQ); |
| 425 | tp->t_flags &= ~TF_ONOUTPUTQ; |
| 426 | TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq); |
| 427 | tcp_output(tp); |
| 428 | } |
| 429 | } |
| 430 | |
| 431 | /* |
| 432 | * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb. |
| 433 | * tcp_template used to store this data in mbufs, but we now recopy it out |
| 434 | * of the tcpcb each time to conserve mbufs. |
| 435 | */ |
| 436 | void |
| 437 | tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr) |
| 438 | { |
| 439 | struct inpcb *inp = tp->t_inpcb; |
| 440 | struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr; |
| 441 | |
| 442 | #ifdef INET6 |
| 443 | if (inp->inp_vflag & INP_IPV6) { |
| 444 | struct ip6_hdr *ip6; |
| 445 | |
| 446 | ip6 = (struct ip6_hdr *)ip_ptr; |
| 447 | ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | |
| 448 | (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK); |
| 449 | ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) | |
| 450 | (IPV6_VERSION & IPV6_VERSION_MASK); |
| 451 | ip6->ip6_nxt = IPPROTO_TCP; |
| 452 | ip6->ip6_plen = sizeof(struct tcphdr); |
| 453 | ip6->ip6_src = inp->in6p_laddr; |
| 454 | ip6->ip6_dst = inp->in6p_faddr; |
| 455 | tcp_hdr->th_sum = 0; |
| 456 | } else |
| 457 | #endif |
| 458 | { |
| 459 | struct ip *ip = (struct ip *) ip_ptr; |
| 460 | |
| 461 | ip->ip_vhl = IP_VHL_BORING; |
| 462 | ip->ip_tos = 0; |
| 463 | ip->ip_len = 0; |
| 464 | ip->ip_id = 0; |
| 465 | ip->ip_off = 0; |
| 466 | ip->ip_ttl = 0; |
| 467 | ip->ip_sum = 0; |
| 468 | ip->ip_p = IPPROTO_TCP; |
| 469 | ip->ip_src = inp->inp_laddr; |
| 470 | ip->ip_dst = inp->inp_faddr; |
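| | /* |
| | * Seed th_sum with the checksum of the pseudo-header; the rest of |
| | * the TCP checksum (header + payload) is computed later, either by |
| | * the checksum offload path or in software. |
| | */ |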
| 471 | tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, |
| 472 | ip->ip_dst.s_addr, |
| 473 | htons(sizeof(struct tcphdr) + IPPROTO_TCP)); |
| 474 | } |
| 475 | |
| 476 | tcp_hdr->th_sport = inp->inp_lport; |
| 477 | tcp_hdr->th_dport = inp->inp_fport; |
| 478 | tcp_hdr->th_seq = 0; |
| 479 | tcp_hdr->th_ack = 0; |
| 480 | tcp_hdr->th_x2 = 0; |
| 481 | tcp_hdr->th_off = 5; |
| 482 | tcp_hdr->th_flags = 0; |
| 483 | tcp_hdr->th_win = 0; |
| 484 | tcp_hdr->th_urp = 0; |
| 485 | } |
| 486 | |
| 487 | /* |
| 488 | * Create template to be used to send tcp packets on a connection. |
| 489 | * Allocates an mbuf and fills in a skeletal tcp/ip header. The only |
| 490 | * use for this function is in keepalives, which use tcp_respond. |
| 491 | */ |
| 492 | struct tcptemp * |
| 493 | tcp_maketemplate(struct tcpcb *tp) |
| 494 | { |
| 495 | struct tcptemp *tmp; |
| 496 | |
| 497 | if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL) |
| 498 | return (NULL); |
| 499 | tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t); |
| 500 | return (tmp); |
| 501 | } |
| 502 | |
| 503 | void |
| 504 | tcp_freetemplate(struct tcptemp *tmp) |
| 505 | { |
| 506 | mpipe_free(&tcptemp_mpipe, tmp); |
| 507 | } |
| 508 | |
| 509 | /* |
| 510 | * Send a single message to the TCP at address specified by |
| 511 | * the given TCP/IP header. If m == NULL, then we make a copy |
| 512 | * of the tcpiphdr at ti and send directly to the addressed host. |
| 513 | * This is used to force keep alive messages out using the TCP |
| 514 | * template for a connection. If flags are given then we send |
| 515 | * a message back to the TCP which originated the segment ti, |
| 516 | * and discard the mbuf containing it and any other attached mbufs. |
| 517 | * |
| 518 | * In any case the ack and sequence number of the transmitted |
| 519 | * segment are as specified by the parameters. |
| 520 | * |
| 521 | * NOTE: If m != NULL, then ti must point to *inside* the mbuf. |
| 522 | */ |
| 523 | void |
| 524 | tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, |
| 525 | tcp_seq ack, tcp_seq seq, int flags) |
| 526 | { |
| 527 | int tlen; |
| 528 | int win = 0; |
| 529 | struct route *ro = NULL; |
| 530 | struct route sro; |
| 531 | struct ip *ip = ipgen; |
| 532 | struct tcphdr *nth; |
| 533 | int ipflags = 0; |
| 534 | struct route_in6 *ro6 = NULL; |
| 535 | struct route_in6 sro6; |
| 536 | struct ip6_hdr *ip6 = ipgen; |
| 537 | boolean_t use_tmpro = TRUE; |
| 538 | #ifdef INET6 |
| 539 | boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6); |
| 540 | #else |
| 541 | const boolean_t isipv6 = FALSE; |
| 542 | #endif |
| 543 | |
| 544 | if (tp != NULL) { |
| 545 | if (!(flags & TH_RST)) { |
| 546 | win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv); |
| 547 | if (win < 0) |
| 548 | win = 0; |
| 549 | if (win > (long)TCP_MAXWIN << tp->rcv_scale) |
| 550 | win = (long)TCP_MAXWIN << tp->rcv_scale; |
| 551 | } |
| 552 | /* |
| 553 | * Don't use the route cache of a listen socket; |
| 554 | * it is not MPSAFE.  Use a temporary route cache instead. |
| 555 | */ |
| 556 | if (tp->t_state != TCPS_LISTEN) { |
| 557 | if (isipv6) |
| 558 | ro6 = &tp->t_inpcb->in6p_route; |
| 559 | else |
| 560 | ro = &tp->t_inpcb->inp_route; |
| 561 | use_tmpro = FALSE; |
| 562 | } |
| 563 | } |
| 564 | if (use_tmpro) { |
| 565 | if (isipv6) { |
| 566 | ro6 = &sro6; |
| 567 | bzero(ro6, sizeof *ro6); |
| 568 | } else { |
| 569 | ro = &sro; |
| 570 | bzero(ro, sizeof *ro); |
| 571 | } |
| 572 | } |
| 573 | if (m == NULL) { |
| 574 | m = m_gethdr(MB_DONTWAIT, MT_HEADER); |
| 575 | if (m == NULL) |
| 576 | return; |
| 577 | tlen = 0; |
| 578 | m->m_data += max_linkhdr; |
| 579 | if (isipv6) { |
| 580 | bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr)); |
| 581 | ip6 = mtod(m, struct ip6_hdr *); |
| 582 | nth = (struct tcphdr *)(ip6 + 1); |
| 583 | } else { |
| 584 | bcopy(ip, mtod(m, caddr_t), sizeof(struct ip)); |
| 585 | ip = mtod(m, struct ip *); |
| 586 | nth = (struct tcphdr *)(ip + 1); |
| 587 | } |
| 588 | bcopy(th, nth, sizeof(struct tcphdr)); |
| 589 | flags = TH_ACK; |
| 590 | } else { |
| 591 | m_freem(m->m_next); |
| 592 | m->m_next = NULL; |
| 593 | m->m_data = (caddr_t)ipgen; |
| 594 | /* m_len is set later */ |
| 595 | tlen = 0; |
| 596 | #define xchg(a, b, type) { type t; t = a; a = b; b = t; } |
| 597 | if (isipv6) { |
| 598 | xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); |
| 599 | nth = (struct tcphdr *)(ip6 + 1); |
| 600 | } else { |
| 601 | xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long); |
| 602 | nth = (struct tcphdr *)(ip + 1); |
| 603 | } |
| 604 | if (th != nth) { |
| 605 | /* |
| 606 | * this is usually the case when an extension header |
| 607 | * exists between the IPv6 header and the |
| 608 | * TCP header. |
| 609 | */ |
| 610 | nth->th_sport = th->th_sport; |
| 611 | nth->th_dport = th->th_dport; |
| 612 | } |
| 613 | xchg(nth->th_dport, nth->th_sport, n_short); |
| 614 | #undef xchg |
| 615 | } |
| 616 | if (isipv6) { |
| 617 | ip6->ip6_flow = 0; |
| 618 | ip6->ip6_vfc = IPV6_VERSION; |
| 619 | ip6->ip6_nxt = IPPROTO_TCP; |
| 620 | ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen)); |
| 621 | tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr); |
| 622 | } else { |
| 623 | tlen += sizeof(struct tcpiphdr); |
| 624 | ip->ip_len = tlen; |
| 625 | ip->ip_ttl = ip_defttl; |
| 626 | } |
| 627 | m->m_len = tlen; |
| 628 | m->m_pkthdr.len = tlen; |
| 629 | m->m_pkthdr.rcvif = NULL; |
| 630 | nth->th_seq = htonl(seq); |
| 631 | nth->th_ack = htonl(ack); |
| 632 | nth->th_x2 = 0; |
| 633 | nth->th_off = sizeof(struct tcphdr) >> 2; |
| 634 | nth->th_flags = flags; |
| 635 | if (tp != NULL) |
| 636 | nth->th_win = htons((u_short) (win >> tp->rcv_scale)); |
| 637 | else |
| 638 | nth->th_win = htons((u_short)win); |
| 639 | nth->th_urp = 0; |
| 640 | if (isipv6) { |
| 641 | nth->th_sum = 0; |
| 642 | nth->th_sum = in6_cksum(m, IPPROTO_TCP, |
| 643 | sizeof(struct ip6_hdr), |
| 644 | tlen - sizeof(struct ip6_hdr)); |
| 645 | ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL, |
| 646 | (ro6 && ro6->ro_rt) ? |
| 647 | ro6->ro_rt->rt_ifp : NULL); |
| 648 | } else { |
| 649 | nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, |
| 650 | htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); |
| 651 | m->m_pkthdr.csum_flags = CSUM_TCP; |
| 652 | m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); |
| 653 | } |
| 654 | #ifdef TCPDEBUG |
| 655 | if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) |
| 656 | tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0); |
| 657 | #endif |
| 658 | if (isipv6) { |
| 659 | ip6_output(m, NULL, ro6, ipflags, NULL, NULL, |
| 660 | tp ? tp->t_inpcb : NULL); |
| 661 | if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) { |
| 662 | RTFREE(ro6->ro_rt); |
| 663 | ro6->ro_rt = NULL; |
| 664 | } |
| 665 | } else { |
| 666 | ipflags |= IP_DEBUGROUTE; |
| 667 | ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL); |
| 668 | if ((ro == &sro) && (ro->ro_rt != NULL)) { |
| 669 | RTFREE(ro->ro_rt); |
| 670 | ro->ro_rt = NULL; |
| 671 | } |
| 672 | } |
| 673 | } |
| 674 | |
| 675 | /* |
| 676 | * Create a new TCP control block, making an |
| 677 | * empty reassembly queue and hooking it to the argument |
| 678 | * protocol control block. The `inp' parameter must have |
| 679 | * come from the zone allocator set up in tcp_init(). |
| 680 | */ |
| 681 | struct tcpcb * |
| 682 | tcp_newtcpcb(struct inpcb *inp) |
| 683 | { |
| 684 | struct inp_tp *it; |
| 685 | struct tcpcb *tp; |
| 686 | #ifdef INET6 |
| 687 | boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0); |
| 688 | #else |
| 689 | const boolean_t isipv6 = FALSE; |
| 690 | #endif |
| 691 | |
| 692 | it = (struct inp_tp *)inp; |
| 693 | tp = &it->tcb; |
| 694 | bzero(tp, sizeof(struct tcpcb)); |
| 695 | LIST_INIT(&tp->t_segq); |
| 696 | tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt; |
| 697 | |
| 698 | /* Set up our timeouts. */ |
| 699 | tp->tt_rexmt = &it->inp_tp_rexmt; |
| 700 | tp->tt_persist = &it->inp_tp_persist; |
| 701 | tp->tt_keep = &it->inp_tp_keep; |
| 702 | tp->tt_2msl = &it->inp_tp_2msl; |
| 703 | tp->tt_delack = &it->inp_tp_delack; |
| 704 | tcp_inittimers(tp); |
| 705 | |
| 706 | /* |
| 707 | * Zero out timer message. We don't create it here, |
| 708 | * since the current CPU may not be the owner of this |
| 709 | * inpcb. |
| 710 | */ |
| 711 | tp->tt_msg = &it->inp_tp_timermsg; |
| 712 | bzero(tp->tt_msg, sizeof(*tp->tt_msg)); |
| 713 | |
| 714 | tp->t_keepinit = tcp_keepinit; |
| 715 | tp->t_keepidle = tcp_keepidle; |
| 716 | tp->t_keepintvl = tcp_keepintvl; |
| 717 | tp->t_keepcnt = tcp_keepcnt; |
| 718 | tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt; |
| 719 | |
| 720 | if (tcp_do_rfc1323) |
| 721 | tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP); |
| 722 | tp->t_inpcb = inp; /* XXX */ |
| 723 | tp->t_state = TCPS_CLOSED; |
| 724 | /* |
| 725 | * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no |
| 726 | * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives |
| 727 | * reasonable initial retransmit time. |
| 728 | */ |
| 729 | tp->t_srtt = TCPTV_SRTTBASE; |
| 730 | tp->t_rttvar = |
| 731 | ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; |
| 732 | tp->t_rttmin = tcp_rexmit_min; |
| 733 | tp->t_rxtcur = TCPTV_RTOBASE; |
| 734 | tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; |
| 735 | tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; |
| 736 | tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; |
| 737 | tp->t_rcvtime = ticks; |
| 738 | /* |
| 739 | * IPv4 TTL initialization is necessary for an IPv6 socket as well, |
| 740 | * because the socket may be bound to an IPv6 wildcard address, |
| 741 | * which may match an IPv4-mapped IPv6 address. |
| 742 | */ |
| 743 | inp->inp_ip_ttl = ip_defttl; |
| 744 | inp->inp_ppcb = tp; |
| 745 | tcp_sack_tcpcb_init(tp); |
| 746 | return (tp); /* XXX */ |
| 747 | } |
| 748 | |
| 749 | /* |
| 750 | * Drop a TCP connection, reporting the specified error. |
| 751 | * If connection is synchronized, then send a RST to peer. |
| 752 | */ |
| 753 | struct tcpcb * |
| 754 | tcp_drop(struct tcpcb *tp, int error) |
| 755 | { |
| 756 | struct socket *so = tp->t_inpcb->inp_socket; |
| 757 | |
| 758 | if (TCPS_HAVERCVDSYN(tp->t_state)) { |
| 759 | tp->t_state = TCPS_CLOSED; |
| 760 | tcp_output(tp); |
| 761 | tcpstat.tcps_drops++; |
| 762 | } else |
| 763 | tcpstat.tcps_conndrops++; |
| 764 | if (error == ETIMEDOUT && tp->t_softerror) |
| 765 | error = tp->t_softerror; |
| 766 | so->so_error = error; |
| 767 | return (tcp_close(tp)); |
| 768 | } |
| 769 | |
| 770 | #ifdef SMP |
| 771 | |
| 772 | struct netmsg_listen_detach { |
| 773 | struct netmsg_base base; |
| 774 | struct tcpcb *nm_tp; |
| 775 | }; |
| 776 | |
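| | /* |
| | * Per-cpu handler chained through the protocol threads when a listen |
| | * socket is torn down: destroy the per-cpu syncache state and remove |
| | * the wildcard hash entry on this cpu, then forward the message to |
| | * the next cpu (or reply to the originator from the last cpu). |
| | */ |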
| 777 | static void |
| 778 | tcp_listen_detach_handler(netmsg_t msg) |
| 779 | { |
| 780 | struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg; |
| 781 | struct tcpcb *tp = nmsg->nm_tp; |
| 782 | int cpu = mycpuid, nextcpu; |
| 783 | |
| 784 | if (tp->t_flags & TF_LISTEN) |
| 785 | syncache_destroy(tp); |
| 786 | |
| 787 | in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]); |
| 788 | |
| 789 | nextcpu = cpu + 1; |
| 790 | if (nextcpu < ncpus2) |
| 791 | lwkt_forwardmsg(cpu_portfn(nextcpu), &nmsg->base.lmsg); |
| 792 | else |
| 793 | lwkt_replymsg(&nmsg->base.lmsg, 0); |
| 794 | } |
| 795 | |
| 796 | #endif |
| 797 | |
| 798 | /* |
| 799 | * Close a TCP control block: |
| 800 | * discard all space held by the tcp |
| 801 | * discard internet protocol block |
| 802 | * wake up any sleepers |
| 803 | */ |
| 804 | struct tcpcb * |
| 805 | tcp_close(struct tcpcb *tp) |
| 806 | { |
| 807 | struct tseg_qent *q; |
| 808 | struct inpcb *inp = tp->t_inpcb; |
| 809 | struct socket *so = inp->inp_socket; |
| 810 | struct rtentry *rt; |
| 811 | boolean_t dosavessthresh; |
| 812 | #ifdef INET6 |
| 813 | boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0); |
| 814 | boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0); |
| 815 | #else |
| 816 | const boolean_t isipv6 = FALSE; |
| 817 | #endif |
| 818 | |
| 819 | #ifdef SMP |
| 820 | /* |
| 821 | * INP_WILDCARD_MP indicates that listen(2) has been called on |
| 822 | * this socket. This implies: |
| 823 | * - A wildcard inp's hash is replicated for each protocol thread. |
| 824 | * - Syncache for this inp grows independently in each protocol |
| 825 | * thread. |
| 826 | * - There is more than one cpu |
| 827 | * |
| 828 | * We have to chain a message to the rest of the protocol threads |
| 829 | * to cleanup the wildcard hash and the syncache. The cleanup |
| 830 | * in the current protocol thread is deferred until the end of this |
| 831 | * function. |
| 832 | * |
| 833 | * NOTE: |
| 834 | * After the inp's hash and syncache entries have been cleaned up, this |
| 835 | * inp will no longer be available to the rest of the protocol threads, |
| 836 | * so we are safe to whack the inp in the following code. |
| 837 | */ |
| 838 | if (inp->inp_flags & INP_WILDCARD_MP) { |
| 839 | struct netmsg_listen_detach nmsg; |
| 840 | |
| 841 | KKASSERT(so->so_port == cpu_portfn(0)); |
| 842 | KKASSERT(&curthread->td_msgport == cpu_portfn(0)); |
| 843 | KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]); |
| 844 | |
| 845 | netmsg_init(&nmsg.base, NULL, &curthread->td_msgport, |
| 846 | MSGF_PRIORITY, tcp_listen_detach_handler); |
| 847 | nmsg.nm_tp = tp; |
| 848 | lwkt_domsg(cpu_portfn(1), &nmsg.base.lmsg, 0); |
| 849 | |
| 850 | inp->inp_flags &= ~INP_WILDCARD_MP; |
| 851 | } |
| 852 | #endif |
| 853 | |
| 854 | KKASSERT(tp->t_state != TCPS_TERMINATING); |
| 855 | tp->t_state = TCPS_TERMINATING; |
| 856 | |
| 857 | /* |
| 858 | * Make sure that all of our timers are stopped before we |
| 859 | * delete the PCB. For a listen TCP socket (tp->tt_msg == NULL), the |
| 860 | * timers are never used. If the timer message was never created |
| 861 | * (tp->tt_msg->tt_tcb == NULL), the timers are never used either. |
| 862 | */ |
| 863 | if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) { |
| 864 | tcp_callout_stop(tp, tp->tt_rexmt); |
| 865 | tcp_callout_stop(tp, tp->tt_persist); |
| 866 | tcp_callout_stop(tp, tp->tt_keep); |
| 867 | tcp_callout_stop(tp, tp->tt_2msl); |
| 868 | tcp_callout_stop(tp, tp->tt_delack); |
| 869 | } |
| 870 | |
| 871 | if (tp->t_flags & TF_ONOUTPUTQ) { |
| 872 | KKASSERT(tp->tt_cpu == mycpu->gd_cpuid); |
| 873 | TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq); |
| 874 | tp->t_flags &= ~TF_ONOUTPUTQ; |
| 875 | } |
| 876 | |
| 877 | /* |
| 878 | * If we got enough samples through the srtt filter, |
| 879 | * save the rtt and rttvar in the routing entry. |
| 880 | * 'Enough' is arbitrarily defined as 16 samples. |
| 881 | * 16 samples is enough for the srtt filter to converge |
| 882 | * to within 5% of the correct value; fewer samples and |
| 883 | * we could save a very bogus rtt. |
| 884 | * |
| 885 | * Don't update the default route's characteristics and don't |
| 886 | * update anything that the user "locked". |
| 887 | */ |
| 888 | if (tp->t_rttupdated >= 16) { |
| 889 | u_long i = 0; |
| 890 | |
| 891 | if (isipv6) { |
| 892 | struct sockaddr_in6 *sin6; |
| 893 | |
| 894 | if ((rt = inp->in6p_route.ro_rt) == NULL) |
| 895 | goto no_valid_rt; |
| 896 | sin6 = (struct sockaddr_in6 *)rt_key(rt); |
| 897 | if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) |
| 898 | goto no_valid_rt; |
| 899 | } else |
| 900 | if ((rt = inp->inp_route.ro_rt) == NULL || |
| 901 | ((struct sockaddr_in *)rt_key(rt))-> |
| 902 | sin_addr.s_addr == INADDR_ANY) |
| 903 | goto no_valid_rt; |
| 904 | |
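| | /* |
| | * t_srtt is a fixed point value in units of 1 / (hz * TCP_RTT_SCALE) |
| | * seconds, while rmx_rtt is in units of 1 / RTM_RTTUNIT seconds; the |
| | * multiplier below converts between the two (likewise for rttvar). |
| | */ |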
| 905 | if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) { |
| 906 | i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); |
| 907 | if (rt->rt_rmx.rmx_rtt && i) |
| 908 | /* |
| 909 | * filter this update to half the old & half |
| 910 | * the new values, converting scale. |
| 911 | * See route.h and tcp_var.h for a |
| 912 | * description of the scaling constants. |
| 913 | */ |
| 914 | rt->rt_rmx.rmx_rtt = |
| 915 | (rt->rt_rmx.rmx_rtt + i) / 2; |
| 916 | else |
| 917 | rt->rt_rmx.rmx_rtt = i; |
| 918 | tcpstat.tcps_cachedrtt++; |
| 919 | } |
| 920 | if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) { |
| 921 | i = tp->t_rttvar * |
| 922 | (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); |
| 923 | if (rt->rt_rmx.rmx_rttvar && i) |
| 924 | rt->rt_rmx.rmx_rttvar = |
| 925 | (rt->rt_rmx.rmx_rttvar + i) / 2; |
| 926 | else |
| 927 | rt->rt_rmx.rmx_rttvar = i; |
| 928 | tcpstat.tcps_cachedrttvar++; |
| 929 | } |
| 930 | /* |
| 931 | * The old comment here said: |
| 932 | * update the pipelimit (ssthresh) if it has been updated |
| 933 | * already or if a pipesize was specified & the threshold |
| 934 | * got below half the pipesize. I.e., wait for bad news |
| 935 | * before we start updating, then update on both good |
| 936 | * and bad news. |
| 937 | * |
| 938 | * But we want to save the ssthresh even if no pipesize is |
| 939 | * specified explicitly in the route, because such |
| 940 | * connections still have an implicit pipesize specified |
| 941 | * by the global tcp_sendspace. In the absence of a reliable |
| 942 | * way to calculate the pipesize, it will have to do. |
| 943 | */ |
| 944 | i = tp->snd_ssthresh; |
| 945 | if (rt->rt_rmx.rmx_sendpipe != 0) |
| 946 | dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2); |
| 947 | else |
| 948 | dosavessthresh = (i < so->so_snd.ssb_hiwat/2); |
| 949 | if (dosavessthresh || |
| 950 | (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) && |
| 951 | (rt->rt_rmx.rmx_ssthresh != 0))) { |
| 952 | /* |
| 953 | * convert the limit from user data bytes to |
| 954 | * packets then to packet data bytes. |
| 955 | */ |
| 956 | i = (i + tp->t_maxseg / 2) / tp->t_maxseg; |
| 957 | if (i < 2) |
| 958 | i = 2; |
| 959 | i *= tp->t_maxseg + |
| 960 | (isipv6 ? |
| 961 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : |
| 962 | sizeof(struct tcpiphdr)); |
| 963 | if (rt->rt_rmx.rmx_ssthresh) |
| 964 | rt->rt_rmx.rmx_ssthresh = |
| 965 | (rt->rt_rmx.rmx_ssthresh + i) / 2; |
| 966 | else |
| 967 | rt->rt_rmx.rmx_ssthresh = i; |
| 968 | tcpstat.tcps_cachedssthresh++; |
| 969 | } |
| 970 | } |
| 971 | |
| 972 | no_valid_rt: |
| 973 | /* free the reassembly queue, if any */ |
| 974 | while ((q = LIST_FIRST(&tp->t_segq)) != NULL) { |
| 975 | LIST_REMOVE(q, tqe_q); |
| 976 | m_freem(q->tqe_m); |
| 977 | kfree(q, M_TSEGQ); |
| 978 | atomic_add_int(&tcp_reass_qsize, -1); |
| 979 | } |
| 980 | /* throw away SACK blocks in scoreboard */ |
| 981 | if (TCP_DO_SACK(tp)) |
| 982 | tcp_sack_cleanup(&tp->scb); |
| 983 | |
| 984 | inp->inp_ppcb = NULL; |
| 985 | soisdisconnected(so); |
| 986 | /* note: pcb detached later on */ |
| 987 | |
| 988 | tcp_destroy_timermsg(tp); |
| 989 | |
| 990 | if (tp->t_flags & TF_LISTEN) |
| 991 | syncache_destroy(tp); |
| 992 | |
| 993 | /* |
| 994 | * NOTE: |
| 995 | * pcbdetach removes any wildcard hash entry on the current CPU. |
| 996 | */ |
| 997 | #ifdef INET6 |
| 998 | if (isafinet6) |
| 999 | in6_pcbdetach(inp); |
| 1000 | else |
| 1001 | #endif |
| 1002 | in_pcbdetach(inp); |
| 1003 | |
| 1004 | tcpstat.tcps_closed++; |
| 1005 | return (NULL); |
| 1006 | } |
| 1007 | |
| 1008 | static __inline void |
| 1009 | tcp_drain_oncpu(struct inpcbhead *head) |
| 1010 | { |
| 1011 | struct inpcb *marker; |
| 1012 | struct inpcb *inpb; |
| 1013 | struct tcpcb *tcpb; |
| 1014 | struct tseg_qent *te; |
| 1015 | |
| 1016 | /* |
| 1017 | * Use a placemarker so we can block (and tolerate list changes) while running the list |
| 1018 | */ |
| 1019 | marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO); |
| 1020 | marker->inp_flags |= INP_PLACEMARKER; |
| 1021 | LIST_INSERT_HEAD(head, marker, inp_list); |
| 1022 | |
| 1023 | while ((inpb = LIST_NEXT(marker, inp_list)) != NULL) { |
| 1024 | if ((inpb->inp_flags & INP_PLACEMARKER) == 0 && |
| 1025 | (tcpb = intotcpcb(inpb)) != NULL && |
| 1026 | (te = LIST_FIRST(&tcpb->t_segq)) != NULL) { |
| 1027 | LIST_REMOVE(te, tqe_q); |
| 1028 | m_freem(te->tqe_m); |
| 1029 | kfree(te, M_TSEGQ); |
| 1030 | atomic_add_int(&tcp_reass_qsize, -1); |
| 1031 | /* retry */ |
| 1032 | } else { |
| 1033 | LIST_REMOVE(marker, inp_list); |
| 1034 | LIST_INSERT_AFTER(inpb, marker, inp_list); |
| 1035 | } |
| 1036 | } |
| 1037 | LIST_REMOVE(marker, inp_list); |
| 1038 | kfree(marker, M_TEMP); |
| 1039 | } |
| 1040 | |
| 1041 | #ifdef SMP |
| 1042 | struct netmsg_tcp_drain { |
| 1043 | struct netmsg_base base; |
| 1044 | struct inpcbhead *nm_head; |
| 1045 | }; |
| 1046 | |
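| | /* |
| | * Per-cpu handler for tcp_drain(): flush the reassembly queues of the |
| | * tcpcbs on this cpu's pcb list and reply to the originator. |
| | */ |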
| 1047 | static void |
| 1048 | tcp_drain_handler(netmsg_t msg) |
| 1049 | { |
| 1050 | struct netmsg_tcp_drain *nm = (void *)msg; |
| 1051 | |
| 1052 | tcp_drain_oncpu(nm->nm_head); |
| 1053 | lwkt_replymsg(&nm->base.lmsg, 0); |
| 1054 | } |
| 1055 | #endif |
| 1056 | |
| 1057 | void |
| 1058 | tcp_drain(void) |
| 1059 | { |
| 1060 | #ifdef SMP |
| 1061 | int cpu; |
| 1062 | #endif |
| 1063 | |
| 1064 | if (!do_tcpdrain) |
| 1065 | return; |
| 1066 | |
| 1067 | /* |
| 1068 | * Walk the tcpbs, if existing, and flush the reassembly queue, |
| 1069 | * if there is one... |
| 1070 | * XXX: The "Net/3" implementation doesn't imply that the TCP |
| 1071 | * reassembly queue should be flushed, but in a situation |
| 1072 | * where we're really low on mbufs, this is potentially |
| 1073 | * useful. |
| 1074 | */ |
| 1075 | #ifdef SMP |
| 1076 | for (cpu = 0; cpu < ncpus2; cpu++) { |
| 1077 | struct netmsg_tcp_drain *nm; |
| 1078 | |
| 1079 | if (cpu == mycpu->gd_cpuid) { |
| 1080 | tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead); |
| 1081 | } else { |
| 1082 | nm = kmalloc(sizeof(struct netmsg_tcp_drain), |
| 1083 | M_LWKTMSG, M_NOWAIT); |
| 1084 | if (nm == NULL) |
| 1085 | continue; |
| 1086 | netmsg_init(&nm->base, NULL, &netisr_afree_rport, |
| 1087 | 0, tcp_drain_handler); |
| 1088 | nm->nm_head = &tcbinfo[cpu].pcblisthead; |
| 1089 | lwkt_sendmsg(cpu_portfn(cpu), &nm->base.lmsg); |
| 1090 | } |
| 1091 | } |
| 1092 | #else |
| 1093 | tcp_drain_oncpu(&tcbinfo[0].pcblisthead); |
| 1094 | #endif |
| 1095 | } |
| 1096 | |
| 1097 | /* |
| 1098 | * Notify a tcp user of an asynchronous error; |
| 1099 | * store error as soft error, but wake up user |
| 1100 | * (for now, won't do anything until we can select for soft error). |
| 1101 | * |
| 1102 | * Do not wake up user since there currently is no mechanism for |
| 1103 | * reporting soft errors (yet - a kqueue filter may be added). |
| 1104 | */ |
| 1105 | static void |
| 1106 | tcp_notify(struct inpcb *inp, int error) |
| 1107 | { |
| 1108 | struct tcpcb *tp = intotcpcb(inp); |
| 1109 | |
| 1110 | /* |
| 1111 | * Ignore some errors if we are hooked up. |
| 1112 | * If connection hasn't completed, has retransmitted several times, |
| 1113 | * and receives a second error, give up now. This is better |
| 1114 | * than waiting a long time to establish a connection that |
| 1115 | * can never complete. |
| 1116 | */ |
| 1117 | if (tp->t_state == TCPS_ESTABLISHED && |
| 1118 | (error == EHOSTUNREACH || error == ENETUNREACH || |
| 1119 | error == EHOSTDOWN)) { |
| 1120 | return; |
| 1121 | } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && |
| 1122 | tp->t_softerror) |
| 1123 | tcp_drop(tp, error); |
| 1124 | else |
| 1125 | tp->t_softerror = error; |
| 1126 | #if 0 |
| 1127 | wakeup(&so->so_timeo); |
| 1128 | sorwakeup(so); |
| 1129 | sowwakeup(so); |
| 1130 | #endif |
| 1131 | } |
| 1132 | |
| 1133 | static int |
| 1134 | tcp_pcblist(SYSCTL_HANDLER_ARGS) |
| 1135 | { |
| 1136 | int error, i, n; |
| 1137 | struct inpcb *marker; |
| 1138 | struct inpcb *inp; |
| 1139 | globaldata_t gd; |
| 1140 | int origcpu, ccpu; |
| 1141 | |
| 1142 | error = 0; |
| 1143 | n = 0; |
| 1144 | |
| 1145 | /* |
| 1146 | * The process of preparing the TCB list is too time-consuming and |
| 1147 | * resource-intensive to repeat twice on every request. |
| 1148 | */ |
| 1149 | if (req->oldptr == NULL) { |
| 1150 | for (ccpu = 0; ccpu < ncpus; ++ccpu) { |
| 1151 | gd = globaldata_find(ccpu); |
| 1152 | n += tcbinfo[gd->gd_cpuid].ipi_count; |
| 1153 | } |
| 1154 | req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb); |
| 1155 | return (0); |
| 1156 | } |
| 1157 | |
| 1158 | if (req->newptr != NULL) |
| 1159 | return (EPERM); |
| 1160 | |
| 1161 | marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO); |
| 1162 | marker->inp_flags |= INP_PLACEMARKER; |
| 1163 | |
| 1164 | /* |
| 1165 | * OK, now we're committed to doing something. Run the inpcb list |
| 1166 | * for each cpu in the system and construct the output. Use a |
| 1167 | * list placemarker to deal with list changes occurring during |
| 1168 | * copyout blockages (but otherwise depend on being on the correct |
| 1169 | * cpu to avoid races). |
| 1170 | */ |
| 1171 | origcpu = mycpu->gd_cpuid; |
| 1172 | for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) { |
| 1173 | globaldata_t rgd; |
| 1174 | caddr_t inp_ppcb; |
| 1175 | struct xtcpcb xt; |
| 1176 | int cpu_id; |
| 1177 | |
| 1178 | cpu_id = (origcpu + ccpu) % ncpus; |
| 1179 | if ((smp_active_mask & CPUMASK(cpu_id)) == 0) |
| 1180 | continue; |
| 1181 | rgd = globaldata_find(cpu_id); |
| 1182 | lwkt_setcpu_self(rgd); |
| 1183 | |
| 1184 | n = tcbinfo[cpu_id].ipi_count; |
| 1185 | |
| 1186 | LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list); |
| 1187 | i = 0; |
| 1188 | while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) { |
| 1189 | /* |
| 1190 | * process a snapshot of pcbs, ignoring placemarkers |
| 1191 | * and using our own to allow SYSCTL_OUT to block. |
| 1192 | */ |
| 1193 | LIST_REMOVE(marker, inp_list); |
| 1194 | LIST_INSERT_AFTER(inp, marker, inp_list); |
| 1195 | |
| 1196 | if (inp->inp_flags & INP_PLACEMARKER) |
| 1197 | continue; |
| 1198 | if (prison_xinpcb(req->td, inp)) |
| 1199 | continue; |
| 1200 | |
| 1201 | xt.xt_len = sizeof xt; |
| 1202 | bcopy(inp, &xt.xt_inp, sizeof *inp); |
| 1203 | inp_ppcb = inp->inp_ppcb; |
| 1204 | if (inp_ppcb != NULL) |
| 1205 | bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp); |
| 1206 | else |
| 1207 | bzero(&xt.xt_tp, sizeof xt.xt_tp); |
| 1208 | if (inp->inp_socket) |
| 1209 | sotoxsocket(inp->inp_socket, &xt.xt_socket); |
| 1210 | if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0) |
| 1211 | break; |
| 1212 | ++i; |
| 1213 | } |
| 1214 | LIST_REMOVE(marker, inp_list); |
| 1215 | if (error == 0 && i < n) { |
| 1216 | bzero(&xt, sizeof xt); |
| 1217 | xt.xt_len = sizeof xt; |
| 1218 | while (i < n) { |
| 1219 | error = SYSCTL_OUT(req, &xt, sizeof xt); |
| 1220 | if (error) |
| 1221 | break; |
| 1222 | ++i; |
| 1223 | } |
| 1224 | } |
| 1225 | } |
| 1226 | |
| 1227 | /* |
| 1228 | * Make sure we are on the same cpu we were on originally, since |
| 1229 | * higher level callers expect this. Also don't pollute caches with |
| 1230 | * migrated userland data by (eventually) returning to userland |
| 1231 | * on a different cpu. |
| 1232 | */ |
| 1233 | lwkt_setcpu_self(globaldata_find(origcpu)); |
| 1234 | kfree(marker, M_TEMP); |
| 1235 | return (error); |
| 1236 | } |
| 1237 | |
| 1238 | SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0, |
| 1239 | tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); |
| 1240 | |
| 1241 | static int |
| 1242 | tcp_getcred(SYSCTL_HANDLER_ARGS) |
| 1243 | { |
| 1244 | struct sockaddr_in addrs[2]; |
| 1245 | struct inpcb *inp; |
| 1246 | int cpu; |
| 1247 | int error; |
| 1248 | |
| 1249 | error = priv_check(req->td, PRIV_ROOT); |
| 1250 | if (error != 0) |
| 1251 | return (error); |
| 1252 | error = SYSCTL_IN(req, addrs, sizeof addrs); |
| 1253 | if (error != 0) |
| 1254 | return (error); |
| 1255 | crit_enter(); |
| 1256 | cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port, |
| 1257 | addrs[0].sin_addr.s_addr, addrs[0].sin_port); |
| 1258 | inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr, |
| 1259 | addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL); |
| 1260 | if (inp == NULL || inp->inp_socket == NULL) { |
| 1261 | error = ENOENT; |
| 1262 | goto out; |
| 1263 | } |
| 1264 | error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred)); |
| 1265 | out: |
| 1266 | crit_exit(); |
| 1267 | return (error); |
| 1268 | } |
| 1269 | |
| 1270 | SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW), |
| 1271 | 0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection"); |
| 1272 | |
| 1273 | #ifdef INET6 |
| 1274 | static int |
| 1275 | tcp6_getcred(SYSCTL_HANDLER_ARGS) |
| 1276 | { |
| 1277 | struct sockaddr_in6 addrs[2]; |
| 1278 | struct inpcb *inp; |
| 1279 | int error; |
| 1280 | boolean_t mapped = FALSE; |
| 1281 | |
| 1282 | error = priv_check(req->td, PRIV_ROOT); |
| 1283 | if (error != 0) |
| 1284 | return (error); |
| 1285 | error = SYSCTL_IN(req, addrs, sizeof addrs); |
| 1286 | if (error != 0) |
| 1287 | return (error); |
| 1288 | if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) { |
| 1289 | if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr)) |
| 1290 | mapped = TRUE; |
| 1291 | else |
| 1292 | return (EINVAL); |
| 1293 | } |
| 1294 | crit_enter(); |
| 1295 | if (mapped) { |
| 1296 | inp = in_pcblookup_hash(&tcbinfo[0], |
| 1297 | *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12], |
| 1298 | addrs[1].sin6_port, |
| 1299 | *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12], |
| 1300 | addrs[0].sin6_port, |
| 1301 | 0, NULL); |
| 1302 | } else { |
| 1303 | inp = in6_pcblookup_hash(&tcbinfo[0], |
| 1304 | &addrs[1].sin6_addr, addrs[1].sin6_port, |
| 1305 | &addrs[0].sin6_addr, addrs[0].sin6_port, |
| 1306 | 0, NULL); |
| 1307 | } |
| 1308 | if (inp == NULL || inp->inp_socket == NULL) { |
| 1309 | error = ENOENT; |
| 1310 | goto out; |
| 1311 | } |
| 1312 | error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred)); |
| 1313 | out: |
| 1314 | crit_exit(); |
| 1315 | return (error); |
| 1316 | } |
| 1317 | |
| 1318 | SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW), |
| 1319 | 0, 0, |
| 1320 | tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection"); |
| 1321 | #endif |
| 1322 | |
| 1323 | struct netmsg_tcp_notify { |
| 1324 | struct netmsg_base base; |
| 1325 | void (*nm_notify)(struct inpcb *, int); |
| 1326 | struct in_addr nm_faddr; |
| 1327 | int nm_arg; |
| 1328 | }; |
| 1329 | |
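| | /* |
| | * Run the requested notify function against this cpu's pcb list, then |
| | * chain the message to the next protocol thread; the last cpu replies |
| | * to the originating thread. |
| | */ |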
| 1330 | static void |
| 1331 | tcp_notifyall_oncpu(netmsg_t msg) |
| 1332 | { |
| 1333 | struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg; |
| 1334 | int nextcpu; |
| 1335 | |
| 1336 | in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nm->nm_faddr, |
| 1337 | nm->nm_arg, nm->nm_notify); |
| 1338 | |
| 1339 | nextcpu = mycpuid + 1; |
| 1340 | if (nextcpu < ncpus2) |
| 1341 | lwkt_forwardmsg(cpu_portfn(nextcpu), &nm->base.lmsg); |
| 1342 | else |
| 1343 | lwkt_replymsg(&nm->base.lmsg, 0); |
| 1344 | } |
| 1345 | |
| 1346 | void |
| 1347 | tcp_ctlinput(netmsg_t msg) |
| 1348 | { |
| 1349 | int cmd = msg->ctlinput.nm_cmd; |
| 1350 | struct sockaddr *sa = msg->ctlinput.nm_arg; |
| 1351 | struct ip *ip = msg->ctlinput.nm_extra; |
| 1352 | struct tcphdr *th; |
| 1353 | struct in_addr faddr; |
| 1354 | struct inpcb *inp; |
| 1355 | struct tcpcb *tp; |
| 1356 | void (*notify)(struct inpcb *, int) = tcp_notify; |
| 1357 | tcp_seq icmpseq; |
| 1358 | int arg, cpu; |
| 1359 | |
| 1360 | if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { |
| 1361 | goto done; |
| 1362 | } |
| 1363 | |
| 1364 | faddr = ((struct sockaddr_in *)sa)->sin_addr; |
| 1365 | if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) |
| 1366 | goto done; |
| 1367 | |
| 1368 | arg = inetctlerrmap[cmd]; |
| 1369 | if (cmd == PRC_QUENCH) { |
| 1370 | notify = tcp_quench; |
| 1371 | } else if (icmp_may_rst && |
| 1372 | (cmd == PRC_UNREACH_ADMIN_PROHIB || |
| 1373 | cmd == PRC_UNREACH_PORT || |
| 1374 | cmd == PRC_TIMXCEED_INTRANS) && |
| 1375 | ip != NULL) { |
| 1376 | notify = tcp_drop_syn_sent; |
| 1377 | } else if (cmd == PRC_MSGSIZE) { |
| 1378 | struct icmp *icmp = (struct icmp *) |
| 1379 | ((caddr_t)ip - offsetof(struct icmp, icmp_ip)); |
| 1380 | |
| 1381 | arg = ntohs(icmp->icmp_nextmtu); |
| 1382 | notify = tcp_mtudisc; |
| 1383 | } else if (PRC_IS_REDIRECT(cmd)) { |
| 1384 | ip = NULL; |
| 1385 | notify = in_rtchange; |
| 1386 | } else if (cmd == PRC_HOSTDEAD) { |
| 1387 | ip = NULL; |
| 1388 | } |
| 1389 | |
| 1390 | if (ip != NULL) { |
| 1391 | crit_enter(); |
| 1392 | th = (struct tcphdr *)((caddr_t)ip + |
| 1393 | (IP_VHL_HL(ip->ip_vhl) << 2)); |
| 1394 | cpu = tcp_addrcpu(faddr.s_addr, th->th_dport, |
| 1395 | ip->ip_src.s_addr, th->th_sport); |
| 1396 | inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport, |
| 1397 | ip->ip_src, th->th_sport, 0, NULL); |
| 1398 | if ((inp != NULL) && (inp->inp_socket != NULL)) { |
| 1399 | icmpseq = htonl(th->th_seq); |
| 1400 | tp = intotcpcb(inp); |
| 1401 | if (SEQ_GEQ(icmpseq, tp->snd_una) && |
| 1402 | SEQ_LT(icmpseq, tp->snd_max)) |
| 1403 | (*notify)(inp, arg); |
| 1404 | } else { |
| 1405 | struct in_conninfo inc; |
| 1406 | |
| 1407 | inc.inc_fport = th->th_dport; |
| 1408 | inc.inc_lport = th->th_sport; |
| 1409 | inc.inc_faddr = faddr; |
| 1410 | inc.inc_laddr = ip->ip_src; |
| 1411 | #ifdef INET6 |
| 1412 | inc.inc_isipv6 = 0; |
| 1413 | #endif |
| 1414 | syncache_unreach(&inc, th); |
| 1415 | } |
| 1416 | crit_exit(); |
| 1417 | } else { |
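| | /* |
| | * No embedded TCP header to identify a single connection; notify |
| | * every pcb bound to the foreign address by chaining a message |
| | * through all the protocol threads (see tcp_notifyall_oncpu). |
| | */ |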
| 1418 | struct netmsg_tcp_notify *nm; |
| 1419 | |
| 1420 | KKASSERT(&curthread->td_msgport == cpu_portfn(0)); |
| 1421 | nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT); |
| 1422 | netmsg_init(&nm->base, NULL, &netisr_afree_rport, |
| 1423 | 0, tcp_notifyall_oncpu); |
| 1424 | nm->nm_faddr = faddr; |
| 1425 | nm->nm_arg = arg; |
| 1426 | nm->nm_notify = notify; |
| 1427 | |
| 1428 | lwkt_sendmsg(cpu_portfn(0), &nm->base.lmsg); |
| 1429 | } |
| 1430 | done: |
| 1431 | lwkt_replymsg(&msg->lmsg, 0); |
| 1432 | } |
| 1433 | |
| 1434 | #ifdef INET6 |
| 1435 | |
| 1436 | void |
| 1437 | tcp6_ctlinput(netmsg_t msg) |
| 1438 | { |
| 1439 | int cmd = msg->ctlinput.nm_cmd; |
| 1440 | struct sockaddr *sa = msg->ctlinput.nm_arg; |
| 1441 | void *d = msg->ctlinput.nm_extra; |
| 1442 | struct tcphdr th; |
| 1443 | void (*notify) (struct inpcb *, int) = tcp_notify; |
| 1444 | struct ip6_hdr *ip6; |
| 1445 | struct mbuf *m; |
| 1446 | struct ip6ctlparam *ip6cp = NULL; |
| 1447 | const struct sockaddr_in6 *sa6_src = NULL; |
| 1448 | int off; |
| 1449 | struct tcp_portonly { |
| 1450 | u_int16_t th_sport; |
| 1451 | u_int16_t th_dport; |
| 1452 | } *thp; |
| 1453 | int arg; |
| 1454 | |
| 1455 | if (sa->sa_family != AF_INET6 || |
| 1456 | sa->sa_len != sizeof(struct sockaddr_in6)) { |
| 1457 | goto out; |
| 1458 | } |
| 1459 | |
| 1460 | arg = 0; |
| 1461 | if (cmd == PRC_QUENCH) |
| 1462 | notify = tcp_quench; |
| 1463 | else if (cmd == PRC_MSGSIZE) { |
| 1464 | struct ip6ctlparam *ip6cp = d; |
| 1465 | struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6; |
| 1466 | |
| 1467 | arg = ntohl(icmp6->icmp6_mtu); |
| 1468 | notify = tcp_mtudisc; |
| 1469 | } else if (!PRC_IS_REDIRECT(cmd) && |
| 1470 | ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) { |
| 1471 | goto out; |
| 1472 | } |
| 1473 | |
| 1474 | /* if the parameter is from icmp6, decode it. */ |
| 1475 | if (d != NULL) { |
| 1476 | ip6cp = (struct ip6ctlparam *)d; |
| 1477 | m = ip6cp->ip6c_m; |
| 1478 | ip6 = ip6cp->ip6c_ip6; |
| 1479 | off = ip6cp->ip6c_off; |
| 1480 | sa6_src = ip6cp->ip6c_src; |
| 1481 | } else { |
| 1482 | m = NULL; |
| 1483 | ip6 = NULL; |
| 1484 | off = 0; /* fool gcc */ |
| 1485 | sa6_src = &sa6_any; |
| 1486 | } |
| 1487 | |
| 1488 | if (ip6 != NULL) { |
| 1489 | struct in_conninfo inc; |
| 1490 | /* |
| 1491 | * XXX: We assume that when ip6 is non-NULL, |
| 1492 | * m and off are valid. |
| 1493 | */ |
| 1494 | |
| 1495 | /* check if we can safely examine src and dst ports */ |
| 1496 | if (m->m_pkthdr.len < off + sizeof *thp) |
| 1497 | goto out; |
| 1498 | |
| 1499 | bzero(&th, sizeof th); |
| 1500 | m_copydata(m, off, sizeof *thp, (caddr_t)&th); |
| 1501 | |
| 1502 | in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport, |
| 1503 | (struct sockaddr *)ip6cp->ip6c_src, |
| 1504 | th.th_sport, cmd, arg, notify); |
| 1505 | |
| 1506 | inc.inc_fport = th.th_dport; |
| 1507 | inc.inc_lport = th.th_sport; |
| 1508 | inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr; |
| 1509 | inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr; |
| 1510 | inc.inc_isipv6 = 1; |
| 1511 | syncache_unreach(&inc, &th); |
| 1512 | } else { |
| 1513 | in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0, |
| 1514 | (const struct sockaddr *)sa6_src, 0, cmd, arg, notify); |
| 1515 | } |
| 1516 | out: |
| 1517 | lwkt_replymsg(&msg->ctlinput.base.lmsg, 0); |
| 1518 | } |
| 1519 | |
| 1520 | #endif |
| 1521 | |
| 1522 | /* |
| 1523 | * Following is where TCP initial sequence number generation occurs. |
| 1524 | * |
| 1525 | * There are two places where we must use initial sequence numbers: |
| 1526 | * 1. In SYN-ACK packets. |
| 1527 | * 2. In SYN packets. |
| 1528 | * |
| 1529 | * All ISNs for SYN-ACK packets are generated by the syncache. See |
| 1530 | * tcp_syncache.c for details. |
| 1531 | * |
| 1532 | * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling |
| 1533 | * depends on this property. In addition, these ISNs should be |
| 1534 | * unguessable so as to prevent connection hijacking. To satisfy |
| 1535 | * the requirements of this situation, the algorithm outlined in |
| 1536 | * RFC 1948 is used to generate sequence numbers. |
| 1537 | * |
| 1538 | * Implementation details: |
| 1539 | * |
* The time component is derived from the system timer and is scaled
* so that the ISN advances by one megabyte of sequence space per
* second. This allows for proper recycling on high speed LANs while
* still leaving over an hour before rollover.
| 1544 | * |
| 1545 | * net.inet.tcp.isn_reseed_interval controls the number of seconds |
| 1546 | * between seeding of isn_secret. This is normally set to zero, |
| 1547 | * as reseeding should not be necessary. |
| 1548 | * |
| 1549 | */ |
| 1550 | |
| 1551 | #define ISN_BYTES_PER_SECOND 1048576 |
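/*
* With 2^32 bytes of sequence space advancing at ISN_BYTES_PER_SECOND
* (2^20) bytes per second, the ISN clock wraps after 2^32 / 2^20 =
* 4096 seconds, i.e. a little over 68 minutes.
*/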
| 1552 | |
| 1553 | u_char isn_secret[32]; |
| 1554 | int isn_last_reseed; |
| 1555 | MD5_CTX isn_ctx; |
| 1556 | |
| 1557 | tcp_seq |
| 1558 | tcp_new_isn(struct tcpcb *tp) |
| 1559 | { |
| 1560 | u_int32_t md5_buffer[4]; |
| 1561 | tcp_seq new_isn; |
| 1562 | |
| 1563 | /* Seed if this is the first use, reseed if requested. */ |
| 1564 | if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) && |
| 1565 | (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz) |
| 1566 | < (u_int)ticks))) { |
| 1567 | read_random_unlimited(&isn_secret, sizeof isn_secret); |
| 1568 | isn_last_reseed = ticks; |
| 1569 | } |
| 1570 | |
| 1571 | /* Compute the md5 hash and return the ISN. */ |
| 1572 | MD5Init(&isn_ctx); |
| 1573 | MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short)); |
| 1574 | MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short)); |
| 1575 | #ifdef INET6 |
| 1576 | if (tp->t_inpcb->inp_vflag & INP_IPV6) { |
| 1577 | MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr, |
| 1578 | sizeof(struct in6_addr)); |
| 1579 | MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr, |
| 1580 | sizeof(struct in6_addr)); |
| 1581 | } else |
| 1582 | #endif |
| 1583 | { |
| 1584 | MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr, |
| 1585 | sizeof(struct in_addr)); |
| 1586 | MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr, |
| 1587 | sizeof(struct in_addr)); |
| 1588 | } |
| 1589 | MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret)); |
| 1590 | MD5Final((u_char *) &md5_buffer, &isn_ctx); |
| 1591 | new_isn = (tcp_seq) md5_buffer[0]; |
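/*
* Add the RFC 1948 time component: ticks * (ISN_BYTES_PER_SECOND / hz)
* advances the ISN by one megabyte of sequence space per second of
* uptime.
*/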
| 1592 | new_isn += ticks * (ISN_BYTES_PER_SECOND / hz); |
| 1593 | return (new_isn); |
| 1594 | } |
| 1595 | |
| 1596 | /* |
* When a source quench is received, close the congestion window
* to one segment. We will gradually open it again as we proceed.
| 1599 | */ |
| 1600 | void |
| 1601 | tcp_quench(struct inpcb *inp, int error) |
| 1602 | { |
| 1603 | struct tcpcb *tp = intotcpcb(inp); |
| 1604 | |
| 1605 | if (tp != NULL) { |
| 1606 | tp->snd_cwnd = tp->t_maxseg; |
| 1607 | tp->snd_wacked = 0; |
| 1608 | } |
| 1609 | } |
| 1610 | |
| 1611 | /* |
| 1612 | * When a specific ICMP unreachable message is received and the |
| 1613 | * connection state is SYN-SENT, drop the connection. This behavior |
| 1614 | * is controlled by the icmp_may_rst sysctl. |
| 1615 | */ |
| 1616 | void |
| 1617 | tcp_drop_syn_sent(struct inpcb *inp, int error) |
| 1618 | { |
| 1619 | struct tcpcb *tp = intotcpcb(inp); |
| 1620 | |
| 1621 | if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT)) |
| 1622 | tcp_drop(tp, error); |
| 1623 | } |
| 1624 | |
| 1625 | /* |
* When a `need fragmentation' ICMP is received, update our idea of the
* MSS based on the newly reported MTU (clamped to the MTU recorded in
* the route). Also nudge TCP to send something, since we know the
* packet we just sent was dropped.
| 1629 | * This duplicates some code in the tcp_mss() function in tcp_input.c. |
| 1630 | */ |
| 1631 | void |
| 1632 | tcp_mtudisc(struct inpcb *inp, int mtu) |
| 1633 | { |
| 1634 | struct tcpcb *tp = intotcpcb(inp); |
| 1635 | struct rtentry *rt; |
| 1636 | struct socket *so = inp->inp_socket; |
| 1637 | int maxopd, mss; |
| 1638 | #ifdef INET6 |
| 1639 | boolean_t isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0); |
| 1640 | #else |
| 1641 | const boolean_t isipv6 = FALSE; |
| 1642 | #endif |
| 1643 | |
| 1644 | if (tp == NULL) |
| 1645 | return; |
| 1646 | |
| 1647 | /* |
| 1648 | * If no MTU is provided in the ICMP message, use the |
| 1649 | * next lower likely value, as specified in RFC 1191. |
| 1650 | */ |
| 1651 | if (mtu == 0) { |
| 1652 | int oldmtu; |
| 1653 | |
| 1654 | oldmtu = tp->t_maxopd + |
| 1655 | (isipv6 ? |
| 1656 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : |
| 1657 | sizeof(struct tcpiphdr)); |
| 1658 | mtu = ip_next_mtu(oldmtu, 0); |
| 1659 | } |
| 1660 | |
| 1661 | if (isipv6) |
| 1662 | rt = tcp_rtlookup6(&inp->inp_inc); |
| 1663 | else |
| 1664 | rt = tcp_rtlookup(&inp->inp_inc); |
| 1665 | if (rt != NULL) { |
| 1666 | if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu) |
| 1667 | mtu = rt->rt_rmx.rmx_mtu; |
| 1668 | |
| 1669 | maxopd = mtu - |
| 1670 | (isipv6 ? |
| 1671 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : |
| 1672 | sizeof(struct tcpiphdr)); |
| 1673 | |
| 1674 | /* |
| 1675 | * XXX - The following conditional probably violates the TCP |
| 1676 | * spec. The problem is that, since we don't know the |
| 1677 | * other end's MSS, we are supposed to use a conservative |
| 1678 | * default. But, if we do that, then MTU discovery will |
| 1679 | * never actually take place, because the conservative |
| 1680 | * default is much less than the MTUs typically seen |
| 1681 | * on the Internet today. For the moment, we'll sweep |
| 1682 | * this under the carpet. |
| 1683 | * |
| 1684 | * The conservative default might not actually be a problem |
| 1685 | * if the only case this occurs is when sending an initial |
| 1686 | * SYN with options and data to a host we've never talked |
| 1687 | * to before. Then, they will reply with an MSS value which |
| 1688 | * will get recorded and the new parameters should get |
| 1689 | * recomputed. For Further Study. |
| 1690 | */ |
| 1691 | if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd) |
| 1692 | maxopd = rt->rt_rmx.rmx_mssopt; |
| 1693 | } else |
| 1694 | maxopd = mtu - |
| 1695 | (isipv6 ? |
| 1696 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : |
| 1697 | sizeof(struct tcpiphdr)); |
| 1698 | |
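/* Nothing to do unless the usable packet size actually shrank. */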
| 1699 | if (tp->t_maxopd <= maxopd) |
| 1700 | return; |
| 1701 | tp->t_maxopd = maxopd; |
| 1702 | |
| 1703 | mss = maxopd; |
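/*
* If timestamps are in use on this connection, each segment carries
* TCPOLEN_TSTAMP_APPA bytes of options; subtract them from the MSS.
*/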
| 1704 | if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) == |
| 1705 | (TF_REQ_TSTMP | TF_RCVD_TSTMP)) |
| 1706 | mss -= TCPOLEN_TSTAMP_APPA; |
| 1707 | |
| 1708 | /* round down to multiple of MCLBYTES */ |
| 1709 | #if (MCLBYTES & (MCLBYTES - 1)) == 0 /* test if MCLBYTES power of 2 */ |
| 1710 | if (mss > MCLBYTES) |
| 1711 | mss &= ~(MCLBYTES - 1); |
| 1712 | #else |
| 1713 | if (mss > MCLBYTES) |
| 1714 | mss = (mss / MCLBYTES) * MCLBYTES; |
| 1715 | #endif |
| 1716 | |
| 1717 | if (so->so_snd.ssb_hiwat < mss) |
| 1718 | mss = so->so_snd.ssb_hiwat; |
| 1719 | |
| 1720 | tp->t_maxseg = mss; |
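/*
* Discard any RTT measurement in progress and rewind snd_nxt so that
* tcp_output() retransmits the dropped data using the new, smaller
* segment size.
*/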
| 1721 | tp->t_rtttime = 0; |
| 1722 | tp->snd_nxt = tp->snd_una; |
| 1723 | tcp_output(tp); |
| 1724 | tcpstat.tcps_mturesent++; |
| 1725 | } |
| 1726 | |
| 1727 | /* |
* Look up the routing entry to the peer of this inpcb. If no route
* is found and one cannot be allocated, NULL is returned. This routine
| 1730 | * is called by TCP routines that access the rmx structure and by tcp_mss |
| 1731 | * to get the interface MTU. |
| 1732 | */ |
| 1733 | struct rtentry * |
| 1734 | tcp_rtlookup(struct in_conninfo *inc) |
| 1735 | { |
| 1736 | struct route *ro = &inc->inc_route; |
| 1737 | |
| 1738 | if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) { |
| 1739 | /* No route yet, so try to acquire one */ |
| 1740 | if (inc->inc_faddr.s_addr != INADDR_ANY) { |
| 1741 | /* |
| 1742 | * unused portions of the structure MUST be zero'd |
| 1743 | * out because rtalloc() treats it as opaque data |
| 1744 | */ |
| 1745 | bzero(&ro->ro_dst, sizeof(struct sockaddr_in)); |
| 1746 | ro->ro_dst.sa_family = AF_INET; |
| 1747 | ro->ro_dst.sa_len = sizeof(struct sockaddr_in); |
| 1748 | ((struct sockaddr_in *) &ro->ro_dst)->sin_addr = |
| 1749 | inc->inc_faddr; |
| 1750 | rtalloc(ro); |
| 1751 | } |
| 1752 | } |
| 1753 | return (ro->ro_rt); |
| 1754 | } |
| 1755 | |
| 1756 | #ifdef INET6 |
| 1757 | struct rtentry * |
| 1758 | tcp_rtlookup6(struct in_conninfo *inc) |
| 1759 | { |
| 1760 | struct route_in6 *ro6 = &inc->inc6_route; |
| 1761 | |
| 1762 | if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) { |
| 1763 | /* No route yet, so try to acquire one */ |
| 1764 | if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) { |
| 1765 | /* |
| 1766 | * unused portions of the structure MUST be zero'd |
| 1767 | * out because rtalloc() treats it as opaque data |
| 1768 | */ |
| 1769 | bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6)); |
| 1770 | ro6->ro_dst.sin6_family = AF_INET6; |
| 1771 | ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6); |
| 1772 | ro6->ro_dst.sin6_addr = inc->inc6_faddr; |
| 1773 | rtalloc((struct route *)ro6); |
| 1774 | } |
| 1775 | } |
| 1776 | return (ro6->ro_rt); |
| 1777 | } |
| 1778 | #endif |
| 1779 | |
| 1780 | #ifdef IPSEC |
| 1781 | /* compute ESP/AH header size for TCP, including outer IP header. */ |
| 1782 | size_t |
| 1783 | ipsec_hdrsiz_tcp(struct tcpcb *tp) |
| 1784 | { |
| 1785 | struct inpcb *inp; |
| 1786 | struct mbuf *m; |
| 1787 | size_t hdrsiz; |
| 1788 | struct ip *ip; |
| 1789 | struct tcphdr *th; |
| 1790 | |
| 1791 | if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) |
| 1792 | return (0); |
| 1793 | MGETHDR(m, MB_DONTWAIT, MT_DATA); |
| 1794 | if (!m) |
| 1795 | return (0); |
| 1796 | |
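/*
* Build a throw-away header-only packet (IP or IPv6 plus TCP) for
* this connection so the IPsec code can report how much ESP/AH
* overhead the outbound security policies would add.
*/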
| 1797 | #ifdef INET6 |
| 1798 | if (inp->inp_vflag & INP_IPV6) { |
| 1799 | struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); |
| 1800 | |
| 1801 | th = (struct tcphdr *)(ip6 + 1); |
| 1802 | m->m_pkthdr.len = m->m_len = |
| 1803 | sizeof(struct ip6_hdr) + sizeof(struct tcphdr); |
| 1804 | tcp_fillheaders(tp, ip6, th); |
| 1805 | hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); |
| 1806 | } else |
| 1807 | #endif |
| 1808 | { |
| 1809 | ip = mtod(m, struct ip *); |
| 1810 | th = (struct tcphdr *)(ip + 1); |
| 1811 | m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr); |
| 1812 | tcp_fillheaders(tp, ip, th); |
| 1813 | hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); |
| 1814 | } |
| 1815 | |
| 1816 | m_free(m); |
| 1817 | return (hdrsiz); |
| 1818 | } |
| 1819 | #endif |
| 1820 | |
| 1821 | /* |
| 1822 | * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING |
| 1823 | * |
| 1824 | * This code attempts to calculate the bandwidth-delay product as a |
| 1825 | * means of determining the optimal window size to maximize bandwidth, |
| 1826 | * minimize RTT, and avoid the over-allocation of buffers on interfaces and |
| 1827 | * routers. This code also does a fairly good job keeping RTTs in check |
* across slow links like modems. We implement an algorithm which is very
* similar to (but not meant to be) TCP/Vegas. The code operates on the
* transmitter side of a TCP connection and so only affects the transmit
* side of the connection.
| 1832 | * |
| 1833 | * BACKGROUND: TCP makes no provision for the management of buffer space |
| 1834 | * at the end points or at the intermediate routers and switches. A TCP |
| 1835 | * stream, whether using NewReno or not, will eventually buffer as |
| 1836 | * many packets as it is able and the only reason this typically works is |
| 1837 | * due to the fairly small default buffers made available for a connection |
* (typically 16K or 32K). As machines use larger windows and/or window
| 1839 | * scaling it is now fairly easy for even a single TCP connection to blow-out |
| 1840 | * all available buffer space not only on the local interface, but on |
| 1841 | * intermediate routers and switches as well. NewReno makes a misguided |
| 1842 | * attempt to 'solve' this problem by waiting for an actual failure to occur, |
| 1843 | * then backing off, then steadily increasing the window again until another |
| 1844 | * failure occurs, ad-infinitum. This results in terrible oscillation that |
| 1845 | * is only made worse as network loads increase and the idea of intentionally |
| 1846 | * blowing out network buffers is, frankly, a terrible way to manage network |
| 1847 | * resources. |
| 1848 | * |
| 1849 | * It is far better to limit the transmit window prior to the failure |
| 1850 | * condition being achieved. There are two general ways to do this: First |
| 1851 | * you can 'scan' through different transmit window sizes and locate the |
| 1852 | * point where the RTT stops increasing, indicating that you have filled the |
| 1853 | * pipe, then scan backwards until you note that RTT stops decreasing, then |
| 1854 | * repeat ad-infinitum. This method works in principle but has severe |
| 1855 | * implementation issues due to RTT variances, timer granularity, and |
| 1856 | * instability in the algorithm which can lead to many false positives and |
| 1857 | * create oscillations as well as interact badly with other TCP streams |
| 1858 | * implementing the same algorithm. |
| 1859 | * |
| 1860 | * The second method is to limit the window to the bandwidth delay product |
| 1861 | * of the link. This is the method we implement. RTT variances and our |
| 1862 | * own manipulation of the congestion window, bwnd, can potentially |
| 1863 | * destabilize the algorithm. For this reason we have to stabilize the |
| 1864 | * elements used to calculate the window. We do this by using the minimum |
| 1865 | * observed RTT, the long term average of the observed bandwidth, and |
| 1866 | * by adding two segments worth of slop. It isn't perfect but it is able |
| 1867 | * to react to changing conditions and gives us a very stable basis on |
| 1868 | * which to extend the algorithm. |
| 1869 | */ |
| 1870 | void |
| 1871 | tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq) |
| 1872 | { |
| 1873 | u_long bw; |
| 1874 | u_long bwnd; |
| 1875 | int save_ticks; |
| 1876 | int delta_ticks; |
| 1877 | |
| 1878 | /* |
* If tcp_inflight_enable is turned off in the middle of a TCP
* connection, make sure snd_bwnd is effectively disabled.
| 1881 | */ |
| 1882 | if (!tcp_inflight_enable) { |
| 1883 | tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; |
| 1884 | tp->snd_bandwidth = 0; |
| 1885 | return; |
| 1886 | } |
| 1887 | |
| 1888 | /* |
| 1889 | * Validate the delta time. If a connection is new or has been idle |
| 1890 | * a long time we have to reset the bandwidth calculator. |
| 1891 | */ |
| 1892 | save_ticks = ticks; |
| 1893 | delta_ticks = save_ticks - tp->t_bw_rtttime; |
| 1894 | if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) { |
| 1895 | tp->t_bw_rtttime = ticks; |
| 1896 | tp->t_bw_rtseq = ack_seq; |
| 1897 | if (tp->snd_bandwidth == 0) |
| 1898 | tp->snd_bandwidth = tcp_inflight_min; |
| 1899 | return; |
| 1900 | } |
| 1901 | if (delta_ticks == 0) |
| 1902 | return; |
| 1903 | |
| 1904 | /* |
| 1905 | * Sanity check, plus ignore pure window update acks. |
| 1906 | */ |
| 1907 | if ((int)(ack_seq - tp->t_bw_rtseq) <= 0) |
| 1908 | return; |
| 1909 | |
| 1910 | /* |
| 1911 | * Figure out the bandwidth. Due to the tick granularity this |
| 1912 | * is a very rough number and it MUST be averaged over a fairly |
| 1913 | * long period of time. XXX we need to take into account a link |
| 1914 | * that is not using all available bandwidth, but for now our |
| 1915 | * slop will ramp us up if this case occurs and the bandwidth later |
| 1916 | * increases. |
| 1917 | */ |
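/* Bytes acked since the last sample, scaled to bytes per second. */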
| 1918 | bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks; |
| 1919 | tp->t_bw_rtttime = save_ticks; |
| 1920 | tp->t_bw_rtseq = ack_seq; |
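/* Low-pass filter: new average = (15 * old average + sample) / 16. */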
| 1921 | bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4; |
| 1922 | |
| 1923 | tp->snd_bandwidth = bw; |
| 1924 | |
| 1925 | /* |
| 1926 | * Calculate the semi-static bandwidth delay product, plus two maximal |
| 1927 | * segments. The additional slop puts us squarely in the sweet |
| 1928 | * spot and also handles the bandwidth run-up case. Without the |
| 1929 | * slop we could be locking ourselves into a lower bandwidth. |
| 1930 | * |
| 1931 | * Situations Handled: |
| 1932 | * (1) Prevents over-queueing of packets on LANs, especially on |
| 1933 | * high speed LANs, allowing larger TCP buffers to be |
| 1934 | * specified, and also does a good job preventing |
| 1935 | * over-queueing of packets over choke points like modems |
| 1936 | * (at least for the transmit side). |
| 1937 | * |
| 1938 | * (2) Is able to handle changing network loads (bandwidth |
| 1939 | * drops so bwnd drops, bandwidth increases so bwnd |
| 1940 | * increases). |
| 1941 | * |
| 1942 | * (3) Theoretically should stabilize in the face of multiple |
| 1943 | * connections implementing the same algorithm (this may need |
| 1944 | * a little work). |
| 1945 | * |
* (4) Stability value (defaults to 20 = 2 maximal packets) can
* be adjusted with a sysctl, but typically only needs to be
* adjusted on very slow connections. A value no smaller than
* 5 should be used, but only reduce this default if you have
* no other choice.
| 1951 | */ |
| 1952 | |
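/*
* USERTT averages the smoothed RTT with the best observed RTT; both
* are kept scaled by TCP_RTT_SHIFT, so dividing by (hz << TCP_RTT_SHIFT)
* turns bytes/second * scaled ticks into plain bytes.
* tcp_inflight_stab is in tenths of a segment, so the default of 20
* adds the two segments of slop described above.
*/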
| 1953 | #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2) |
| 1954 | bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + |
| 1955 | tcp_inflight_stab * (int)tp->t_maxseg / 10; |
| 1956 | #undef USERTT |
| 1957 | |
| 1958 | if (tcp_inflight_debug > 0) { |
| 1959 | static int ltime; |
| 1960 | if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) { |
| 1961 | ltime = ticks; |
| 1962 | kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n", |
| 1963 | tp, bw, tp->t_rttbest, tp->t_srtt, bwnd); |
| 1964 | } |
| 1965 | } |
| 1966 | if ((long)bwnd < tcp_inflight_min) |
| 1967 | bwnd = tcp_inflight_min; |
| 1968 | if (bwnd > tcp_inflight_max) |
| 1969 | bwnd = tcp_inflight_max; |
| 1970 | if ((long)bwnd < tp->t_maxseg * 2) |
| 1971 | bwnd = tp->t_maxseg * 2; |
| 1972 | tp->snd_bwnd = bwnd; |
| 1973 | } |
| 1974 | |
| 1975 | u_long |
| 1976 | tcp_initial_window(const struct tcpcb *tp) |
| 1977 | { |
| 1978 | if (tcp_do_rfc3390) { |
| 1979 | /* |
| 1980 | * RFC3390: |
| 1981 | * "If the SYN or SYN/ACK is lost, the initial window |
| 1982 | * used by a sender after a correctly transmitted SYN |
| 1983 | * MUST be one segment consisting of MSS bytes." |
| 1984 | * |
* However, we do something a little bit more aggressive
* than RFC3390 here:
* - Only if the time spent in SYN or SYN|ACK retransmission
*   is >= 3 seconds is the IW reduced. We do this mainly
*   because when RFC3390 was published, the initial RTO was
*   still 3 seconds (the threshold we test here), while
*   after RFC6298, the initial RTO is 1 second. This
*   behaviour probably still falls within the spirit of
*   RFC3390.
* - When the IW is reduced, 2*MSS is used instead of 1*MSS,
*   mainly to avoid sender and receiver deadlock until the
*   delayed ACK timer expires. Even RFC2581 does not try to
*   reduce the IW upon SYN or SYN|ACK retransmission timeout.
| 1999 | * |
| 2000 | * See also: |
| 2001 | * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03 |
| 2002 | */ |
| 2003 | if (tp->t_rxtsyn >= TCPTV_RTOBASE3) { |
| 2004 | return (2 * tp->t_maxseg); |
| 2005 | } else { |
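/*
* RFC 3390 initial window: min(tcp_iw_maxsegs * MSS,
* max(2 * MSS, tcp_iw_capsegs * 1460)). Assuming the sysctls keep
* their RFC 3390 oriented defaults (4 segments and a cap near 4380
* bytes), this matches the RFC's min(4*MSS, max(2*MSS, 4380)).
*/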
| 2006 | return min(tcp_iw_maxsegs * tp->t_maxseg, |
| 2007 | max(2 * tp->t_maxseg, |
| 2008 | tcp_iw_capsegs * 1460)); |
| 2009 | } |
| 2010 | } else { |
| 2011 | /* |
| 2012 | * Even RFC2581 (back to 1999) allows 2*SMSS IW. |
| 2013 | * |
| 2014 | * Mainly to avoid sender and receiver deadlock |
| 2015 | * until delayed ACK timer expires. |
| 2016 | */ |
| 2017 | return (2 * tp->t_maxseg); |
| 2018 | } |
| 2019 | } |
| 2020 | |
| 2021 | #ifdef TCP_SIGNATURE |
| 2022 | /* |
| 2023 | * Compute TCP-MD5 hash of a TCP segment. (RFC2385) |
| 2024 | * |
| 2025 | * We do this over ip, tcphdr, segment data, and the key in the SADB. |
| 2026 | * When called from tcp_input(), we can be sure that th_sum has been |
| 2027 | * zeroed out and verified already. |
| 2028 | * |
* Return 0 if successful, otherwise return EINVAL.
| 2030 | * |
| 2031 | * XXX The key is retrieved from the system's PF_KEY SADB, by keying a |
| 2032 | * search with the destination IP address, and a 'magic SPI' to be |
| 2033 | * determined by the application. This is hardcoded elsewhere to 1179 |
| 2034 | * right now. Another branch of this code exists which uses the SPD to |
| 2035 | * specify per-application flows but it is unstable. |
| 2036 | */ |
| 2037 | int |
| 2038 | tcpsignature_compute( |
| 2039 | struct mbuf *m, /* mbuf chain */ |
| 2040 | int len, /* length of TCP data */ |
| 2041 | int optlen, /* length of TCP options */ |
| 2042 | u_char *buf, /* storage for MD5 digest */ |
| 2043 | u_int direction) /* direction of flow */ |
| 2044 | { |
| 2045 | struct ippseudo ippseudo; |
| 2046 | MD5_CTX ctx; |
| 2047 | int doff; |
| 2048 | struct ip *ip; |
| 2049 | struct ipovly *ipovly; |
| 2050 | struct secasvar *sav; |
| 2051 | struct tcphdr *th; |
| 2052 | #ifdef INET6 |
| 2053 | struct ip6_hdr *ip6; |
| 2054 | struct in6_addr in6; |
| 2055 | uint32_t plen; |
| 2056 | uint16_t nhdr; |
| 2057 | #endif /* INET6 */ |
| 2058 | u_short savecsum; |
| 2059 | |
| 2060 | KASSERT(m != NULL, ("passed NULL mbuf. Game over.")); |
| 2061 | KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature")); |
| 2062 | /* |
| 2063 | * Extract the destination from the IP header in the mbuf. |
| 2064 | */ |
| 2065 | ip = mtod(m, struct ip *); |
| 2066 | #ifdef INET6 |
| 2067 | ip6 = NULL; /* Make the compiler happy. */ |
| 2068 | #endif /* INET6 */ |
| 2069 | /* |
| 2070 | * Look up an SADB entry which matches the address found in |
| 2071 | * the segment. |
| 2072 | */ |
| 2073 | switch (IP_VHL_V(ip->ip_vhl)) { |
| 2074 | case IPVERSION: |
| 2075 | sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, |
| 2076 | IPPROTO_TCP, htonl(TCP_SIG_SPI)); |
| 2077 | break; |
| 2078 | #ifdef INET6 |
| 2079 | case (IPV6_VERSION >> 4): |
| 2080 | ip6 = mtod(m, struct ip6_hdr *); |
| 2081 | sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, |
| 2082 | IPPROTO_TCP, htonl(TCP_SIG_SPI)); |
| 2083 | break; |
| 2084 | #endif /* INET6 */ |
| 2085 | default: |
| 2086 | return (EINVAL); |
| 2087 | /* NOTREACHED */ |
| 2088 | break; |
| 2089 | } |
| 2090 | if (sav == NULL) { |
| 2091 | kprintf("%s: SADB lookup failed\n", __func__); |
| 2092 | return (EINVAL); |
| 2093 | } |
| 2094 | MD5Init(&ctx); |
| 2095 | |
| 2096 | /* |
| 2097 | * Step 1: Update MD5 hash with IP pseudo-header. |
| 2098 | * |
| 2099 | * XXX The ippseudo header MUST be digested in network byte order, |
| 2100 | * or else we'll fail the regression test. Assume all fields we've |
| 2101 | * been doing arithmetic on have been in host byte order. |
| 2102 | * XXX One cannot depend on ipovly->ih_len here. When called from |
| 2103 | * tcp_output(), the underlying ip_len member has not yet been set. |
| 2104 | */ |
| 2105 | switch (IP_VHL_V(ip->ip_vhl)) { |
| 2106 | case IPVERSION: |
| 2107 | ipovly = (struct ipovly *)ip; |
| 2108 | ippseudo.ippseudo_src = ipovly->ih_src; |
| 2109 | ippseudo.ippseudo_dst = ipovly->ih_dst; |
| 2110 | ippseudo.ippseudo_pad = 0; |
| 2111 | ippseudo.ippseudo_p = IPPROTO_TCP; |
| 2112 | ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen); |
| 2113 | MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo)); |
| 2114 | th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip)); |
| 2115 | doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen; |
| 2116 | break; |
| 2117 | #ifdef INET6 |
| 2118 | /* |
| 2119 | * RFC 2385, 2.0 Proposal |
| 2120 | * For IPv6, the pseudo-header is as described in RFC 2460, namely the |
| 2121 | * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero- |
| 2122 | * extended next header value (to form 32 bits), and 32-bit segment |
| 2123 | * length. |
| 2124 | * Note: Upper-Layer Packet Length comes before Next Header. |
| 2125 | */ |
| 2126 | case (IPV6_VERSION >> 4): |
| 2127 | in6 = ip6->ip6_src; |
| 2128 | in6_clearscope(&in6); |
| 2129 | MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr)); |
| 2130 | in6 = ip6->ip6_dst; |
| 2131 | in6_clearscope(&in6); |
| 2132 | MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr)); |
| 2133 | plen = htonl(len + sizeof(struct tcphdr) + optlen); |
| 2134 | MD5Update(&ctx, (char *)&plen, sizeof(uint32_t)); |
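/*
* Three bytes of zero padding followed by the protocol byte make up
* the 32-bit zero-extended Next Header field described above.
*/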
| 2135 | nhdr = 0; |
| 2136 | MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); |
| 2137 | MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); |
| 2138 | MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); |
| 2139 | nhdr = IPPROTO_TCP; |
| 2140 | MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); |
| 2141 | th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr)); |
| 2142 | doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen; |
| 2143 | break; |
| 2144 | #endif /* INET6 */ |
| 2145 | default: |
| 2146 | return (EINVAL); |
| 2147 | /* NOTREACHED */ |
| 2148 | break; |
| 2149 | } |
| 2150 | /* |
| 2151 | * Step 2: Update MD5 hash with TCP header, excluding options. |
| 2152 | * The TCP checksum must be set to zero. |
| 2153 | */ |
| 2154 | savecsum = th->th_sum; |
| 2155 | th->th_sum = 0; |
| 2156 | MD5Update(&ctx, (char *)th, sizeof(struct tcphdr)); |
| 2157 | th->th_sum = savecsum; |
| 2158 | /* |
| 2159 | * Step 3: Update MD5 hash with TCP segment data. |
| 2160 | * Use m_apply() to avoid an early m_pullup(). |
| 2161 | */ |
| 2162 | if (len > 0) |
| 2163 | m_apply(m, doff, len, tcpsignature_apply, &ctx); |
| 2164 | /* |
| 2165 | * Step 4: Update MD5 hash with shared secret. |
| 2166 | */ |
| 2167 | MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); |
| 2168 | MD5Final(buf, &ctx); |
| 2169 | key_sa_recordxfer(sav, m); |
| 2170 | key_freesav(sav); |
| 2171 | return (0); |
| 2172 | } |
| 2173 | |
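/*
* m_apply() callback: fold one mbuf's worth of segment data into the
* running MD5 context built by tcpsignature_compute().
*/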
| 2174 | int |
| 2175 | tcpsignature_apply(void *fstate, void *data, unsigned int len) |
| 2176 | { |
| 2177 | |
| 2178 | MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len); |
| 2179 | return (0); |
| 2180 | } |
| 2181 | #endif /* TCP_SIGNATURE */ |