2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
63 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
66 #include "opt_compat.h"
68 #include "opt_inet6.h"
69 #include "opt_ipsec.h"
70 #include "opt_tcpdebug.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/callout.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/malloc.h>
78 #include <sys/mpipe.h>
81 #include <sys/domain.h>
85 #include <sys/socket.h>
86 #include <sys/socketops.h>
87 #include <sys/socketvar.h>
88 #include <sys/protosw.h>
89 #include <sys/random.h>
90 #include <sys/in_cksum.h>
93 #include <net/route.h>
95 #include <net/netisr2.h>
98 #include <netinet/in.h>
99 #include <netinet/in_systm.h>
100 #include <netinet/ip.h>
101 #include <netinet/ip6.h>
102 #include <netinet/in_pcb.h>
103 #include <netinet6/in6_pcb.h>
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 #include <netinet6/ip6_var.h>
107 #include <netinet/ip_icmp.h>
109 #include <netinet/icmp6.h>
111 #include <netinet/tcp.h>
112 #include <netinet/tcp_fsm.h>
113 #include <netinet/tcp_seq.h>
114 #include <netinet/tcp_timer.h>
115 #include <netinet/tcp_timer2.h>
116 #include <netinet/tcp_var.h>
117 #include <netinet6/tcp6_var.h>
118 #include <netinet/tcpip.h>
120 #include <netinet/tcp_debug.h>
122 #include <netinet6/ip6protosw.h>
125 #include <netinet6/ipsec.h>
126 #include <netproto/key/key.h>
128 #include <netinet6/ipsec6.h>
133 #include <netproto/ipsec/ipsec.h>
135 #include <netproto/ipsec/ipsec6.h>
141 #include <machine/smp.h>
143 #include <sys/msgport2.h>
144 #include <sys/mplock2.h>
145 #include <net/netmsg2.h>
147 #if !defined(KTR_TCP)
148 #define KTR_TCP KTR_ALL
151 KTR_INFO_MASTER(tcp);
152 KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
153 KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
154 KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
155 #define logtcp(name) KTR_LOG(tcp_ ## name)
158 #define TCP_IW_MAXSEGS_DFLT 4
159 #define TCP_IW_CAPSEGS_DFLT 4
161 struct inpcbinfo tcbinfo[MAXCPU];
162 struct tcpcbackqhead tcpcbackq[MAXCPU];
164 int tcp_mssdflt = TCP_MSS;
165 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
166 &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");
169 int tcp_v6mssdflt = TCP6_MSS;
170 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
171 &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
175 * Minimum MSS we accept and use. This prevents DoS attacks where
176 * we are forced to a ridiculous low MSS like 20 and send hundreds
177 * of packets instead of one. The effect scales with the available
178 * bandwidth and quickly saturates the CPU and network interface
179 * with packet generation and sending. Set to zero to disable MINMSS
180 * checking. This setting prevents us from sending too small packets.
182 int tcp_minmss = TCP_MINMSS;
183 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
184 &tcp_minmss , 0, "Minmum TCP Maximum Segment Size");
187 static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
188 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
189 &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
192 int tcp_do_rfc1323 = 1;
193 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
194 &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");
196 static int tcp_tcbhashsize = 0;
197 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
198 &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");
200 static int do_tcpdrain = 1;
201 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
202 "Enable tcp_drain routine for extra help when low on mbufs");
204 static int icmp_may_rst = 1;
205 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
206 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
208 static int tcp_isn_reseed_interval = 0;
209 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
210 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
213 * TCP bandwidth limiting sysctls. The inflight limiter is now turned on
214 * by default, but with generous values which should allow maximal
215 * bandwidth. In particular, the slop defaults to 50 (5 packets).
217 * The reason for doing this is that the limiter is the only mechanism we
218 * have which seems to do a really good job preventing receiver RX rings
219 * on network interfaces from getting blown out. Even though GigE/10GigE
220 * is supposed to flow control it looks like either it doesn't actually
221 * do it or Open Source drivers do not properly enable it.
223 * People using the limiter to reduce bottlenecks on slower WAN connections
224 * should set the slop to 20 (2 packets).
226 static int tcp_inflight_enable = 1;
227 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
228 &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");
230 static int tcp_inflight_debug = 0;
231 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
232 &tcp_inflight_debug, 0, "Debug TCP inflight calculations");
234 static int tcp_inflight_min = 6144;
235 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
236 &tcp_inflight_min, 0, "Lower bound for TCP inflight window");
238 static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
239 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
240 &tcp_inflight_max, 0, "Upper bound for TCP inflight window");
242 static int tcp_inflight_stab = 50;
243 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
244 &tcp_inflight_stab, 0, "Fudge bw 1/10% (50=5%)");
246 static int tcp_inflight_adjrtt = 2;
247 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_adjrtt, CTLFLAG_RW,
248 &tcp_inflight_adjrtt, 0, "Slop for rtt 1/(hz*32)");
250 static int tcp_do_rfc3390 = 1;
251 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
253 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
255 static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
256 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
257 &tcp_iw_maxsegs, 0, "TCP IW segments max");
259 static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
260 SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
261 &tcp_iw_capsegs, 0, "TCP IW segments");
263 int tcp_low_rtobase = 1;
264 SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
265 &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");
267 static int tcp_do_ncr = 1;
268 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
269 &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");
271 int tcp_ncr_rxtthresh_max = 16;
272 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_rxtthresh_max, CTLFLAG_RW,
273 &tcp_ncr_rxtthresh_max, 0,
274 "Non-Congestion Robustness (RFC 4653), DupThresh upper limit");
276 static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
277 static struct malloc_pipe tcptemp_mpipe;
279 static void tcp_willblock(void);
280 static void tcp_notify (struct inpcb *, int);
282 struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;
284 static struct netmsg_base tcp_drain_netmsg[MAXCPU];
285 static void tcp_drain_dispatch(netmsg_t nmsg);
288 sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
292 for (cpu = 0; cpu < ncpus2; ++cpu) {
293 if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
294 sizeof(struct tcp_stats))))
296 if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
297 sizeof(struct tcp_stats))))
303 SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
304 0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
307 * Target size of TCP PCB hash tables. Must be a power of two.
309 * Note that this can be overridden by the kernel environment
310 * variable net.inet.tcp.tcbhashsize
313 #define TCBHASHSIZE 512
317 * This is the actual shape of what we allocate using the zone
318 * allocator. Doing it this way allows us to protect both structures
319 * using the same generation count, and also eliminates the overhead
320 * of allocating tcpcbs separately. By hiding the structure here,
321 * we avoid changing most of the rest of the code (although it needs
322 * to be changed, eventually, for greater efficiency).
325 #define ALIGNM1 (ALIGNMENT - 1)
329 char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
332 struct tcp_callout inp_tp_rexmt;
333 struct tcp_callout inp_tp_persist;
334 struct tcp_callout inp_tp_keep;
335 struct tcp_callout inp_tp_2msl;
336 struct tcp_callout inp_tp_delack;
337 struct netmsg_tcp_timer inp_tp_timermsg;
338 struct netmsg_base inp_tp_sndmore;
349 struct inpcbportinfo *portinfo;
350 struct inpcbinfo *ticb;
351 int hashsize = TCBHASHSIZE;
355 * note: tcptemp is used for keepalives, and it is ok for an
356 * allocation to fail so do not specify MPF_INT.
358 mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
359 25, -1, 0, NULL, NULL, NULL);
361 tcp_delacktime = TCPTV_DELACK;
362 tcp_keepinit = TCPTV_KEEP_INIT;
363 tcp_keepidle = TCPTV_KEEP_IDLE;
364 tcp_keepintvl = TCPTV_KEEPINTVL;
365 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
367 tcp_rexmit_min = TCPTV_MIN;
368 tcp_rexmit_slop = TCPTV_CPU_VAR;
370 TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
371 if (!powerof2(hashsize)) {
372 kprintf("WARNING: TCB hash size not a power of 2\n");
373 hashsize = 512; /* safe default */
375 tcp_tcbhashsize = hashsize;
377 portinfo = kmalloc_cachealign(sizeof(*portinfo) * ncpus2, M_PCB,
380 for (cpu = 0; cpu < ncpus2; cpu++) {
381 ticb = &tcbinfo[cpu];
382 in_pcbinfo_init(ticb, cpu, FALSE);
383 ticb->hashbase = hashinit(hashsize, M_PCB,
385 in_pcbportinfo_init(&portinfo[cpu], hashsize, TRUE, cpu);
386 ticb->portinfo = portinfo;
387 ticb->portinfo_mask = ncpus2_mask;
388 ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
389 &ticb->wildcardhashmask);
390 ticb->localgrphashbase = hashinit(hashsize, M_PCB,
391 &ticb->localgrphashmask);
392 ticb->ipi_size = sizeof(struct inp_tp);
393 TAILQ_INIT(&tcpcbackq[cpu]);
396 tcp_reass_maxseg = nmbclusters / 16;
397 TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);
400 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
402 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
404 if (max_protohdr < TCP_MINPROTOHDR)
405 max_protohdr = TCP_MINPROTOHDR;
406 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
408 #undef TCP_MINPROTOHDR
411 * Initialize TCP statistics counters for each CPU.
413 for (cpu = 0; cpu < ncpus2; ++cpu)
414 bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));
417 * Initialize netmsgs for TCP drain
419 for (cpu = 0; cpu < ncpus2; ++cpu) {
420 netmsg_init(&tcp_drain_netmsg[cpu], NULL, &netisr_adone_rport,
421 MSGF_PRIORITY, tcp_drain_dispatch);
425 netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
432 int cpu = mycpu->gd_cpuid;
434 while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
435 KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
436 tp->t_flags &= ~TF_ONOUTPUTQ;
437 TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
443 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
444 * tcp_template used to store this data in mbufs, but we now recopy it out
445 * of the tcpcb each time to conserve mbufs.
448 tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
450 struct inpcb *inp = tp->t_inpcb;
451 struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
454 if (INP_ISIPV6(inp)) {
457 ip6 = (struct ip6_hdr *)ip_ptr;
458 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
459 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
460 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
461 (IPV6_VERSION & IPV6_VERSION_MASK);
462 ip6->ip6_nxt = IPPROTO_TCP;
463 ip6->ip6_plen = sizeof(struct tcphdr);
464 ip6->ip6_src = inp->in6p_laddr;
465 ip6->ip6_dst = inp->in6p_faddr;
470 struct ip *ip = (struct ip *) ip_ptr;
473 ip->ip_vhl = IP_VHL_BORING;
480 ip->ip_p = IPPROTO_TCP;
481 ip->ip_src = inp->inp_laddr;
482 ip->ip_dst = inp->inp_faddr;
485 plen = htons(IPPROTO_TCP);
487 plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
488 tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
489 ip->ip_dst.s_addr, plen);
492 tcp_hdr->th_sport = inp->inp_lport;
493 tcp_hdr->th_dport = inp->inp_fport;
498 tcp_hdr->th_flags = 0;
504 * Create template to be used to send tcp packets on a connection.
505 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
506 * use for this function is in keepalives, which use tcp_respond.
509 tcp_maketemplate(struct tcpcb *tp)
513 if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
515 tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
520 tcp_freetemplate(struct tcptemp *tmp)
522 mpipe_free(&tcptemp_mpipe, tmp);
526 * Send a single message to the TCP at address specified by
527 * the given TCP/IP header. If m == NULL, then we make a copy
528 * of the tcpiphdr at ti and send directly to the addressed host.
529 * This is used to force keep alive messages out using the TCP
530 * template for a connection. If flags are given then we send
531 * a message back to the TCP which originated the * segment ti,
532 * and discard the mbuf containing it and any other attached mbufs.
534 * In any case the ack and sequence number of the transmitted
535 * segment are as specified by the parameters.
537 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
540 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
541 tcp_seq ack, tcp_seq seq, int flags)
545 struct route *ro = NULL;
547 struct ip *ip = ipgen;
550 struct route_in6 *ro6 = NULL;
551 struct route_in6 sro6;
552 struct ip6_hdr *ip6 = ipgen;
553 boolean_t use_tmpro = TRUE;
555 boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
557 const boolean_t isipv6 = FALSE;
561 if (!(flags & TH_RST)) {
562 win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
565 if (win > (long)TCP_MAXWIN << tp->rcv_scale)
566 win = (long)TCP_MAXWIN << tp->rcv_scale;
569 * Don't use the route cache of a listen socket,
570 * it is not MPSAFE; use temporary route cache.
572 if (tp->t_state != TCPS_LISTEN) {
574 ro6 = &tp->t_inpcb->in6p_route;
576 ro = &tp->t_inpcb->inp_route;
583 bzero(ro6, sizeof *ro6);
586 bzero(ro, sizeof *ro);
590 m = m_gethdr(M_NOWAIT, MT_HEADER);
594 m->m_data += max_linkhdr;
596 bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
597 ip6 = mtod(m, struct ip6_hdr *);
598 nth = (struct tcphdr *)(ip6 + 1);
600 bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
601 ip = mtod(m, struct ip *);
602 nth = (struct tcphdr *)(ip + 1);
604 bcopy(th, nth, sizeof(struct tcphdr));
609 m->m_data = (caddr_t)ipgen;
610 /* m_len is set later */
612 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
614 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
615 nth = (struct tcphdr *)(ip6 + 1);
617 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
618 nth = (struct tcphdr *)(ip + 1);
622 * this is usually a case when an extension header
623 * exists between the IPv6 header and the
626 nth->th_sport = th->th_sport;
627 nth->th_dport = th->th_dport;
629 xchg(nth->th_dport, nth->th_sport, n_short);
634 ip6->ip6_vfc = IPV6_VERSION;
635 ip6->ip6_nxt = IPPROTO_TCP;
636 ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
637 tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
639 tlen += sizeof(struct tcpiphdr);
641 ip->ip_ttl = ip_defttl;
644 m->m_pkthdr.len = tlen;
645 m->m_pkthdr.rcvif = NULL;
646 nth->th_seq = htonl(seq);
647 nth->th_ack = htonl(ack);
649 nth->th_off = sizeof(struct tcphdr) >> 2;
650 nth->th_flags = flags;
652 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
654 nth->th_win = htons((u_short)win);
658 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
659 sizeof(struct ip6_hdr),
660 tlen - sizeof(struct ip6_hdr));
661 ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
662 (ro6 && ro6->ro_rt) ?
663 ro6->ro_rt->rt_ifp : NULL);
665 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
666 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
667 m->m_pkthdr.csum_flags = CSUM_TCP;
668 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
669 m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
672 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
673 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
676 ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
677 tp ? tp->t_inpcb : NULL);
678 if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
683 ipflags |= IP_DEBUGROUTE;
684 ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
685 if ((ro == &sro) && (ro->ro_rt != NULL)) {
693 * Create a new TCP control block, making an
694 * empty reassembly queue and hooking it to the argument
695 * protocol control block. The `inp' parameter must have
696 * come from the zone allocator set up in tcp_init().
699 tcp_newtcpcb(struct inpcb *inp)
704 boolean_t isipv6 = INP_ISIPV6(inp);
706 const boolean_t isipv6 = FALSE;
709 it = (struct inp_tp *)inp;
711 bzero(tp, sizeof(struct tcpcb));
712 TAILQ_INIT(&tp->t_segq);
713 tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
714 tp->t_rxtthresh = tcprexmtthresh;
716 /* Set up our timeouts. */
717 tp->tt_rexmt = &it->inp_tp_rexmt;
718 tp->tt_persist = &it->inp_tp_persist;
719 tp->tt_keep = &it->inp_tp_keep;
720 tp->tt_2msl = &it->inp_tp_2msl;
721 tp->tt_delack = &it->inp_tp_delack;
725 * Zero out timer message. We don't create it here,
726 * since the current CPU may not be the owner of this
729 tp->tt_msg = &it->inp_tp_timermsg;
730 bzero(tp->tt_msg, sizeof(*tp->tt_msg));
732 tp->t_keepinit = tcp_keepinit;
733 tp->t_keepidle = tcp_keepidle;
734 tp->t_keepintvl = tcp_keepintvl;
735 tp->t_keepcnt = tcp_keepcnt;
736 tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
739 tp->t_flags |= TF_NCR;
741 tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);
743 tp->t_inpcb = inp; /* XXX */
744 tp->t_state = TCPS_CLOSED;
746 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
747 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
748 * reasonable initial retransmit time.
750 tp->t_srtt = TCPTV_SRTTBASE;
752 ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
753 tp->t_rttmin = tcp_rexmit_min;
754 tp->t_rxtcur = TCPTV_RTOBASE;
755 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
756 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
757 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
758 tp->snd_last = ticks;
759 tp->t_rcvtime = ticks;
761 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
762 * because the socket may be bound to an IPv6 wildcard address,
763 * which may match an IPv4-mapped IPv6 address.
765 inp->inp_ip_ttl = ip_defttl;
767 tcp_sack_tcpcb_init(tp);
769 tp->tt_sndmore = &it->inp_tp_sndmore;
772 return (tp); /* XXX */
776 * Drop a TCP connection, reporting the specified error.
777 * If connection is synchronized, then send a RST to peer.
780 tcp_drop(struct tcpcb *tp, int error)
782 struct socket *so = tp->t_inpcb->inp_socket;
784 if (TCPS_HAVERCVDSYN(tp->t_state)) {
785 tp->t_state = TCPS_CLOSED;
787 tcpstat.tcps_drops++;
789 tcpstat.tcps_conndrops++;
790 if (error == ETIMEDOUT && tp->t_softerror)
791 error = tp->t_softerror;
792 so->so_error = error;
793 return (tcp_close(tp));
796 struct netmsg_listen_detach {
797 struct netmsg_base base;
799 struct tcpcb *nm_tp_inh;
803 tcp_listen_detach_handler(netmsg_t msg)
805 struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
806 struct tcpcb *tp = nmsg->nm_tp;
807 int cpu = mycpuid, nextcpu;
809 if (tp->t_flags & TF_LISTEN)
810 syncache_destroy(tp, nmsg->nm_tp_inh);
812 in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);
815 if (nextcpu < ncpus2)
816 lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
818 lwkt_replymsg(&nmsg->base.lmsg, 0);
822 * Close a TCP control block:
823 * discard all space held by the tcp
824 * discard internet protocol block
825 * wake up any sleepers
828 tcp_close(struct tcpcb *tp)
831 struct inpcb *inp = tp->t_inpcb;
832 struct inpcb *inp_inh = NULL;
833 struct tcpcb *tp_inh = NULL;
834 struct socket *so = inp->inp_socket;
836 boolean_t dosavessthresh;
838 boolean_t isipv6 = INP_ISIPV6(inp);
840 const boolean_t isipv6 = FALSE;
843 if (tp->t_flags & TF_LISTEN) {
845 * Pending socket/syncache inheritance
847 * If this is a listen(2) socket, find another listen(2)
848 * socket in the same local group, which could inherit
849 * the syncache and sockets pending on the completion
850 * and incompletion queues.
853 * Currently the inheritance could only happen on the
854 * listen(2) sockets w/ SO_REUSEPORT set.
857 inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
859 tp_inh = intotcpcb(inp_inh);
863 * INP_WILDCARD indicates that listen(2) has been called on
864 * this socket. This implies:
865 * - A wildcard inp's hash is replicated for each protocol thread.
866 * - Syncache for this inp grows independently in each protocol
868 * - There is more than one cpu
870 * We have to chain a message to the rest of the protocol threads
871 * to cleanup the wildcard hash and the syncache. The cleanup
872 * in the current protocol thread is defered till the end of this
873 * function (syncache_destroy and in_pcbdetach).
876 * After cleanup the inp's hash and syncache entries, this inp will
877 * no longer be available to the rest of the protocol threads, so we
878 * are safe to whack the inp in the following code.
880 if ((inp->inp_flags & INP_WILDCARD) && ncpus2 > 1) {
881 struct netmsg_listen_detach nmsg;
883 KKASSERT(so->so_port == netisr_cpuport(0));
885 KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
887 netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
888 MSGF_PRIORITY, tcp_listen_detach_handler);
890 nmsg.nm_tp_inh = tp_inh;
891 lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);
894 KKASSERT(tp->t_state != TCPS_TERMINATING);
895 tp->t_state = TCPS_TERMINATING;
898 * Make sure that all of our timers are stopped before we
899 * delete the PCB. For listen TCP socket (tp->tt_msg == NULL),
900 * timers are never used. If timer message is never created
901 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
903 if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
904 tcp_callout_stop(tp, tp->tt_rexmt);
905 tcp_callout_stop(tp, tp->tt_persist);
906 tcp_callout_stop(tp, tp->tt_keep);
907 tcp_callout_stop(tp, tp->tt_2msl);
908 tcp_callout_stop(tp, tp->tt_delack);
911 if (tp->t_flags & TF_ONOUTPUTQ) {
912 KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
913 TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
914 tp->t_flags &= ~TF_ONOUTPUTQ;
918 * If we got enough samples through the srtt filter,
919 * save the rtt and rttvar in the routing entry.
920 * 'Enough' is arbitrarily defined as the 16 samples.
921 * 16 samples is enough for the srtt filter to converge
922 * to within 5% of the correct value; fewer samples and
923 * we could save a very bogus rtt.
925 * Don't update the default route's characteristics and don't
926 * update anything that the user "locked".
928 if (tp->t_rttupdated >= 16) {
932 struct sockaddr_in6 *sin6;
934 if ((rt = inp->in6p_route.ro_rt) == NULL)
936 sin6 = (struct sockaddr_in6 *)rt_key(rt);
937 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
940 if ((rt = inp->inp_route.ro_rt) == NULL ||
941 ((struct sockaddr_in *)rt_key(rt))->
942 sin_addr.s_addr == INADDR_ANY)
945 if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
946 i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
947 if (rt->rt_rmx.rmx_rtt && i)
949 * filter this update to half the old & half
950 * the new values, converting scale.
951 * See route.h and tcp_var.h for a
952 * description of the scaling constants.
955 (rt->rt_rmx.rmx_rtt + i) / 2;
957 rt->rt_rmx.rmx_rtt = i;
958 tcpstat.tcps_cachedrtt++;
960 if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
962 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
963 if (rt->rt_rmx.rmx_rttvar && i)
964 rt->rt_rmx.rmx_rttvar =
965 (rt->rt_rmx.rmx_rttvar + i) / 2;
967 rt->rt_rmx.rmx_rttvar = i;
968 tcpstat.tcps_cachedrttvar++;
971 * The old comment here said:
972 * update the pipelimit (ssthresh) if it has been updated
973 * already or if a pipesize was specified & the threshhold
974 * got below half the pipesize. I.e., wait for bad news
975 * before we start updating, then update on both good
978 * But we want to save the ssthresh even if no pipesize is
979 * specified explicitly in the route, because such
980 * connections still have an implicit pipesize specified
981 * by the global tcp_sendspace. In the absence of a reliable
982 * way to calculate the pipesize, it will have to do.
984 i = tp->snd_ssthresh;
985 if (rt->rt_rmx.rmx_sendpipe != 0)
986 dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2);
988 dosavessthresh = (i < so->so_snd.ssb_hiwat/2);
989 if (dosavessthresh ||
990 (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
991 (rt->rt_rmx.rmx_ssthresh != 0))) {
993 * convert the limit from user data bytes to
994 * packets then to packet data bytes.
996 i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1001 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1002 sizeof(struct tcpiphdr));
1003 if (rt->rt_rmx.rmx_ssthresh)
1004 rt->rt_rmx.rmx_ssthresh =
1005 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1007 rt->rt_rmx.rmx_ssthresh = i;
1008 tcpstat.tcps_cachedssthresh++;
1013 /* free the reassembly queue, if any */
1014 while((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
1015 TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
1018 atomic_add_int(&tcp_reass_qsize, -1);
1020 /* throw away SACK blocks in scoreboard*/
1021 if (TCP_DO_SACK(tp))
1022 tcp_sack_destroy(&tp->scb);
1024 inp->inp_ppcb = NULL;
1025 soisdisconnected(so);
1026 /* note: pcb detached later on */
1028 tcp_destroy_timermsg(tp);
1029 tcp_output_cancel(tp);
1031 if (tp->t_flags & TF_LISTEN) {
1032 syncache_destroy(tp, tp_inh);
1033 if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
1035 * Pending sockets inheritance only needs
1036 * to be done once in the current thread,
1039 soinherit(so, inp_inh->inp_socket);
1043 so_async_rcvd_drop(so);
1044 /* Drop the reference for the asynchronized pru_rcvd */
1049 * pcbdetach removes any wildcard hash entry on the current CPU.
1058 tcpstat.tcps_closed++;
1062 static __inline void
1063 tcp_drain_oncpu(struct inpcbinfo *pcbinfo)
1065 struct inpcbhead *head = &pcbinfo->pcblisthead;
1069 * Since we run in netisr, it is MP safe, even if
1070 * we block during the inpcb list iteration, i.e.
1071 * we don't need to use inpcb marker here.
1073 ASSERT_IN_NETISR(pcbinfo->cpu);
1075 LIST_FOREACH(inpb, head, inp_list) {
1077 struct tseg_qent *te;
1079 if (inpb->inp_flags & INP_PLACEMARKER)
1082 tcpb = intotcpcb(inpb);
1083 KASSERT(tcpb != NULL, ("tcp_drain_oncpu: tcpb is NULL"));
1085 if ((te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
1086 TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
1087 if (te->tqe_th->th_flags & TH_FIN)
1088 tcpb->t_flags &= ~TF_QUEDFIN;
1091 atomic_add_int(&tcp_reass_qsize, -1);
1098 tcp_drain_dispatch(netmsg_t nmsg)
1101 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1104 tcp_drain_oncpu(&tcbinfo[mycpuid]);
1108 tcp_drain_ipi(void *arg __unused)
1111 struct lwkt_msg *msg = &tcp_drain_netmsg[cpu].lmsg;
1114 if (msg->ms_flags & MSGF_DONE)
1115 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
1128 * Walk the tcpbs, if existing, and flush the reassembly queue,
1129 * if there is one...
1130 * XXX: The "Net/3" implementation doesn't imply that the TCP
1131 * reassembly queue should be flushed, but in a situation
1132 * where we're really low on mbufs, this is potentially
1134 * YYY: We may consider run tcp_drain_oncpu directly here,
1135 * however, that will require M_WAITOK memory allocation
1136 * for the inpcb marker.
1138 CPUMASK_ASSBMASK(mask, ncpus2);
1139 CPUMASK_ANDMASK(mask, smp_active_mask);
1140 if (CPUMASK_TESTNZERO(mask))
1141 lwkt_send_ipiq_mask(mask, tcp_drain_ipi, NULL);
1145 * Notify a tcp user of an asynchronous error;
1146 * store error as soft error, but wake up user
1147 * (for now, won't do anything until can select for soft error).
1149 * Do not wake up user since there currently is no mechanism for
1150 * reporting soft errors (yet - a kqueue filter may be added).
1153 tcp_notify(struct inpcb *inp, int error)
1155 struct tcpcb *tp = intotcpcb(inp);
1158 * Ignore some errors if we are hooked up.
1159 * If connection hasn't completed, has retransmitted several times,
1160 * and receives a second error, give up now. This is better
1161 * than waiting a long time to establish a connection that
1162 * can never complete.
1164 if (tp->t_state == TCPS_ESTABLISHED &&
1165 (error == EHOSTUNREACH || error == ENETUNREACH ||
1166 error == EHOSTDOWN)) {
1168 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1170 tcp_drop(tp, error);
1172 tp->t_softerror = error;
1174 wakeup(&so->so_timeo);
1181 tcp_pcblist(SYSCTL_HANDLER_ARGS)
1184 struct inpcb *marker;
1192 * The process of preparing the TCB list is too time-consuming and
1193 * resource-intensive to repeat twice on every request.
1195 if (req->oldptr == NULL) {
1196 for (ccpu = 0; ccpu < ncpus2; ++ccpu)
1197 n += tcbinfo[ccpu].ipi_count;
1198 req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
1202 if (req->newptr != NULL)
1205 marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
1206 marker->inp_flags |= INP_PLACEMARKER;
1209 * OK, now we're committed to doing something. Run the inpcb list
1210 * for each cpu in the system and construct the output. Use a
1211 * list placemarker to deal with list changes occuring during
1212 * copyout blockages (but otherwise depend on being on the correct
1213 * cpu to avoid races).
1215 origcpu = mycpu->gd_cpuid;
1216 for (ccpu = 0; ccpu < ncpus2 && error == 0; ++ccpu) {
1220 lwkt_migratecpu(ccpu);
1222 n = tcbinfo[ccpu].ipi_count;
1224 LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
1226 while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
1228 * process a snapshot of pcbs, ignoring placemarkers
1229 * and using our own to allow SYSCTL_OUT to block.
1231 LIST_REMOVE(marker, inp_list);
1232 LIST_INSERT_AFTER(inp, marker, inp_list);
1234 if (inp->inp_flags & INP_PLACEMARKER)
1236 if (prison_xinpcb(req->td, inp))
1239 xt.xt_len = sizeof xt;
1240 bcopy(inp, &xt.xt_inp, sizeof *inp);
1241 inp_ppcb = inp->inp_ppcb;
1242 if (inp_ppcb != NULL)
1243 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
1245 bzero(&xt.xt_tp, sizeof xt.xt_tp);
1246 if (inp->inp_socket)
1247 sotoxsocket(inp->inp_socket, &xt.xt_socket);
1248 if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
1252 LIST_REMOVE(marker, inp_list);
1253 if (error == 0 && i < n) {
1254 bzero(&xt, sizeof xt);
1255 xt.xt_len = sizeof xt;
1257 error = SYSCTL_OUT(req, &xt, sizeof xt);
1266 * Make sure we are on the same cpu we were on originally, since
1267 * higher level callers expect this. Also don't pollute caches with
1268 * migrated userland data by (eventually) returning to userland
1269 * on a different cpu.
1271 lwkt_migratecpu(origcpu);
1272 kfree(marker, M_TEMP);
1276 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
1277 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
1280 tcp_getcred(SYSCTL_HANDLER_ARGS)
1282 struct sockaddr_in addrs[2];
1283 struct ucred cred0, *cred = NULL;
1285 int cpu, origcpu, error;
1287 error = priv_check(req->td, PRIV_ROOT);
1290 error = SYSCTL_IN(req, addrs, sizeof addrs);
1295 cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
1296 addrs[0].sin_addr.s_addr, addrs[0].sin_port);
1298 lwkt_migratecpu(cpu);
1300 inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
1301 addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
1302 if (inp == NULL || inp->inp_socket == NULL) {
1304 } else if (inp->inp_socket->so_cred != NULL) {
1305 cred0 = *(inp->inp_socket->so_cred);
1309 lwkt_migratecpu(origcpu);
1314 return SYSCTL_OUT(req, cred, sizeof(struct ucred));
1317 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
1318 0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
1322 tcp6_getcred(SYSCTL_HANDLER_ARGS)
1324 struct sockaddr_in6 addrs[2];
1328 error = priv_check(req->td, PRIV_ROOT);
1331 error = SYSCTL_IN(req, addrs, sizeof addrs);
1335 inp = in6_pcblookup_hash(&tcbinfo[0],
1336 &addrs[1].sin6_addr, addrs[1].sin6_port,
1337 &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
1338 if (inp == NULL || inp->inp_socket == NULL) {
1342 error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
1348 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
1350 tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
1353 struct netmsg_tcp_notify {
1354 struct netmsg_base base;
1355 inp_notify_t nm_notify;
1356 struct in_addr nm_faddr;
1361 tcp_notifyall_oncpu(netmsg_t msg)
1363 struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
1366 in_pcbnotifyall(&tcbinfo[mycpuid], nm->nm_faddr,
1367 nm->nm_arg, nm->nm_notify);
1369 nextcpu = mycpuid + 1;
1370 if (nextcpu < ncpus2)
1371 lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
1373 lwkt_replymsg(&nm->base.lmsg, 0);
1377 tcp_get_inpnotify(int cmd, const struct sockaddr *sa,
1378 int *arg, struct ip **ip0, int *cpuid)
1380 struct ip *ip = *ip0;
1381 struct in_addr faddr;
1382 inp_notify_t notify = tcp_notify;
1384 faddr = ((const struct sockaddr_in *)sa)->sin_addr;
1385 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1388 *arg = inetctlerrmap[cmd];
1389 if (cmd == PRC_QUENCH) {
1390 notify = tcp_quench;
1391 } else if (icmp_may_rst &&
1392 (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1393 cmd == PRC_UNREACH_PORT ||
1394 cmd == PRC_TIMXCEED_INTRANS) &&
1396 notify = tcp_drop_syn_sent;
1397 } else if (cmd == PRC_MSGSIZE) {
1398 const struct icmp *icmp = (const struct icmp *)
1399 ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
1401 *arg = ntohs(icmp->icmp_nextmtu);
1402 notify = tcp_mtudisc;
1403 } else if (PRC_IS_REDIRECT(cmd)) {
1405 notify = in_rtchange;
1406 } else if (cmd == PRC_HOSTDEAD) {
1408 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
1412 if (cpuid != NULL) {
1414 /* Go through all CPUs */
1417 const struct tcphdr *th;
1419 th = (const struct tcphdr *)
1420 ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
1421 *cpuid = tcp_addrcpu(faddr.s_addr, th->th_dport,
1422 ip->ip_src.s_addr, th->th_sport);
1431 tcp_ctlinput(netmsg_t msg)
1433 int cmd = msg->ctlinput.nm_cmd;
1434 struct sockaddr *sa = msg->ctlinput.nm_arg;
1435 struct ip *ip = msg->ctlinput.nm_extra;
1436 struct in_addr faddr;
1437 inp_notify_t notify;
1440 notify = tcp_get_inpnotify(cmd, sa, &arg, &ip, &cpuid);
1444 faddr = ((struct sockaddr_in *)sa)->sin_addr;
1446 const struct tcphdr *th;
1449 if (cpuid != mycpuid)
1452 th = (const struct tcphdr *)
1453 ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
1454 inp = in_pcblookup_hash(&tcbinfo[mycpuid], faddr, th->th_dport,
1455 ip->ip_src, th->th_sport, 0, NULL);
1456 if (inp != NULL && inp->inp_socket != NULL) {
1457 tcp_seq icmpseq = htonl(th->th_seq);
1458 struct tcpcb *tp = intotcpcb(inp);
1460 if (SEQ_GEQ(icmpseq, tp->snd_una) &&
1461 SEQ_LT(icmpseq, tp->snd_max))
1464 struct in_conninfo inc;
1466 inc.inc_fport = th->th_dport;
1467 inc.inc_lport = th->th_sport;
1468 inc.inc_faddr = faddr;
1469 inc.inc_laddr = ip->ip_src;
1473 syncache_unreach(&inc, th);
1475 } else if (msg->ctlinput.nm_direct) {
1476 if (cpuid != ncpus && cpuid != mycpuid)
1478 if (mycpuid >= ncpus2)
1481 in_pcbnotifyall(&tcbinfo[mycpuid], faddr, arg, notify);
1483 struct netmsg_tcp_notify *nm;
1485 ASSERT_IN_NETISR(0);
1486 nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
1487 netmsg_init(&nm->base, NULL, &netisr_afree_rport,
1488 0, tcp_notifyall_oncpu);
1489 nm->nm_faddr = faddr;
1491 nm->nm_notify = notify;
1493 lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
1496 lwkt_replymsg(&msg->lmsg, 0);
1502 tcp6_ctlinput(netmsg_t msg)
1504 int cmd = msg->ctlinput.nm_cmd;
1505 struct sockaddr *sa = msg->ctlinput.nm_arg;
1506 void *d = msg->ctlinput.nm_extra;
1508 inp_notify_t notify = tcp_notify;
1509 struct ip6_hdr *ip6;
1511 struct ip6ctlparam *ip6cp = NULL;
1512 const struct sockaddr_in6 *sa6_src = NULL;
1514 struct tcp_portonly {
1520 if (sa->sa_family != AF_INET6 ||
1521 sa->sa_len != sizeof(struct sockaddr_in6)) {
1526 if (cmd == PRC_QUENCH)
1527 notify = tcp_quench;
1528 else if (cmd == PRC_MSGSIZE) {
1529 struct ip6ctlparam *ip6cp = d;
1530 struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;
1532 arg = ntohl(icmp6->icmp6_mtu);
1533 notify = tcp_mtudisc;
1534 } else if (!PRC_IS_REDIRECT(cmd) &&
1535 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
1539 /* if the parameter is from icmp6, decode it. */
1541 ip6cp = (struct ip6ctlparam *)d;
1543 ip6 = ip6cp->ip6c_ip6;
1544 off = ip6cp->ip6c_off;
1545 sa6_src = ip6cp->ip6c_src;
1549 off = 0; /* fool gcc */
1554 struct in_conninfo inc;
1556 * XXX: We assume that when IPV6 is non NULL,
1557 * M and OFF are valid.
1560 /* check if we can safely examine src and dst ports */
1561 if (m->m_pkthdr.len < off + sizeof *thp)
1564 bzero(&th, sizeof th);
1565 m_copydata(m, off, sizeof *thp, (caddr_t)&th);
1567 in6_pcbnotify(&tcbinfo[0], sa, th.th_dport,
1568 (struct sockaddr *)ip6cp->ip6c_src,
1569 th.th_sport, cmd, arg, notify);
1571 inc.inc_fport = th.th_dport;
1572 inc.inc_lport = th.th_sport;
1573 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
1574 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
1576 syncache_unreach(&inc, &th);
1578 in6_pcbnotify(&tcbinfo[0], sa, 0,
1579 (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
1582 lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
1588 * Following is where TCP initial sequence number generation occurs.
1590 * There are two places where we must use initial sequence numbers:
1591 * 1. In SYN-ACK packets.
1592 * 2. In SYN packets.
1594 * All ISNs for SYN-ACK packets are generated by the syncache. See
1595 * tcp_syncache.c for details.
1597 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1598 * depends on this property. In addition, these ISNs should be
1599 * unguessable so as to prevent connection hijacking. To satisfy
1600 * the requirements of this situation, the algorithm outlined in
1601 * RFC 1948 is used to generate sequence numbers.
1603 * Implementation details:
1605 * Time is based off the system timer, and is corrected so that it
1606 * increases by one megabyte per second. This allows for proper
1607 * recycling on high speed LANs while still leaving over an hour
1610 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1611 * between seeding of isn_secret. This is normally set to zero,
1612 * as reseeding should not be necessary.
1616 #define ISN_BYTES_PER_SECOND 1048576
1618 u_char isn_secret[32];
1619 int isn_last_reseed;
1623 tcp_new_isn(struct tcpcb *tp)
1625 u_int32_t md5_buffer[4];
1628 /* Seed if this is the first use, reseed if requested. */
1629 if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
1630 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
1632 read_random_unlimited(&isn_secret, sizeof isn_secret);
1633 isn_last_reseed = ticks;
1636 /* Compute the md5 hash and return the ISN. */
1638 MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
1639 MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
1641 if (INP_ISIPV6(tp->t_inpcb)) {
1642 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1643 sizeof(struct in6_addr));
1644 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1645 sizeof(struct in6_addr));
1649 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1650 sizeof(struct in_addr));
1651 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1652 sizeof(struct in_addr));
1654 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1655 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1656 new_isn = (tcp_seq) md5_buffer[0];
1657 new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
1662 * When a source quench is received, close congestion window
1663 * to one segment. We will gradually open it again as we proceed.
1666 tcp_quench(struct inpcb *inp, int error)
1668 struct tcpcb *tp = intotcpcb(inp);
1670 KASSERT(tp != NULL, ("tcp_quench: tp is NULL"));
1671 tp->snd_cwnd = tp->t_maxseg;
1676 * When a specific ICMP unreachable message is received and the
1677 * connection state is SYN-SENT, drop the connection. This behavior
1678 * is controlled by the icmp_may_rst sysctl.
1681 tcp_drop_syn_sent(struct inpcb *inp, int error)
1683 struct tcpcb *tp = intotcpcb(inp);
1685 KASSERT(tp != NULL, ("tcp_drop_syn_sent: tp is NULL"));
1686 if (tp->t_state == TCPS_SYN_SENT)
1687 tcp_drop(tp, error);
1691 * When a `need fragmentation' ICMP is received, update our idea of the MSS
1692 * based on the new value in the route. Also nudge TCP to send something,
1693 * since we know the packet we just sent was dropped.
1694 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1697 tcp_mtudisc(struct inpcb *inp, int mtu)
1699 struct tcpcb *tp = intotcpcb(inp);
1701 struct socket *so = inp->inp_socket;
1704 boolean_t isipv6 = INP_ISIPV6(inp);
1706 const boolean_t isipv6 = FALSE;
1709 KASSERT(tp != NULL, ("tcp_mtudisc: tp is NULL"));
1712 * If no MTU is provided in the ICMP message, use the
1713 * next lower likely value, as specified in RFC 1191.
1718 oldmtu = tp->t_maxopd +
1720 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1721 sizeof(struct tcpiphdr));
1722 mtu = ip_next_mtu(oldmtu, 0);
1726 rt = tcp_rtlookup6(&inp->inp_inc);
1728 rt = tcp_rtlookup(&inp->inp_inc);
1730 if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
1731 mtu = rt->rt_rmx.rmx_mtu;
1735 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1736 sizeof(struct tcpiphdr));
1739 * XXX - The following conditional probably violates the TCP
1740 * spec. The problem is that, since we don't know the
1741 * other end's MSS, we are supposed to use a conservative
1742 * default. But, if we do that, then MTU discovery will
1743 * never actually take place, because the conservative
1744 * default is much less than the MTUs typically seen
1745 * on the Internet today. For the moment, we'll sweep
1746 * this under the carpet.
1748 * The conservative default might not actually be a problem
1749 * if the only case this occurs is when sending an initial
1750 * SYN with options and data to a host we've never talked
1751 * to before. Then, they will reply with an MSS value which
1752 * will get recorded and the new parameters should get
1753 * recomputed. For Further Study.
1755 if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
1756 maxopd = rt->rt_rmx.rmx_mssopt;
1760 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1761 sizeof(struct tcpiphdr));
1763 if (tp->t_maxopd <= maxopd)
1765 tp->t_maxopd = maxopd;
1768 if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
1769 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
1770 mss -= TCPOLEN_TSTAMP_APPA;
1772 /* round down to multiple of MCLBYTES */
1773 #if (MCLBYTES & (MCLBYTES - 1)) == 0 /* test if MCLBYTES power of 2 */
1775 mss &= ~(MCLBYTES - 1);
1778 mss = (mss / MCLBYTES) * MCLBYTES;
1781 if (so->so_snd.ssb_hiwat < mss)
1782 mss = so->so_snd.ssb_hiwat;
1786 tp->snd_nxt = tp->snd_una;
1788 tcpstat.tcps_mturesent++;
1792 * Look-up the routing entry to the peer of this inpcb. If no route
1793 * is found and it cannot be allocated the return NULL. This routine
1794 * is called by TCP routines that access the rmx structure and by tcp_mss
1795 * to get the interface MTU.
1798 tcp_rtlookup(struct in_conninfo *inc)
1800 struct route *ro = &inc->inc_route;
1802 if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
1803 /* No route yet, so try to acquire one */
1804 if (inc->inc_faddr.s_addr != INADDR_ANY) {
1806 * unused portions of the structure MUST be zero'd
1807 * out because rtalloc() treats it as opaque data
1809 bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
1810 ro->ro_dst.sa_family = AF_INET;
1811 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1812 ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
1822 tcp_rtlookup6(struct in_conninfo *inc)
1824 struct route_in6 *ro6 = &inc->inc6_route;
1826 if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
1827 /* No route yet, so try to acquire one */
1828 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1830 * unused portions of the structure MUST be zero'd
1831 * out because rtalloc() treats it as opaque data
1833 bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
1834 ro6->ro_dst.sin6_family = AF_INET6;
1835 ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1836 ro6->ro_dst.sin6_addr = inc->inc6_faddr;
1837 rtalloc((struct route *)ro6);
1840 return (ro6->ro_rt);
1845 /* compute ESP/AH header size for TCP, including outer IP header. */
1847 ipsec_hdrsiz_tcp(struct tcpcb *tp)
1855 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1857 MGETHDR(m, M_NOWAIT, MT_DATA);
1862 if (INP_ISIPV6(inp)) {
1863 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1865 th = (struct tcphdr *)(ip6 + 1);
1866 m->m_pkthdr.len = m->m_len =
1867 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1868 tcp_fillheaders(tp, ip6, th, FALSE);
1869 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1873 ip = mtod(m, struct ip *);
1874 th = (struct tcphdr *)(ip + 1);
1875 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1876 tcp_fillheaders(tp, ip, th, FALSE);
1877 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1886 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1888 * This code attempts to calculate the bandwidth-delay product as a
1889 * means of determining the optimal window size to maximize bandwidth,
1890 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1891 * routers. This code also does a fairly good job keeping RTTs in check
1892 * across slow links like modems. We implement an algorithm which is very
1893 * similar (but not meant to be) TCP/Vegas. The code operates on the
1894 * transmitter side of a TCP connection and so only effects the transmit
1895 * side of the connection.
1897 * BACKGROUND: TCP makes no provision for the management of buffer space
1898 * at the end points or at the intermediate routers and switches. A TCP
1899 * stream, whether using NewReno or not, will eventually buffer as
1900 * many packets as it is able and the only reason this typically works is
1901 * due to the fairly small default buffers made available for a connection
1902 * (typicaly 16K or 32K). As machines use larger windows and/or window
1903 * scaling it is now fairly easy for even a single TCP connection to blow-out
1904 * all available buffer space not only on the local interface, but on
1905 * intermediate routers and switches as well. NewReno makes a misguided
1906 * attempt to 'solve' this problem by waiting for an actual failure to occur,
1907 * then backing off, then steadily increasing the window again until another
1908 * failure occurs, ad-infinitum. This results in terrible oscillation that
1909 * is only made worse as network loads increase and the idea of intentionally
1910 * blowing out network buffers is, frankly, a terrible way to manage network
1913 * It is far better to limit the transmit window prior to the failure
1914 * condition being achieved. There are two general ways to do this: First
1915 * you can 'scan' through different transmit window sizes and locate the
1916 * point where the RTT stops increasing, indicating that you have filled the
1917 * pipe, then scan backwards until you note that RTT stops decreasing, then
1918 * repeat ad-infinitum. This method works in principle but has severe
1919 * implementation issues due to RTT variances, timer granularity, and
1920 * instability in the algorithm which can lead to many false positives and
1921 * create oscillations as well as interact badly with other TCP streams
1922 * implementing the same algorithm.
1924 * The second method is to limit the window to the bandwidth delay product
1925 * of the link. This is the method we implement. RTT variances and our
1926 * own manipulation of the congestion window, bwnd, can potentially
1927 * destabilize the algorithm. For this reason we have to stabilize the
1928 * elements used to calculate the window. We do this by using the minimum
1929 * observed RTT, the long term average of the observed bandwidth, and
1930 * by adding two segments worth of slop. It isn't perfect but it is able
1931 * to react to changing conditions and gives us a very stable basis on
1932 * which to extend the algorithm.
1935 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
1944 * If inflight_enable is disabled in the middle of a tcp connection,
1945 * make sure snd_bwnd is effectively disabled.
1947 if (!tcp_inflight_enable) {
1948 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1949 tp->snd_bandwidth = 0;
1954 * Validate the delta time. If a connection is new or has been idle
1955 * a long time we have to reset the bandwidth calculator.
1959 delta_ticks = save_ticks - tp->t_bw_rtttime;
1960 if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
1961 tp->t_bw_rtttime = save_ticks;
1962 tp->t_bw_rtseq = ack_seq;
1963 if (tp->snd_bandwidth == 0)
1964 tp->snd_bandwidth = tcp_inflight_min;
1969 * A delta of at least 1 tick is required. Waiting 2 ticks will
1970 * result in better (bw) accuracy. More than that and the ramp-up
1973 if (delta_ticks == 0 || delta_ticks == 1)
1977 * Sanity check, plus ignore pure window update acks.
1979 if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
1983 * Figure out the bandwidth. Due to the tick granularity this
1984 * is a very rough number and it MUST be averaged over a fairly
1985 * long period of time. XXX we need to take into account a link
1986 * that is not using all available bandwidth, but for now our
1987 * slop will ramp us up if this case occurs and the bandwidth later
1990 ibw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
1991 tp->t_bw_rtttime = save_ticks;
1992 tp->t_bw_rtseq = ack_seq;
1993 bw = ((int64_t)tp->snd_bandwidth * 15 + ibw) >> 4;
1995 tp->snd_bandwidth = bw;
1998 * Calculate the semi-static bandwidth delay product, plus two maximal
1999 * segments. The additional slop puts us squarely in the sweet
2000 * spot and also handles the bandwidth run-up case. Without the
2001 * slop we could be locking ourselves into a lower bandwidth.
2003 * At very high speeds the bw calculation can become overly sensitive
2004 * and error prone when delta_ticks is low (e.g. usually 1). To deal
2005 * with the problem the stab must be scaled to the bw. A stab of 50
2006 * (the default) increases the bw for the purposes of the bwnd
2007 * calculation by 5%.
2009 * Situations Handled:
2010 * (1) Prevents over-queueing of packets on LANs, especially on
2011 * high speed LANs, allowing larger TCP buffers to be
2012 * specified, and also does a good job preventing
2013 * over-queueing of packets over choke points like modems
2014 * (at least for the transmit side).
2016 * (2) Is able to handle changing network loads (bandwidth
2017 * drops so bwnd drops, bandwidth increases so bwnd
2020 * (3) Theoretically should stabilize in the face of multiple
2021 * connections implementing the same algorithm (this may need
2024 * (4) Stability value (defaults to 20 = 2 maximal packets) can
2025 * be adjusted with a sysctl but typically only needs to be on
2026 * very slow connections. A value no smaller then 5 should
2027 * be used, but only reduce this default if you have no other
2031 #define USERTT ((tp->t_srtt + tp->t_rttvar) + tcp_inflight_adjrtt)
2032 bw += bw * tcp_inflight_stab / 1000;
2033 bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
2034 (int)tp->t_maxseg * 2;
2037 if (tcp_inflight_debug > 0) {
2039 if ((u_int)(save_ticks - ltime) >= hz / tcp_inflight_debug) {
2041 kprintf("%p ibw %ld bw %ld rttvar %d srtt %d "
2042 "bwnd %ld delta %d snd_win %ld\n",
2043 tp, ibw, bw, tp->t_rttvar, tp->t_srtt,
2044 bwnd, delta_ticks, tp->snd_wnd);
2047 if ((long)bwnd < tcp_inflight_min)
2048 bwnd = tcp_inflight_min;
2049 if (bwnd > tcp_inflight_max)
2050 bwnd = tcp_inflight_max;
2051 if ((long)bwnd < tp->t_maxseg * 2)
2052 bwnd = tp->t_maxseg * 2;
2053 tp->snd_bwnd = bwnd;
2057 tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
2060 struct inpcb *inp = tp->t_inpcb;
2062 boolean_t isipv6 = INP_ISIPV6(inp);
2064 const boolean_t isipv6 = FALSE;
2068 if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
2069 tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
2070 if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
2071 tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
2074 rt = tcp_rtlookup6(&inp->inp_inc);
2076 rt = tcp_rtlookup(&inp->inp_inc);
2078 rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
2079 rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
2080 *maxsegs = tcp_iw_maxsegs;
2081 *capsegs = tcp_iw_capsegs;
2084 *maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
2085 *capsegs = rt->rt_rmx.rmx_iwcapsegs;
2089 tcp_initial_window(struct tcpcb *tp)
2091 if (tcp_do_rfc3390) {
2094 * "If the SYN or SYN/ACK is lost, the initial window
2095 * used by a sender after a correctly transmitted SYN
2096 * MUST be one segment consisting of MSS bytes."
2098 * However, we do something a little bit more aggressive
2099 * then RFC3390 here:
2100 * - Only if time spent in the SYN or SYN|ACK retransmition
2101 * >= 3 seconds, the IW is reduced. We do this mainly
2102 * because when RFC3390 is published, the initial RTO is
2103 * still 3 seconds (the threshold we test here), while
2104 * after RFC6298, the initial RTO is 1 second. This
2105 * behaviour probably still falls within the spirit of
2107 * - When IW is reduced, 2*MSS is used instead of 1*MSS.
2108 * Mainly to avoid sender and receiver deadlock until
2109 * delayed ACK timer expires. And even RFC2581 does not
2110 * try to reduce IW upon SYN or SYN|ACK retransmition
2114 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
2116 if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
2117 return (2 * tp->t_maxseg);
2119 u_long maxsegs, capsegs;
2121 tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
2122 return min(maxsegs * tp->t_maxseg,
2123 max(2 * tp->t_maxseg, capsegs * 1460));
2127 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
2129 * Mainly to avoid sender and receiver deadlock
2130 * until delayed ACK timer expires.
2132 return (2 * tp->t_maxseg);
2136 #ifdef TCP_SIGNATURE
2138 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
2140 * We do this over ip, tcphdr, segment data, and the key in the SADB.
2141 * When called from tcp_input(), we can be sure that th_sum has been
2142 * zeroed out and verified already.
2144 * Return 0 if successful, otherwise return -1.
2146 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
2147 * search with the destination IP address, and a 'magic SPI' to be
2148 * determined by the application. This is hardcoded elsewhere to 1179
2149 * right now. Another branch of this code exists which uses the SPD to
2150 * specify per-application flows but it is unstable.
2153 tcpsignature_compute(
2154 struct mbuf *m, /* mbuf chain */
2155 int len, /* length of TCP data */
2156 int optlen, /* length of TCP options */
2157 u_char *buf, /* storage for MD5 digest */
2158 u_int direction) /* direction of flow */
2160 struct ippseudo ippseudo;
2164 struct ipovly *ipovly;
2165 struct secasvar *sav;
2168 struct ip6_hdr *ip6;
2169 struct in6_addr in6;
2175 KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
2176 KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));
2178 * Extract the destination from the IP header in the mbuf.
2180 ip = mtod(m, struct ip *);
2182 ip6 = NULL; /* Make the compiler happy. */
2185 * Look up an SADB entry which matches the address found in
2188 switch (IP_VHL_V(ip->ip_vhl)) {
2190 sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
2191 IPPROTO_TCP, htonl(TCP_SIG_SPI));
2194 case (IPV6_VERSION >> 4):
2195 ip6 = mtod(m, struct ip6_hdr *);
2196 sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
2197 IPPROTO_TCP, htonl(TCP_SIG_SPI));
2206 kprintf("%s: SADB lookup failed\n", __func__);
2212 * Step 1: Update MD5 hash with IP pseudo-header.
2214 * XXX The ippseudo header MUST be digested in network byte order,
2215 * or else we'll fail the regression test. Assume all fields we've
2216 * been doing arithmetic on have been in host byte order.
2217 * XXX One cannot depend on ipovly->ih_len here. When called from
2218 * tcp_output(), the underlying ip_len member has not yet been set.
2220 switch (IP_VHL_V(ip->ip_vhl)) {
2222 ipovly = (struct ipovly *)ip;
2223 ippseudo.ippseudo_src = ipovly->ih_src;
2224 ippseudo.ippseudo_dst = ipovly->ih_dst;
2225 ippseudo.ippseudo_pad = 0;
2226 ippseudo.ippseudo_p = IPPROTO_TCP;
2227 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
2228 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
2229 th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
2230 doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
2234 * RFC 2385, 2.0 Proposal
2235 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
2236 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
2237 * extended next header value (to form 32 bits), and 32-bit segment
2239 * Note: Upper-Layer Packet Length comes before Next Header.
2241 case (IPV6_VERSION >> 4):
2243 in6_clearscope(&in6);
2244 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
2246 in6_clearscope(&in6);
2247 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
2248 plen = htonl(len + sizeof(struct tcphdr) + optlen);
2249 MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
2251 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2252 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2253 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2255 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
2256 th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
2257 doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
2266 * Step 2: Update MD5 hash with TCP header, excluding options.
2267 * The TCP checksum must be set to zero.
2269 savecsum = th->th_sum;
2271 MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
2272 th->th_sum = savecsum;
2274 * Step 3: Update MD5 hash with TCP segment data.
2275 * Use m_apply() to avoid an early m_pullup().
2278 m_apply(m, doff, len, tcpsignature_apply, &ctx);
2280 * Step 4: Update MD5 hash with shared secret.
2282 MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
2283 MD5Final(buf, &ctx);
2284 key_sa_recordxfer(sav, m);
2290 tcpsignature_apply(void *fstate, void *data, unsigned int len)
2293 MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
2296 #endif /* TCP_SIGNATURE */
2299 tcp_drop_sysctl_dispatch(netmsg_t nmsg)
2301 struct lwkt_msg *lmsg = &nmsg->lmsg;
2302 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
2303 struct sockaddr_storage *addrs = lmsg->u.ms_resultp;
2305 struct sockaddr_in *fin, *lin;
2307 struct sockaddr_in6 *fin6, *lin6;
2308 struct in6_addr f6, l6;
2312 switch (addrs[0].ss_family) {
2315 fin6 = (struct sockaddr_in6 *)&addrs[0];
2316 lin6 = (struct sockaddr_in6 *)&addrs[1];
2317 error = in6_embedscope(&f6, fin6, NULL, NULL);
2320 error = in6_embedscope(&l6, lin6, NULL, NULL);
2323 inp = in6_pcblookup_hash(&tcbinfo[mycpuid], &f6,
2324 fin6->sin6_port, &l6, lin6->sin6_port, FALSE, NULL);
2329 fin = (struct sockaddr_in *)&addrs[0];
2330 lin = (struct sockaddr_in *)&addrs[1];
2331 inp = in_pcblookup_hash(&tcbinfo[mycpuid], fin->sin_addr,
2332 fin->sin_port, lin->sin_addr, lin->sin_port, FALSE, NULL);
2337 * Must not reach here, since the address family was
2338 * checked in sysctl handler.
2340 panic("unknown address family %d", addrs[0].ss_family);
2343 struct tcpcb *tp = intotcpcb(inp);
2345 KASSERT((inp->inp_flags & INP_WILDCARD) == 0,
2346 ("in wildcard hash"));
2347 KASSERT(tp != NULL, ("tcp_drop_sysctl_dispatch: tp is NULL"));
2348 KASSERT((tp->t_flags & TF_LISTEN) == 0, ("listen socket"));
2349 tcp_drop(tp, ECONNABORTED);
2357 lwkt_replymsg(lmsg, error);
2361 sysctl_tcp_drop(SYSCTL_HANDLER_ARGS)
2363 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
2364 struct sockaddr_storage addrs[2];
2365 struct sockaddr_in *fin, *lin;
2367 struct sockaddr_in6 *fin6, *lin6;
2369 struct netmsg_base nmsg;
2370 struct lwkt_msg *lmsg = &nmsg.lmsg;
2371 struct lwkt_port *port = NULL;
2380 if (req->oldptr != NULL || req->oldlen != 0)
2382 if (req->newptr == NULL)
2384 if (req->newlen < sizeof(addrs))
2386 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
2390 switch (addrs[0].ss_family) {
2393 fin6 = (struct sockaddr_in6 *)&addrs[0];
2394 lin6 = (struct sockaddr_in6 *)&addrs[1];
2395 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
2396 lin6->sin6_len != sizeof(struct sockaddr_in6))
2398 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr) ||
2399 IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
2400 return (EADDRNOTAVAIL);
2402 error = sa6_embedscope(fin6, V_ip6_use_defzone);
2405 error = sa6_embedscope(lin6, V_ip6_use_defzone);
2409 port = tcp6_addrport();
2414 fin = (struct sockaddr_in *)&addrs[0];
2415 lin = (struct sockaddr_in *)&addrs[1];
2416 if (fin->sin_len != sizeof(struct sockaddr_in) ||
2417 lin->sin_len != sizeof(struct sockaddr_in))
2419 port = tcp_addrport(fin->sin_addr.s_addr, fin->sin_port,
2420 lin->sin_addr.s_addr, lin->sin_port);
2427 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
2428 tcp_drop_sysctl_dispatch);
2429 lmsg->u.ms_resultp = addrs;
2430 return lwkt_domsg(port, lmsg, 0);
2433 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, drop,
2434 CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
2435 0, sysctl_tcp_drop, "", "Drop TCP connection");