/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 */
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>

#include <net/route.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
#include <netinet6/ip6protosw.h>
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#include <netinet6/ipsec6.h>
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>

#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>
#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	3

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

static struct lwkt_token tcp_port_token =
		LWKT_TOKEN_INITIALIZER(tcp_port_token);

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS, such as 20, and have to send
 * hundreds of packets instead of one.  The effect scales with the
 * available bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending unreasonably small
 * packets.
 */
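/*
 * Illustrative arithmetic (a sketch added for clarity, not original
 * commentary): pushing 1 MB of data with an MSS of 20 requires
 * 1048576 / 20 = 52429 segments, versus 1048576 / 1460 = 719 segments
 * at a normal Ethernet MSS, i.e. roughly 73 times the per-packet header,
 * interrupt, and scheduling overhead for the same payload.
 */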
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * links are supposed to provide flow control, it appears that either they
 * do not actually do so or open-source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
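/*
 * Illustrative note on units (added for clarity, not original commentary):
 * the slop/stab value is expressed in tenths of a maximal segment and is
 * added to the computed bandwidth-delay product in
 * tcp_xmit_bandwidth_limit() below as tcp_inflight_stab * t_maxseg / 10.
 * With the default of 50 and a 1460-byte MSS that is 50 * 1460 / 10 = 7300
 * bytes, i.e. 5 extra segments of headroom; the suggested WAN value of 20
 * yields 2 segments.
 */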
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");
static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;

static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize.
 */
#define TCBHASHSIZE	512
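/*
 * Illustrative usage (an assumed example, not original commentary): the
 * tunable can be set at boot time from /boot/loader.conf, e.g.
 *
 *	net.inet.tcp.tcbhashsize="2048"
 *
 * Non-power-of-2 values are rejected in tcp_init() below and fall back
 * to the safe default of 512.
 */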
/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNM1		(ALIGNMENT - 1)
struct inp_tp {
	union {
		struct inpcb inp;
		char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct tcpcb tcb;
	struct tcp_callout inp_tp_rexmt;
	struct tcp_callout inp_tp_persist;
	struct tcp_callout inp_tp_keep;
	struct tcp_callout inp_tp_2msl;
	struct tcp_callout inp_tp_delack;
	struct netmsg_tcp_timer inp_tp_timermsg;
	struct netmsg_base inp_tp_sndmore;
};
void
tcp_init(void)
{
	struct inpcbporthead *porthashbase;
	struct inpcbinfo *ticb;
	u_long porthashmask;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		   25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512;	/* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb);
		ticb->hashbase = hashinit(hashsize, M_PCB,
					  &ticb->hashmask);
		ticb->porthashbase = porthashbase;
		ticb->porthashmask = porthashmask;
		ticb->porttoken = &tcp_port_token;
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
						  &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
						  &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR	(sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR	(sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < ncpus; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}
static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip = (struct ip *)ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_flags = 0;
}
/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keepalive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int ipflags = 0;
	long win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &tp->t_inpcb->in6p_route;
			else
				ro = &tp->t_inpcb->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}

	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a, b, type)	{ type t; t = a; a = b; b = t; }
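		/*
		 * Note on the macro above (an observation added for
		 * clarity, not original commentary): xchg expands to a
		 * block with its own temporary `t', so it must not be
		 * handed an argument expression that itself names a
		 * variable `t', and as a brace-enclosed statement it
		 * cannot be used where a single expression followed by a
		 * semicolon is required (e.g. an unbraced if/else arm).
		 */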
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL && tp->rcv_scale)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);

	if (isipv6) {
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
						ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
			   tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;
	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * timer message.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;

	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
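	/*
	 * Illustrative arithmetic (added for clarity, not original
	 * commentary): with t_srtt = TCPTV_SRTTBASE = 0, the assignment
	 * above makes the unscaled rttvar equal TCPTV_RTOBASE / 4, so the
	 * classic estimate srtt + 4 * rttvar works out to exactly
	 * TCPTV_RTOBASE, matching the t_rxtcur initialization below.
	 */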
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;

	return (tp);		/* XXX */
}
/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}
struct netmsg_listen_detach {
	struct netmsg_base	base;
	struct tcpcb		*nm_tp;
	struct tcpcb		*nm_tp_inh;
};

static void
tcp_listen_detach_handler(netmsg_t msg)
{
	struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
	struct tcpcb *tp = nmsg->nm_tp;
	int cpu = mycpuid, nextcpu;

	if (tp->t_flags & TF_LISTEN)
		syncache_destroy(tp, nmsg->nm_tp_inh);

	in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
	else
		lwkt_replymsg(&nmsg->base.lmsg, 0);
}
/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance can only happen on the
		 * listen(2) sockets w/ SO_REUSEPORT set.
		 */
		KASSERT(&curthread->td_msgport == netisr_cpuport(0),
			("listen socket close not in netisr0"));
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD_MP indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function.
	 *
	 * After cleaning up the inp's hash and syncache entries, this inp
	 * will no longer be available to the rest of the protocol threads,
	 * so we are safe to whack the inp in the following code.
	 */
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		KKASSERT(&curthread->td_msgport == netisr_cpuport(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);

		inp->inp_flags &= ~INP_WILDCARD_MP;
	}

	KKASSERT(tp->t_state != TCPS_TERMINATING);
883 tp->t_state = TCPS_TERMINATING;
886 * Make sure that all of our timers are stopped before we
887 * delete the PCB. For listen TCP socket (tp->tt_msg == NULL),
888 * timers are never used. If timer message is never created
889 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
891 if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
892 tcp_callout_stop(tp, tp->tt_rexmt);
893 tcp_callout_stop(tp, tp->tt_persist);
894 tcp_callout_stop(tp, tp->tt_keep);
895 tcp_callout_stop(tp, tp->tt_2msl);
896 tcp_callout_stop(tp, tp->tt_delack);
899 if (tp->t_flags & TF_ONOUTPUTQ) {
900 KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
901 TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
902 tp->t_flags &= ~TF_ONOUTPUTQ;
906 * If we got enough samples through the srtt filter,
907 * save the rtt and rttvar in the routing entry.
908 * 'Enough' is arbitrarily defined as the 16 samples.
909 * 16 samples is enough for the srtt filter to converge
910 * to within 5% of the correct value; fewer samples and
911 * we could save a very bogus rtt.
913 * Don't update the default route's characteristics and don't
914 * update anything that the user "locked".
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else {
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;
		}

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat/2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
				      (isipv6 ?
				       sizeof(struct ip6_hdr) +
				       sizeof(struct tcphdr) :
				       sizeof(struct tcpiphdr)));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * pcbdetach removes any wildcard hash entry on the current CPU.
	 */
#ifdef INET6
	if (isafinet6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}
static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *marker;
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	/*
	 * Allows us to block while running the list
	 */
	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;
	LIST_INSERT_HEAD(head, marker, inp_list);

	while ((inpb = LIST_NEXT(marker, inp_list)) != NULL) {
		if ((inpb->inp_flags & INP_PLACEMARKER) == 0 &&
		    (tcpb = intotcpcb(inpb)) != NULL &&
		    (te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		} else {
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inpb, marker, inp_list);
		}
	}
	LIST_REMOVE(marker, inp_list);
	kfree(marker, M_TEMP);
}
struct netmsg_tcp_drain {
	struct netmsg_base	base;
	struct inpcbhead	*nm_head;
};

static void
tcp_drain_handler(netmsg_t msg)
{
	struct netmsg_tcp_drain *nm = (void *)msg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(&nm->base.lmsg, 0);
}

void
tcp_drain(void)
{
	int cpu;

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 */
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *nm;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			nm = kmalloc(sizeof(struct netmsg_tcp_drain),
				     M_LWKTMSG, M_NOWAIT);
			if (nm == NULL)
				continue;
			netmsg_init(&nm->base, NULL, &netisr_afree_rport,
				    0, tcp_drain_handler);
			nm->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(netisr_cpuport(cpu), &nm->base.lmsg);
		}
	}
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
	} else {
		tp->t_softerror = error;
	}
#if 0
	wakeup(&so->so_timeo);
#endif
}
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & CPUMASK(cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		n = tcbinfo[cpu_id].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker,
				 inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu, error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}

	if (mapped)
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, 0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
struct netmsg_tcp_notify {
	struct netmsg_base base;
	void		(*nm_notify)(struct inpcb *, int);
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nm->nm_faddr,
	    nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
static void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmpseq;
	int arg, cpu;

	if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		goto done;
	}

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		goto done;

	arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
		   (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		    cmd == PRC_UNREACH_PORT ||
		    cmd == PRC_TIMXCEED_INTRANS) &&
		   ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		struct icmp *icmp = (struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	}

	if (ip != NULL) {
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
				  ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
					ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			icmpseq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				(*notify)(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
			syncache_unreach(&inc, th);
		}
	} else {
		struct netmsg_tcp_notify *nm;

		KKASSERT(&curthread->td_msgport == netisr_cpuport(0));
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
			    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}
static void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		   ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576
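/*
 * Illustrative arithmetic (added for clarity, not original commentary):
 * with a 32-bit sequence space advancing at ISN_BYTES_PER_SECOND
 * (2^20 bytes/sec), the ISN wraps after 2^32 / 2^20 = 4096 seconds,
 * roughly 68 minutes, which is the "over an hour" margin mentioned above.
 */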
u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}
/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp == NULL)
		return;

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else {
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
	}
	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0	/* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif
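	/*
	 * Illustrative arithmetic (added for clarity, not original
	 * commentary): x & ~(MCLBYTES - 1) rounds x down to a multiple of
	 * MCLBYTES only because MCLBYTES is a power of 2; e.g. with
	 * MCLBYTES = 2048, mss = 5556 becomes 5556 & ~2047 = 4096, the
	 * same result as (5556 / 2048) * 2048.
	 */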
	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;

	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then NULL is returned.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif
#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz = 0;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th, FALSE);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th, FALSE);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	long bw;
	long bwnd;
	u_long save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
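	/*
	 * Illustrative note (added for clarity, not original commentary):
	 * the line above is an exponential moving average with a gain of
	 * 1/16, i.e. new_avg = (15 * old_avg + sample) / 16, so a single
	 * noisy tick sample moves the long-term bandwidth estimate by
	 * only about 6%.
	 */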
	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a bit of work).
	 *
	 *	(4) Stability value (defaults to 50 = 5 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT
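	/*
	 * Illustrative worked example (assumed numbers, not original
	 * commentary): on a 10 Mbit/s path bw settles near 1,250,000
	 * bytes/sec; with a stabilized RTT of 50 ms the product term is
	 * 1250000 * 0.050 = 62500 bytes, and the default slop adds
	 * 50 * 1460 / 10 = 7300 bytes, giving a bwnd of roughly 69800
	 * bytes (about 47 segments).
	 */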
	if (tcp_inflight_debug > 0) {
		static int ltime;

		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
static void
tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
{
	struct rtentry *rt;
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
		tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
	if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
		tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL ||
	    rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
	    rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
		*maxsegs = tcp_iw_maxsegs;
		*capsegs = tcp_iw_capsegs;
		return;
	}
	*maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
	*capsegs = rt->rt_rmx.rmx_iwcapsegs;
}
u_long
tcp_initial_window(struct tcpcb *tp)
{
	if (tcp_do_rfc3390) {
		/*
		 * RFC3390:
		 * "If the SYN or SYN/ACK is lost, the initial window
		 *  used by a sender after a correctly transmitted SYN
		 *  MUST be one segment consisting of MSS bytes."
		 *
		 * However, we do something a little bit more aggressive
		 * than RFC3390 here:
		 * - Only if time spent in the SYN or SYN|ACK retransmission
		 *   >= 3 seconds, the IW is reduced.  We do this mainly
		 *   because when RFC3390 was published, the initial RTO was
		 *   still 3 seconds (the threshold we test here), while
		 *   after RFC6298, the initial RTO is 1 second.  This
		 *   behaviour probably still falls within the spirit of
		 *   RFC3390.
		 * - When IW is reduced, 2*MSS is used instead of 1*MSS.
		 *   Mainly to avoid sender and receiver deadlock until
		 *   delayed ACK timer expires.  And even RFC2581 does not
		 *   try to reduce IW upon SYN or SYN|ACK retransmission
		 *   timeout.
		 *
		 * See also:
		 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
		 */
		if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
			return (2 * tp->t_maxseg);
		} else {
			u_long maxsegs, capsegs;

			tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
			return min(maxsegs * tp->t_maxseg,
				   max(2 * tp->t_maxseg, capsegs * 1460));
		}
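		/*
		 * Illustrative worked example (added for clarity, not
		 * original commentary): with the defaults
		 * tcp_iw_maxsegs = 4, tcp_iw_capsegs = 3 and a 1460-byte
		 * MSS this is min(4 * 1460, max(2 * 1460, 3 * 1460)) =
		 * 4380 bytes, i.e. a 3-segment initial window, matching
		 * RFC 3390's min(4*MSS, max(2*MSS, 4380)) for MSS = 1460.
		 */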
	} else {
		/*
		 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
		 *
		 * Mainly to avoid sender and receiver deadlock
		 * until delayed ACK timer expires.
		 */
		return (2 * tp->t_maxseg);
	}
}
#ifdef TCP_SIGNATURE
/*
 * Compute TCP-MD5 hash of a TCP segment (RFC2385).
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows, but it is unstable.
 */
int
tcpsignature_compute(
	struct mbuf *m,		/* mbuf chain */
	int len,		/* length of TCP data */
	int optlen,		/* length of TCP options */
	u_char *buf,		/* storage for MD5 digest */
	u_int direction)	/* direction of flow */
{
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6;
	struct in6_addr in6;
	uint32_t plen;
	uint16_t nhdr;
#endif
	u_short savecsum;

	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));

	/*
	 * Extract the destination from the IP header in the mbuf.
	 */
	ip = mtod(m, struct ip *);
#ifdef INET6
	ip6 = NULL;	/* Make the compiler happy. */
#endif

	/*
	 * Look up an SADB entry which matches the address found in
	 * the packet.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
		    IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#ifdef INET6
	case (IPV6_VERSION >> 4):
		ip6 = mtod(m, struct ip6_hdr *);
		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
		    IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}
	if (sav == NULL) {
		kprintf("%s: SADB lookup failed\n", __func__);
		return (EINVAL);
	}

	MD5Init(&ctx);

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		ipovly = (struct ipovly *)ip;
		ippseudo.ippseudo_src = ipovly->ih_src;
		ippseudo.ippseudo_dst = ipovly->ih_dst;
		ippseudo.ippseudo_pad = 0;
		ippseudo.ippseudo_p = IPPROTO_TCP;
		ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
		th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
		doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
		break;
#ifdef INET6
	/*
	 * RFC 2385, 2.0  Proposal
	 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
	 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
	 * extended next header value (to form 32 bits), and 32-bit segment
	 * length.
	 * Note: Upper-Layer Packet Length comes before Next Header.
	 */
	case (IPV6_VERSION >> 4):
		in6 = ip6->ip6_src;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		in6 = ip6->ip6_dst;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		plen = htonl(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
		nhdr = 0;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		nhdr = IPPROTO_TCP;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
		doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *	   Use m_apply() to avoid an early m_pullup().
	 */
	m_apply(m, doff, len, tcpsignature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);
	key_sa_recordxfer(sav, m);
	return (0);
}
int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{
	MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
	return (0);
}
#endif /* TCP_SIGNATURE */