 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>

#include <net/route.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
#include <netinet6/ip6protosw.h>
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#include <netinet6/ipsec6.h>
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>

#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>
#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif

KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	3

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending overly small packets.
 */
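/*
 * Illustrative arithmetic (an added sketch, not part of the original
 * comment): pushing 1 MB through a connection at a normal MSS of 1460
 * takes about 1048576 / 1460 ~= 719 segments, while a forced MSS of 20
 * takes 1048576 / 20 = 52429 segments -- roughly 73 times the
 * per-packet header, checksum, and interrupt work for the same
 * payload, which is exactly the amplification the tcp_minmss check
 * below guards against.
 */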
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to be flow controlled, it looks like either that doesn't
 * actually happen or open-source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
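/*
 * Example usage (added illustration): the limiter is tuned entirely
 * through sysctl(8), using the knobs defined below.  On a slow WAN
 * link one might run, as root:
 *
 *	sysctl net.inet.tcp.inflight_stab=20
 *	sysctl net.inet.tcp.inflight_debug=1	# watch bwnd calculations
 *
 * and then watch the per-connection bw/bwnd numbers printed by the
 * debug code in tcp_xmit_bandwidth_limit() before settling on a value.
 */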
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");
static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus2; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#define TCBHASHSIZE	512
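/*
 * Example (added illustration): because TCBHASHSIZE is only a default,
 * the hash size can be grown on busy servers without recompiling by
 * setting the tunable from the boot loader, e.g. in /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="2048"
 *
 * tcp_init() below fetches this with TUNABLE_INT_FETCH() and falls
 * back to a safe default if the value is not a power of 2.
 */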
/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct inp_tp {
	union {
		struct inpcb inp;
		char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct tcpcb tcb;
	struct tcp_callout inp_tp_rexmt;
	struct tcp_callout inp_tp_persist;
	struct tcp_callout inp_tp_keep;
	struct tcp_callout inp_tp_2msl;
	struct tcp_callout inp_tp_delack;
	struct netmsg_tcp_timer inp_tp_timermsg;
	struct netmsg_base inp_tp_sndmore;
};
void
tcp_init(void)
{
	struct inpcbportinfo *portinfo;
	struct inpcbinfo *ticb;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * Note: tcptemp is used for keepalives, and it is OK for an
	 * allocation to fail, so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
	    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;

	portinfo = kmalloc_cachealign(sizeof(*portinfo) * ncpus2, M_PCB,
	    M_WAITOK);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb);
		ticb->hashbase = hashinit(hashsize, M_PCB,
					  &ticb->hashmask);
		in_pcbportinfo_init(&portinfo[cpu], hashsize, TRUE, cpu);
		ticb->portinfo = portinfo;
		ticb->portinfo_mask = ncpus2_mask;
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
						  &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
						  &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < ncpus2; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}
static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else {
		struct ip *ip = (struct ip *)ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_flags = 0;
}
/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keepalive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
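/*
 * Illustrative call (an added sketch of the keepalive case described
 * above; the real call site lives in the timer code, not in this
 * file): with a template from tcp_maketemplate(), a keepalive probe
 * that coaxes an ACK out of the peer looks roughly like
 *
 *	struct tcptemp *t_template = tcp_maketemplate(tp);
 *	if (t_template != NULL) {
 *		tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
 *		    NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
 *		tcp_freetemplate(t_template);
 *	}
 *
 * m == NULL selects the "copy the template header" path below, and
 * the snd_una - 1 sequence number forces the peer to re-ACK.
 */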
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	int ipflags = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &tp->t_inpcb->in6p_route;
			else
				ro = &tp->t_inpcb->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}

	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    (ro6 && ro6->ro_rt) ?
			ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
		    tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * tcpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
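	/*
	 * Worked example (added note; assumes the traditional BSD defaults
	 * of keepidle = 2 hours, keepintvl = 75 seconds, keepcnt = 8):
	 * probing starts after 2 hours of idle time, and t_maxidle is
	 * 75 * 8 = 600 seconds, so a dead connection is dropped roughly
	 * 10 minutes after the first unanswered keepalive probe.
	 */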
	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;

	return (tp);		/* XXX */
}
/*
 * Drop a TCP connection, reporting the specified error.
 * If the connection is synchronized, then send a RST to the peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}
struct netmsg_listen_detach {
	struct netmsg_base	base;
	struct tcpcb		*nm_tp;
	struct tcpcb		*nm_tp_inh;
};

static void
tcp_listen_detach_handler(netmsg_t msg)
{
	struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
	struct tcpcb *tp = nmsg->nm_tp;
	int cpu = mycpuid, nextcpu;

	if (tp->t_flags & TF_LISTEN)
		syncache_destroy(tp, nmsg->nm_tp_inh);

	in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
	else
		lwkt_replymsg(&nmsg->base.lmsg, 0);
}
/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance could only happen on the
		 * listen(2) sockets w/ SO_REUSEPORT set.
		 */
		KASSERT(&curthread->td_msgport == netisr_cpuport(0),
		    ("listen socket close not in netisr0"));
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD_MP indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function.
	 *
	 * After cleaning up the inp's hash and syncache entries, this inp
	 * will no longer be available to the rest of the protocol threads,
	 * so we are safe to whack the inp in the following code.
	 */
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		KKASSERT(&curthread->td_msgport == netisr_cpuport(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);

		inp->inp_flags &= ~INP_WILDCARD_MP;
	}

	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
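	/*
	 * Unit-conversion example (added note; uses the standard constants
	 * TCP_RTT_SCALE = 32 and RTM_RTTUNIT = 1000000, and assumes
	 * hz = 100): t_srtt holds ticks scaled by TCP_RTT_SCALE while the
	 * route metrics want microseconds, so a smoothed RTT of 100 ms is
	 * stored as t_srtt = 10 * 32 = 320 and converts below as
	 * i = 320 * (1000000 / (100 * 32)) = 320 * 312 = 99840 usec,
	 * i.e. ~100 ms (the constant's integer truncation costs well
	 * under one percent).
	 */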
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
		if ((rt = inp->inp_route.ro_rt) == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->
		    sin_addr.s_addr == INADDR_ANY)
			goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
				      (isipv6 ?
				       sizeof(struct ip6_hdr) +
				       sizeof(struct tcphdr) :
				       sizeof(struct tcpiphdr)));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread,
			 * i.e. netisr0.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * NOTE:
	 * pcbdetach removes any wildcard hash entry on the current CPU.
	 */
#ifdef INET6
	if (isafinet6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}
static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *marker;
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	/*
	 * Allows us to block while running the list
	 */
	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;
	LIST_INSERT_HEAD(head, marker, inp_list);

	while ((inpb = LIST_NEXT(marker, inp_list)) != NULL) {
		if ((inpb->inp_flags & INP_PLACEMARKER) == 0 &&
		    (tcpb = intotcpcb(inpb)) != NULL &&
		    (te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		} else {
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inpb, marker, inp_list);
		}
	}
	LIST_REMOVE(marker, inp_list);
	kfree(marker, M_TEMP);
}

struct netmsg_tcp_drain {
	struct netmsg_base	base;
	struct inpcbhead	*nm_head;
};

static void
tcp_drain_handler(netmsg_t msg)
{
	struct netmsg_tcp_drain *nm = (void *)msg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(&nm->base.lmsg, 0);
}
void
tcp_drain(void)
{
	int cpu;

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *      reassembly queue should be flushed, but in a situation
	 *      where we're really low on mbufs, this is potentially
	 *      useful.
	 */
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *nm;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			nm = kmalloc(sizeof(struct netmsg_tcp_drain),
				     M_LWKTMSG, M_NOWAIT);
			if (nm == NULL)
				continue;
			netmsg_init(&nm->base, NULL, &netisr_afree_rport,
				    0, tcp_drain_handler);
			nm->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(netisr_cpuport(cpu), &nm->base.lmsg);
		}
	}
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	struct xtcpcb xt;
	void *inp_ppcb;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus2; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 0; ccpu < ncpus2 && error == 0; ++ccpu) {
		lwkt_migratecpu(ccpu);

		n = tcbinfo[ccpu].ipi_count;
		i = 0;

		LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_migratecpu(origcpu);
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu, error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}

	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, 0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif
struct netmsg_tcp_notify {
	struct netmsg_base	base;
	void			(*nm_notify)(struct inpcb *, int);
	struct in_addr		nm_faddr;
	int			nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nm->nm_faddr,
	    nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}

void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmpseq;
	int arg, cpu;

	if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		goto done;
	}

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		goto done;

	arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
		   (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		    cmd == PRC_UNREACH_PORT ||
		    cmd == PRC_TIMXCEED_INTRANS) &&
		   ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		struct icmp *icmp = (struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	}

	if (ip != NULL) {
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
		    ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			icmpseq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				(*notify)(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
			inc.inc_isipv6 = 0;
			syncache_unreach(&inc, th);
		}
	} else {
		struct netmsg_tcp_notify *nm;

		KKASSERT(&curthread->td_msgport == netisr_cpuport(0));
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
		    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}
#ifdef INET6
void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg = 0;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		   ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non-NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}
#endif
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */
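/*
 * Worked numbers (added note; assumes hz = 100): the time component
 * below advances by ISN_BYTES_PER_SECOND / hz = 1048576 / 100 = 10485
 * per tick, i.e. 1 MB per second.  A full wrap of the 32-bit sequence
 * space therefore takes 2^32 / 2^20 = 4096 seconds, which is the
 * "over an hour" of headroom mentioned above.
 */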
#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}
/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else {
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
	}

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0	/* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif
#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz = 0;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th, FALSE);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th, FALSE);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
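/*
 * Worked example (added illustration, not from the original comment):
 * for a path sustaining bw = 1 MB/sec with a minimum observed RTT of
 * 50 ms and a 1448-byte maxseg, the bandwidth-delay product is
 * 1000000 * 0.050 = 50000 bytes, and with the default inflight_stab
 * of 50 the code below adds 5 * 1448 = 7240 bytes of slop, limiting
 * the transmit window to roughly 57 KB regardless of how large the
 * send buffer is.
 */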
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a bit more work).
	 *
	 *	(4) Stability value (defaults to 50 = 5 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;

		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
static void
tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
{
	struct rtentry *rt;
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
		tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
	if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
		tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL ||
	    rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
	    rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
		*maxsegs = tcp_iw_maxsegs;
		*capsegs = tcp_iw_capsegs;
		return;
	}
	*maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
	*capsegs = rt->rt_rmx.rmx_iwcapsegs;
}
u_long
tcp_initial_window(struct tcpcb *tp)
{
	if (tcp_do_rfc3390) {
		/*
		 * RFC3390:
		 * "If the SYN or SYN/ACK is lost, the initial window
		 *  used by a sender after a correctly transmitted SYN
		 *  MUST be one segment consisting of MSS bytes."
		 *
		 * However, we do something a little bit more aggressive
		 * than RFC3390 here:
		 * - Only if time spent in the SYN or SYN|ACK retransmission
		 *   >= 3 seconds, the IW is reduced.  We do this mainly
		 *   because when RFC3390 is published, the initial RTO is
		 *   still 3 seconds (the threshold we test here), while
		 *   after RFC6298, the initial RTO is 1 second.  This
		 *   behaviour probably still falls within the spirit of
		 *   RFC3390.
		 * - When IW is reduced, 2*MSS is used instead of 1*MSS.
		 *   Mainly to avoid sender and receiver deadlock until
		 *   delayed ACK timer expires.  And even RFC2581 does not
		 *   try to reduce IW upon SYN or SYN|ACK retransmission
		 *   timeout.
		 *
		 * See also:
		 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
		 */
		if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
			return (2 * tp->t_maxseg);
		} else {
			u_long maxsegs, capsegs;

			tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
			return min(maxsegs * tp->t_maxseg,
				   max(2 * tp->t_maxseg, capsegs * 1460));
		}
	} else {
		/*
		 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
		 *
		 * Mainly to avoid sender and receiver deadlock
		 * until delayed ACK timer expires.
		 */
		return (2 * tp->t_maxseg);
	}
}
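/*
 * Worked numbers (added note; assumes an Ethernet-ish MSS of 1460):
 * with the defaults maxsegs = 4 and capsegs = 3, the normal case above
 * yields min(4 * 1460, max(2 * 1460, 3 * 1460)) = min(5840, 4380) =
 * 4380 bytes, i.e. the classic RFC 3390 initial window of
 * min(4*MSS, max(2*MSS, 4380 bytes)).  After >= 3 seconds of SYN
 * retransmission the window drops to 2 * MSS = 2920 bytes.
 */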
#ifdef TCP_SIGNATURE
/*
 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
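/*
 * Illustrative SADB setup (added sketch; the setkey(8) syntax and the
 * addresses are assumptions keyed off the 'magic SPI' of 1179 (0x49b)
 * noted above, not something this file defines): the shared secret for
 * a BGP-style peer would be installed with something along the lines of
 *
 *	add 10.0.0.1 10.0.0.2 tcp 0x49b -A tcp-md5 "sharedsecret";
 *
 * in a setkey(8) configuration, one SA per direction, so that the
 * key_allocsa() lookup below can find it by destination address.
 */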
int
tcpsignature_compute(
	struct mbuf *m,		/* mbuf chain */
	int len,		/* length of TCP data */
	int optlen,		/* length of TCP options */
	u_char *buf,		/* storage for MD5 digest */
	u_int direction)	/* direction of flow */
{
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6;
	struct in6_addr in6;
	uint32_t plen;
	uint16_t nhdr;
#endif
	u_short savecsum;

	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));

	/*
	 * Extract the destination from the IP header in the mbuf.
	 */
	ip = mtod(m, struct ip *);
#ifdef INET6
	ip6 = NULL;	/* Make the compiler happy. */
#endif

	/*
	 * Look up an SADB entry which matches the address found in
	 * the packet.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src,
		    (caddr_t)&ip->ip_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#ifdef INET6
	case (IPV6_VERSION >> 4):
		ip6 = mtod(m, struct ip6_hdr *);
		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src,
		    (caddr_t)&ip6->ip6_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}
	if (sav == NULL) {
		kprintf("%s: SADB lookup failed\n", __func__);
		return (EINVAL);
	}

	MD5Init(&ctx);

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		ipovly = (struct ipovly *)ip;
		ippseudo.ippseudo_src = ipovly->ih_src;
		ippseudo.ippseudo_dst = ipovly->ih_dst;
		ippseudo.ippseudo_pad = 0;
		ippseudo.ippseudo_p = IPPROTO_TCP;
		ippseudo.ippseudo_len =
		    htons(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
		th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
		doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
		break;
#ifdef INET6
	/*
	 * RFC 2385, 2.0  Proposal
	 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
	 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
	 * extended next header value (to form 32 bits), and 32-bit segment
	 * length.
	 * Note: Upper-Layer Packet Length comes before Next Header.
	 */
	case (IPV6_VERSION >> 4):
		in6 = ip6->ip6_src;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		in6 = ip6->ip6_dst;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		plen = htonl(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
		nhdr = 0;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		nhdr = IPPROTO_TCP;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
		doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *         Use m_apply() to avoid an early m_pullup().
	 */
	m_apply(m, doff, len, tcpsignature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);
	key_sa_recordxfer(sav, m);
	key_freesav(sav);
	return (0);
}

int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{
	MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
	return (0);
}
#endif /* TCP_SIGNATURE */