2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1988, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94
63 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.17 2002/10/11 11:46:44 ume Exp $
66 #include "opt_ipsec.h"
68 #include "opt_inet6.h"
69 #include "opt_tcpdebug.h"
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
81 #include <sys/domain.h>
83 #include <sys/socket.h>
84 #include <sys/socketvar.h>
85 #include <sys/socketops.h>
86 #include <sys/protosw.h>
88 #include <sys/thread2.h>
89 #include <sys/msgport2.h>
90 #include <sys/socketvar2.h>
93 #include <net/netisr.h>
94 #include <net/route.h>
96 #include <net/netmsg2.h>
97 #include <net/netisr2.h>
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
102 #include <netinet/ip6.h>
104 #include <netinet/in_pcb.h>
106 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
111 #include <netinet6/ip6_var.h>
112 #include <netinet6/tcp6_var.h>
114 #include <netinet/tcp.h>
115 #include <netinet/tcp_fsm.h>
116 #include <netinet/tcp_seq.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_timer2.h>
119 #include <netinet/tcp_var.h>
120 #include <netinet/tcpip.h>
122 #include <netinet/tcp_debug.h>
126 #include <netinet6/ipsec.h>
130 * TCP protocol interface to socket abstraction.
132 extern char *tcpstates[]; /* XXX ??? */
134 static int tcp_attach (struct socket *, struct pru_attach_info *);
135 static void tcp_connect (netmsg_t msg);
137 static void tcp6_connect (netmsg_t msg);
138 static int tcp6_connect_oncpu(struct tcpcb *tp, int flags,
140 struct sockaddr_in6 *sin6,
141 struct in6_addr *addr6);
143 static struct tcpcb *
144 tcp_disconnect (struct tcpcb *);
145 static struct tcpcb *
146 tcp_usrclosed (struct tcpcb *);
/*
 * TCPDEBUG* helpers: when compiled with TCP debugging, snapshot the tcpcb
 * state on entry (TCPDEBUG1) and emit a tcp_trace() record on exit
 * (TCPDEBUG2) for sockets with SO_DEBUG set.
 * NOTE(review): the surrounding #ifdef/#else lines are missing from this
 * extract; the final empty TCPDEBUG2() is the non-debug stub arm.
 */
149 #define TCPDEBUG0 int ostate = 0
150 #define TCPDEBUG1() ostate = tp ? tp->t_state : 0
151 #define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \
152 tcp_trace(TA_USER, ostate, tp, 0, 0, req)
156 #define TCPDEBUG2(req)
160 * For some ill optimized programs, which try to use TCP_NOPUSH
161 * to improve performance, will have small amount of data sits
162 * in the sending buffer. These small amount of data will _not_
163 * be pushed into the network until more data are written into
164 * the socket or the socket write side is shutdown.
/*
 * Sysctl knob net.inet.tcp.disable_nopush: when non-zero (the default),
 * the TCP_NOPUSH socket option is neutralized so small writes are not
 * left sitting in the send buffer (see the comment above).
 */
166 static int tcp_disable_nopush = 1;
167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_nopush, CTLFLAG_RW,
168 &tcp_disable_nopush, 0, "TCP_NOPUSH socket option will have no effect");
171 * Allocate socket buffer space.
/*
 * pru_preattach: reserve send/receive socket buffer space for a new TCP
 * socket (if not already reserved) and flag both buffers for automatic
 * resizing; the send buffer additionally pre-allocates (SSB_PREALLOC).
 * NOTE(review): error declaration, braces and return are missing from
 * this extract — do not treat this fragment as the complete function.
 */
174 tcp_usr_preattach(struct socket *so, int proto __unused,
175 struct pru_attach_info *ai)
179 if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
180 error = soreserve(so, tcp_sendspace, tcp_recvspace,
185 atomic_set_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
186 atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE | SSB_PREALLOC);
192 * TCP attaches to socket via pru_attach(), reserving space,
193 * and an internet control block. This socket may move to
194 * other CPU later when we bind/connect.
/*
 * pru_attach: attach the TCP protocol to a socket via tcp_attach(),
 * defaulting so_linger to TCP_LINGERTIME when SO_LINGER is set with a
 * zero linger value.  Replies to the netmsg with the attach status.
 * NOTE(review): interior lines (inp lookup, error paths) are missing
 * from this extract.
 */
197 tcp_usr_attach(netmsg_t msg)
199 struct socket *so = msg->base.nm_so;
200 struct pru_attach_info *ai = msg->attach.nm_ai;
203 struct tcpcb *tp = NULL;
207 KASSERT(inp == NULL, ("tcp socket attached"));
210 error = tcp_attach(so, ai);
214 if ((so->so_options & SO_LINGER) && so->so_linger == 0)
215 so->so_linger = TCP_LINGERTIME;
218 TCPDEBUG2(PRU_ATTACH);
219 lwkt_replymsg(&msg->lmsg, error);
223 * pru_detach() detaches the TCP protocol from the socket.
224 * If the protocol state is non-embryonic, then can't
225 * do this directly: have to initiate a pru_disconnect(),
226 * which may finish later; embryonic TCB's can just
/*
 * pru_detach: detach TCP from the socket.  Non-embryonic connections go
 * through tcp_disconnect() (which may complete asynchronously).
 */
230 tcp_usr_detach(netmsg_t msg)
232 struct socket *so = msg->base.nm_so;
241 * If the inp is already detached or never attached, it may have
242 * been due to an async close or async attach failure. Just return
243 * as if no error occurred.
247 KASSERT(tp != NULL, ("tcp_usr_detach: tp is NULL"));
249 tp = tcp_disconnect(tp);
250 TCPDEBUG2(PRU_DETACH);
252 lwkt_replymsg(&msg->lmsg, error);
256 * NOTE: ignore_error is non-zero for certain disconnection races
257 * which we want to silently allow, otherwise close() may return
258 * an unexpected error.
260 * NOTE: The variables (msg) and (tp) are assumed.
/*
 * COMMON_START / COMMON_END1 / COMMON_END: shared prologue/epilogue for
 * the pru_* message handlers below.  The variables (msg) and (tp) are
 * assumed in scope at the expansion site; ignore_error suppresses EINVAL
 * for benign disconnection races.  Macro bodies are partially elided in
 * this extract.
 */
262 #define COMMON_START(so, inp, ignore_error) \
268 error = ignore_error ? 0 : EINVAL; \
272 tp = intotcpcb(inp); \
276 #define COMMON_END1(req, noreply) \
280 lwkt_replymsg(&msg->lmsg, error); \
284 #define COMMON_END(req) COMMON_END1((req), 0)
/*
 * Message put-done receipt handler: retarget the socket's protocol
 * message port once a forwarded netmsg has been queued (see the long
 * ordering discussion in tcp_connect()).
 */
287 tcp_sosetport(struct lwkt_msg *msg, lwkt_port_t port)
289 sosetport(((struct netmsg_base *)msg)->nm_so, port);
293 * Give the socket an address.
/*
 * pru_bind (IPv4): validate the address (no multicast, not already
 * bound), then forward the message to netisr0 if necessary so that
 * in_pcbbind() runs serialized there; the inpcb is unlinked from the
 * current CPU's tcbinfo before forwarding and re-linked (PRUB_RELINK)
 * when the message arrives on netisr0.
 */
296 tcp_usr_bind(netmsg_t msg)
298 struct socket *so = msg->bind.base.nm_so;
299 struct sockaddr *nam = msg->bind.nm_nam;
300 struct thread *td = msg->bind.nm_td;
304 struct sockaddr_in *sinp;
305 lwkt_port_t port0 = netisr_cpuport(0);
307 COMMON_START(so, inp, 0);
310 * Must check for multicast addresses and disallow binding
313 sinp = (struct sockaddr_in *)nam;
314 if (sinp->sin_family == AF_INET &&
315 IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
316 error = EAFNOSUPPORT;
321 * Check "already bound" here (in_pcbbind() does the same check
322 * though), so we don't forward a connected socket to netisr0,
323 * which would panic in the following in_pcbunlink().
325 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
326 error = EINVAL; /* already bound */
331 * Use netisr0 to serialize in_pcbbind(), so that pru_detach and
332 * pru_bind for different sockets on the same local port could be
333 * properly ordered. The original race is illustrated here for
338 * close(s1); <----- asynchronous
342 * All will expect bind(s2, *.PORT) to succeed. However, it will
343 * fail, if following sequence happens due to random socket initial
344 * msgport and asynchronous close(2):
348 * : pru_bind(s2) [*.PORT is used by s1]
351 if (&curthread->td_msgport != port0) {
352 lwkt_msg_t lmsg = &msg->bind.base.lmsg;
354 KASSERT((msg->bind.nm_flags & PRUB_RELINK) == 0,
355 ("already asked to relink"));
357 in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
358 msg->bind.nm_flags |= PRUB_RELINK;
360 TCP_STATE_MIGRATE_START(tp);
362 /* See the related comment in tcp_connect() */
363 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
364 lwkt_forwardmsg(port0, lmsg);
365 /* msg invalid now */
368 KASSERT(so->so_port == port0, ("so_port is not netisr0"));
370 if (msg->bind.nm_flags & PRUB_RELINK) {
371 msg->bind.nm_flags &= ~PRUB_RELINK;
372 TCP_STATE_MIGRATE_END(tp);
373 in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
375 KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));
377 error = in_pcbbind(inp, nam, td);
381 COMMON_END(PRU_BIND);
/*
 * pru_bind (IPv6): reject multicast destinations, then bind via
 * in6_pcbbind().  NOTE(review): unlike tcp_usr_bind(), no netisr0
 * forwarding is visible here — presumably IPv6 TCP is pinned to cpu0
 * (see tcp6_addrport() below); confirm against the full source.
 */
387 tcp6_usr_bind(netmsg_t msg)
389 struct socket *so = msg->bind.base.nm_so;
390 struct sockaddr *nam = msg->bind.nm_nam;
391 struct thread *td = msg->bind.nm_td;
395 struct sockaddr_in6 *sin6p;
397 COMMON_START(so, inp, 0);
400 * Must check for multicast addresses and disallow binding
403 sin6p = (struct sockaddr_in6 *)nam;
404 if (sin6p->sin6_family == AF_INET6 &&
405 IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
406 error = EAFNOSUPPORT;
409 error = in6_pcbbind(inp, nam, td);
412 COMMON_END(PRU_BIND);
/*
 * netmsg carrying an inpcb to be inserted into each CPU's wildcard hash.
 */
416 struct netmsg_inswildcard {
417 struct netmsg_base base;
418 struct inpcb *nm_inp;
/*
 * Chained per-CPU handler: insert nm_inp into this CPU's tcbinfo
 * wildcard hash, then forward the message to the next CPU, replying
 * once the last CPU (ncpus2 - 1) has been visited.
 */
422 in_pcbinswildcardhash_handler(netmsg_t msg)
424 struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
425 int cpu = mycpuid, nextcpu;
427 in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);
430 if (nextcpu < ncpus2)
431 lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
433 lwkt_replymsg(&nm->base.lmsg, 0);
437 * Prepare to accept connections.
/*
 * pru_listen (IPv4): move the socket to netisr0 (relinking the inpcb as
 * in tcp_usr_bind()), bind an ephemeral port if unbound, enter
 * TCPS_LISTEN, create the per-cpu port cache, and broadcast the inpcb
 * into every CPU's wildcard hash via the chained netmsg above.
 */
440 tcp_usr_listen(netmsg_t msg)
442 struct socket *so = msg->listen.base.nm_so;
443 struct thread *td = msg->listen.nm_td;
447 struct netmsg_inswildcard nm;
448 lwkt_port_t port0 = netisr_cpuport(0);
450 COMMON_START(so, inp, 0);
452 if (&curthread->td_msgport != port0) {
453 lwkt_msg_t lmsg = &msg->listen.base.lmsg;
455 KASSERT((msg->listen.nm_flags & PRUL_RELINK) == 0,
456 ("already asked to relink"));
458 in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
459 msg->listen.nm_flags |= PRUL_RELINK;
461 TCP_STATE_MIGRATE_START(tp);
463 /* See the related comment in tcp_connect() */
464 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
465 lwkt_forwardmsg(port0, lmsg);
466 /* msg invalid now */
469 KASSERT(so->so_port == port0, ("so_port is not netisr0"));
471 if (msg->listen.nm_flags & PRUL_RELINK) {
472 msg->listen.nm_flags &= ~PRUL_RELINK;
473 TCP_STATE_MIGRATE_END(tp);
474 in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
476 KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));
478 if (tp->t_flags & TF_LISTEN)
481 if (inp->inp_lport == 0) {
482 error = in_pcbbind(inp, NULL, td);
487 TCP_STATE_CHANGE(tp, TCPS_LISTEN);
488 tp->t_flags |= TF_LISTEN;
489 tp->tt_msg = NULL; /* Catch any invalid timer usage */
492 * Create tcpcb per-cpu port cache
495 * This _must_ be done before installing this inpcb into
498 tcp_pcbport_create(tp);
502 * Put this inpcb into wildcard hash on other cpus.
504 ASSERT_INP_NOTINHASH(inp);
505 netmsg_init(&nm.base, NULL, &curthread->td_msgport,
506 MSGF_PRIORITY, in_pcbinswildcardhash_handler);
508 lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
510 in_pcbinswildcardhash(inp);
511 COMMON_END(PRU_LISTEN);
/*
 * pru_listen (IPv6): same flow as tcp_usr_listen() but the socket is
 * asserted (KKASSERT) to already live on netisr0, so no forwarding/
 * relink dance is needed.
 */
517 tcp6_usr_listen(netmsg_t msg)
519 struct socket *so = msg->listen.base.nm_so;
520 struct thread *td = msg->listen.nm_td;
524 struct netmsg_inswildcard nm;
526 COMMON_START(so, inp, 0);
528 if (tp->t_flags & TF_LISTEN)
531 if (inp->inp_lport == 0) {
532 error = in6_pcbbind(inp, NULL, td);
537 TCP_STATE_CHANGE(tp, TCPS_LISTEN);
538 tp->t_flags |= TF_LISTEN;
539 tp->tt_msg = NULL; /* Catch any invalid timer usage */
542 * Create tcpcb per-cpu port cache
545 * This _must_ be done before installing this inpcb into
548 tcp_pcbport_create(tp);
552 * Put this inpcb into wildcard hash on other cpus.
554 KKASSERT(so->so_port == netisr_cpuport(0));
556 KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
557 ASSERT_INP_NOTINHASH(inp);
559 netmsg_init(&nm.base, NULL, &curthread->td_msgport,
560 MSGF_PRIORITY, in_pcbinswildcardhash_handler);
562 lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
564 in_pcbinswildcardhash(inp);
565 COMMON_END(PRU_LISTEN);
570 * Initiate connection to peer.
571 * Create a template for use in transmissions on this connection.
572 * Enter SYN_SENT state, and mark socket as connecting.
573 * Start keep-alive timer, and seed output sequence space.
574 * Send initial segment on connection.
/*
 * pru_connect (IPv4): reject multicast destinations and jail-prohibited
 * remote IPs, then hand off to tcp_connect() (the call itself is elided
 * in this extract — "msg is invalid now" marks the forwarded path).
 * The tail is the error path: free any pending send mbuf, release a
 * held thread reference, and mark async connects disconnected on error.
 */
577 tcp_usr_connect(netmsg_t msg)
579 struct socket *so = msg->connect.base.nm_so;
580 struct sockaddr *nam = msg->connect.nm_nam;
581 struct thread *td = msg->connect.nm_td;
585 struct sockaddr_in *sinp;
587 COMMON_START(so, inp, 0);
590 * Must disallow TCP ``connections'' to multicast addresses.
592 sinp = (struct sockaddr_in *)nam;
593 if (sinp->sin_family == AF_INET
594 && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
595 error = EAFNOSUPPORT;
599 if (!prison_remote_ip(td, (struct sockaddr*)sinp)) {
600 error = EAFNOSUPPORT; /* IPv6 only jail */
605 /* msg is invalid now */
608 if (msg->connect.nm_m) {
609 m_freem(msg->connect.nm_m);
610 msg->connect.nm_m = NULL;
612 if (msg->connect.nm_flags & PRUC_HELDTD)
614 if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
615 so->so_error = error;
616 soisdisconnected(so);
618 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_connect (IPv6): reject multicast, jail-prohibited, and v4-mapped
 * destinations, flag the inpcb as IPv6, then hand off to tcp6_connect()
 * (the call is elided in this extract).  Error path frees any pending
 * send mbuf before replying.
 */
624 tcp6_usr_connect(netmsg_t msg)
626 struct socket *so = msg->connect.base.nm_so;
627 struct sockaddr *nam = msg->connect.nm_nam;
628 struct thread *td = msg->connect.nm_td;
632 struct sockaddr_in6 *sin6p;
634 COMMON_START(so, inp, 0);
637 * Must disallow TCP ``connections'' to multicast addresses.
639 sin6p = (struct sockaddr_in6 *)nam;
640 if (sin6p->sin6_family == AF_INET6
641 && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
642 error = EAFNOSUPPORT;
646 if (!prison_remote_ip(td, nam)) {
647 error = EAFNOSUPPORT; /* IPv4 only jail */
651 /* Reject v4-mapped address */
652 if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
653 error = EADDRNOTAVAIL;
657 inp->inp_inc.inc_isipv6 = 1;
659 /* msg is invalid now */
662 if (msg->connect.nm_m) {
663 m_freem(msg->connect.nm_m);
664 msg->connect.nm_m = NULL;
666 lwkt_replymsg(&msg->lmsg, error);
672 * Initiate disconnect from peer.
673 * If connection never passed embryonic stage, just drop;
674 * else if don't need to let data drain, then can just drop anyways,
675 * else have to begin TCP shutdown process: mark socket disconnecting,
676 * drain unread data, state switch to reflect user close, and
677 * send segment (e.g. FIN) to peer. Socket will be really disconnected
678 * when peer sends FIN and acks ours.
680 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
/*
 * pru_disconnect: initiate disconnect from the peer via tcp_disconnect()
 * (see the block comment above).  ignore_error=1 in COMMON_START so
 * benign disconnection races do not surface as EINVAL to close().
 */
683 tcp_usr_disconnect(netmsg_t msg)
685 struct socket *so = msg->disconnect.base.nm_so;
690 COMMON_START(so, inp, 1);
691 tp = tcp_disconnect(tp);
692 COMMON_END(PRU_DISCONNECT);
696 * Accept a connection. Essentially all the work is
697 * done at higher levels; just return the address
698 * of the peer, storing through addr.
/*
 * pru_accept (IPv4): the connection work was done at a higher level;
 * just report ECONNABORTED for sockets already disconnected, otherwise
 * return the peer address through *nam.
 */
701 tcp_usr_accept(netmsg_t msg)
703 struct socket *so = msg->accept.base.nm_so;
704 struct sockaddr **nam = msg->accept.nm_nam;
707 struct tcpcb *tp = NULL;
711 if (so->so_state & SS_ISDISCONNECTED) {
712 error = ECONNABORTED;
722 in_setpeeraddr(so, nam);
723 COMMON_END(PRU_ACCEPT);
/*
 * pru_accept (IPv6): identical to the above but returns the peer
 * address via in6_setpeeraddr().
 */
728 tcp6_usr_accept(netmsg_t msg)
730 struct socket *so = msg->accept.base.nm_so;
731 struct sockaddr **nam = msg->accept.nm_nam;
734 struct tcpcb *tp = NULL;
739 if (so->so_state & SS_ISDISCONNECTED) {
740 error = ECONNABORTED;
749 in6_setpeeraddr(so, nam);
750 COMMON_END(PRU_ACCEPT);
755 * Mark the connection as being incapable of further output.
/*
 * pru_shutdown: mark the connection incapable of further output —
 * advance the state machine via tcp_usrclosed() and push out any
 * pending segment (e.g. the FIN) with tcp_output().
 */
758 tcp_usr_shutdown(netmsg_t msg)
760 struct socket *so = msg->shutdown.base.nm_so;
765 COMMON_START(so, inp, 0);
767 tp = tcp_usrclosed(tp);
769 error = tcp_output(tp);
770 COMMON_END(PRU_SHUTDOWN);
774 * After a receive, possibly send window update to peer.
/*
 * pru_rcvd: after the user has consumed receive data, possibly send a
 * window update to the peer.  Asynchronous rcvd messages are replied to
 * via so_async_rcvd_reply() and suppress the normal COMMON_END reply
 * (noreply).
 */
777 tcp_usr_rcvd(netmsg_t msg)
779 struct socket *so = msg->rcvd.base.nm_so;
780 int error = 0, noreply = 0;
784 COMMON_START(so, inp, 0);
786 if (msg->rcvd.nm_pru_flags & PRUR_ASYNC) {
788 so_async_rcvd_reply(so);
792 COMMON_END1(PRU_RCVD, noreply);
796 * Do a send by putting data in output queue and updating urgent
797 * marker if URG set. Possibly send more data. Unlike the other
798 * pru_*() routines, the mbuf chains are our responsibility. We
799 * must either enqueue them or free them. The other pru_* routines
800 * generally are caller-frees.
/*
 * pru_send: append the mbuf chain to the send buffer and drive output.
 * Unlike the other pru_* routines the mbuf chain is our responsibility:
 * it is either enqueued (ssb_appendstream) or freed on the error path.
 * PRUS_OOB forces urgent-data output, PRUS_EOF closes the send side
 * after queueing, PRUS_MORETOCOME hints more data is coming so output
 * may be batched.
 */
803 tcp_usr_send(netmsg_t msg)
805 struct socket *so = msg->send.base.nm_so;
806 int flags = msg->send.nm_flags;
807 struct mbuf *m = msg->send.nm_m;
813 KKASSERT(msg->send.nm_control == NULL);
814 KKASSERT(msg->send.nm_addr == NULL);
815 KKASSERT((flags & PRUS_FREEADDR) == 0);
821 * OOPS! we lost a race, the TCP session got reset after
822 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
823 * network interrupt in the non-critical section of sosend().
826 error = ECONNRESET; /* XXX EPIPE? */
836 * This is no longer necessary, since:
837 * - sosendtcp() has already checked it for us
838 * - It does not work with asynchronized send
842 * Don't let too much OOB data build up
844 if (flags & PRUS_OOB) {
845 if (ssb_space(&so->so_snd) < -512) {
854 * Pump the data into the socket.
857 ssb_appendstream(&so->so_snd, m);
860 if (flags & PRUS_OOB) {
862 * According to RFC961 (Assigned Protocols),
863 * the urgent pointer points to the last octet
864 * of urgent data. We continue, however,
865 * to consider it to indicate the first octet
866 * of data past the urgent section.
867 * Otherwise, snd_up should be one lower.
869 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
870 tp->t_flags |= TF_FORCE;
871 error = tcp_output(tp);
872 tp->t_flags &= ~TF_FORCE;
874 if (flags & PRUS_EOF) {
876 * Close the send side of the connection after
880 tp = tcp_usrclosed(tp);
882 if (tp != NULL && !tcp_output_pending(tp)) {
883 if (flags & PRUS_MORETOCOME)
884 tp->t_flags |= TF_MORETOCOME;
885 error = tcp_output_fair(tp);
886 if (flags & PRUS_MORETOCOME)
887 tp->t_flags &= ~TF_MORETOCOME;
890 COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
891 ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
892 (flags & PRUS_NOREPLY));
896 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
897 * will sofree() it when we return.
/*
 * pru_abort: drop the connection with ECONNABORTED.  The socket is
 * referenced by the soabort*() caller, which sofree()s it on return;
 * ignore_error=1 tolerates the already-detached race.
 */
900 tcp_usr_abort(netmsg_t msg)
902 struct socket *so = msg->abort.base.nm_so;
907 COMMON_START(so, inp, 1);
908 tp = tcp_drop(tp, ECONNABORTED);
909 COMMON_END(PRU_ABORT);
913 * Receive out-of-band data.
/*
 * pru_rcvoob: copy the single byte of urgent (out-of-band) data into
 * the caller's mbuf.  Fails when there is no mark pending, OOB data is
 * delivered inline (SO_OOBINLINE), or the byte was already consumed
 * (TCPOOB_HADDATA).  Unless MSG_PEEK, the HAVEDATA/HADDATA flags are
 * toggled so the byte is consumed exactly once.
 */
916 tcp_usr_rcvoob(netmsg_t msg)
918 struct socket *so = msg->rcvoob.base.nm_so;
919 struct mbuf *m = msg->rcvoob.nm_m;
920 int flags = msg->rcvoob.nm_flags;
925 COMMON_START(so, inp, 0);
926 if ((so->so_oobmark == 0 &&
927 (so->so_state & SS_RCVATMARK) == 0) ||
928 so->so_options & SO_OOBINLINE ||
929 tp->t_oobflags & TCPOOB_HADDATA) {
933 if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
938 *mtod(m, caddr_t) = tp->t_iobc;
939 if ((flags & MSG_PEEK) == 0)
940 tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
941 COMMON_END(PRU_RCVOOB);
/* pru_savefaddr (IPv4): record the foreign address on the socket. */
945 tcp_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
947 in_savefaddr(so, faddr);
/* pru_savefaddr (IPv6): record the foreign address on the socket. */
952 tcp6_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
954 in6_savefaddr(so, faddr);
/*
 * pru_preconnect: cheap pre-validation before the connect netmsg is
 * dispatched — reject IPv4 multicast destinations early.  (Return
 * statements are elided in this extract.)
 */
959 tcp_usr_preconnect(struct socket *so, const struct sockaddr *nam,
960 struct thread *td __unused)
962 const struct sockaddr_in *sinp;
964 sinp = (const struct sockaddr_in *)nam;
965 if (sinp->sin_family == AF_INET &&
966 IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
/*
 * Protocol user-request dispatch tables wiring the pru_* handlers above
 * into the socket layer, for IPv4 (tcp_usrreqs) and IPv6 (tcp6_usrreqs).
 */
973 /* xxx - should be const */
974 struct pr_usrreqs tcp_usrreqs = {
975 .pru_abort = tcp_usr_abort,
976 .pru_accept = tcp_usr_accept,
977 .pru_attach = tcp_usr_attach,
978 .pru_bind = tcp_usr_bind,
979 .pru_connect = tcp_usr_connect,
980 .pru_connect2 = pr_generic_notsupp,
981 .pru_control = in_control_dispatch,
982 .pru_detach = tcp_usr_detach,
983 .pru_disconnect = tcp_usr_disconnect,
984 .pru_listen = tcp_usr_listen,
985 .pru_peeraddr = in_setpeeraddr_dispatch,
986 .pru_rcvd = tcp_usr_rcvd,
987 .pru_rcvoob = tcp_usr_rcvoob,
988 .pru_send = tcp_usr_send,
989 .pru_sense = pru_sense_null,
990 .pru_shutdown = tcp_usr_shutdown,
991 .pru_sockaddr = in_setsockaddr_dispatch,
992 .pru_sosend = sosendtcp,
993 .pru_soreceive = sorecvtcp,
994 .pru_savefaddr = tcp_usr_savefaddr,
995 .pru_preconnect = tcp_usr_preconnect,
996 .pru_preattach = tcp_usr_preattach
/*
 * IPv6 table: shares the address-family-independent handlers and
 * substitutes the tcp6_*/in6_* variants where the wire format differs.
 * NOTE(review): no pru_preconnect/pru_preattach entries are visible
 * here, matching the upstream source for this era.
 */
1000 struct pr_usrreqs tcp6_usrreqs = {
1001 .pru_abort = tcp_usr_abort,
1002 .pru_accept = tcp6_usr_accept,
1003 .pru_attach = tcp_usr_attach,
1004 .pru_bind = tcp6_usr_bind,
1005 .pru_connect = tcp6_usr_connect,
1006 .pru_connect2 = pr_generic_notsupp,
1007 .pru_control = in6_control_dispatch,
1008 .pru_detach = tcp_usr_detach,
1009 .pru_disconnect = tcp_usr_disconnect,
1010 .pru_listen = tcp6_usr_listen,
1011 .pru_peeraddr = in6_setpeeraddr_dispatch,
1012 .pru_rcvd = tcp_usr_rcvd,
1013 .pru_rcvoob = tcp_usr_rcvoob,
1014 .pru_send = tcp_usr_send,
1015 .pru_sense = pru_sense_null,
1016 .pru_shutdown = tcp_usr_shutdown,
1017 .pru_sockaddr = in6_setsockaddr_dispatch,
1018 .pru_sosend = sosendtcp,
1019 .pru_soreceive = sorecvtcp,
1020 .pru_savefaddr = tcp6_usr_savefaddr
/*
 * Finish an IPv4 connect on the inpcb's owner CPU: check for a
 * duplicate 4-tuple (EADDRINUSE), commit local/foreign address+port,
 * insert into the connection hash, create the TCP timer message, pick
 * the window-scale request, enter SYN_SENT, queue any initial data and
 * emit the SYN via tcp_output().
 */
1025 tcp_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf *m,
1026 struct sockaddr_in *sin, struct sockaddr_in *if_sin,
1029 struct inpcb *inp = tp->t_inpcb, *oinp;
1030 struct socket *so = inp->inp_socket;
1031 struct route *ro = &inp->inp_route;
1033 KASSERT(inp->inp_pcbinfo == &tcbinfo[mycpu->gd_cpuid],
1034 ("pcbinfo mismatch"));
1036 oinp = in_pcblookup_hash(inp->inp_pcbinfo,
1037 sin->sin_addr, sin->sin_port,
1038 (inp->inp_laddr.s_addr != INADDR_ANY ?
1039 inp->inp_laddr : if_sin->sin_addr),
1040 inp->inp_lport, 0, NULL);
1043 return (EADDRINUSE);
1045 if (inp->inp_laddr.s_addr == INADDR_ANY)
1046 inp->inp_laddr = if_sin->sin_addr;
1047 inp->inp_faddr = sin->sin_addr;
1048 inp->inp_fport = sin->sin_port;
1049 in_pcbinsconnhash(inp);
1051 inp->inp_flags |= INP_HASH;
1052 inp->inp_hashval = hash;
1055 * We are now on the inpcb's owner CPU, if the cached route was
1056 * freed because the rtentry's owner CPU is not the current CPU
1057 * (e.g. in tcp_connect()), then we try to reallocate it here with
1058 * the hope that a rtentry may be cloned from a RTF_PRCLONING
1061 if (!(inp->inp_socket->so_options & SO_DONTROUTE) && /*XXX*/
1062 ro->ro_rt == NULL) {
1063 bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
1064 ro->ro_dst.sa_family = AF_INET;
1065 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1066 ((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
1072 * Now that no more errors can occur, change the protocol processing
1073 * port to the current thread (which is the correct thread).
1075 * Create TCP timer message now; we are on the tcpcb's owner
1078 tcp_create_timermsg(tp, &curthread->td_msgport);
1081 * Compute window scaling to request. Use a larger scaling than
1082 * needed for the initial receive buffer in case the receive buffer
1085 if (tp->request_r_scale < TCP_MIN_WINSHIFT)
1086 tp->request_r_scale = TCP_MIN_WINSHIFT;
1087 while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1088 (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat
1090 tp->request_r_scale++;
1094 tcpstat.tcps_connattempt++;
1095 TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
1096 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
1097 tp->iss = tcp_new_isn(tp);
1098 tcp_sendseqinit(tp);
1100 ssb_appendstream(&so->so_snd, m);
1102 if (flags & PRUS_OOB)
1103 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
1107 * Close the send side of the connection after
1108 * the data is sent if flagged.
1110 if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
1112 tp = tcp_usrclosed(tp);
1114 return (tcp_output(tp));
1118 * Common subroutine to open a TCP connection to remote host specified
1119 * by struct sockaddr_in in mbuf *nam. Call in_pcbbind to assign a local
1120 * port number if needed. Call in_pcbladdr to do the routing and to choose
1121 * a local host address (interface).
1122 * Initialize connection parameters and enter SYN-SENT state.
/*
 * Open an IPv4 TCP connection: select local port/address, compute the
 * connection hash to find the owning netisr, and either run
 * tcp_connect_oncpu() directly (already on the right CPU) or unlink
 * the inpcb and forward this message to the owner CPU, re-entering
 * here with PRUC_RECONNECT set to relink and continue.
 */
1125 tcp_connect(netmsg_t msg)
1127 struct socket *so = msg->connect.base.nm_so;
1128 struct sockaddr *nam = msg->connect.nm_nam;
1129 struct thread *td = msg->connect.nm_td;
1130 struct sockaddr_in *sin = (struct sockaddr_in *)nam;
1131 struct sockaddr_in *if_sin = NULL;
1138 COMMON_START(so, inp, 0);
1141 * Reconnect our pcb if we have to
1143 if (msg->connect.nm_flags & PRUC_RECONNECT) {
1144 msg->connect.nm_flags &= ~PRUC_RECONNECT;
1145 TCP_STATE_MIGRATE_END(tp);
1146 in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1150 * Select local port, if it is not yet selected.
1152 if (inp->inp_lport == 0) {
1153 KKASSERT(inp->inp_laddr.s_addr == INADDR_ANY);
1155 error = in_pcbladdr(inp, nam, &if_sin, td);
1158 inp->inp_laddr.s_addr = if_sin->sin_addr.s_addr;
1159 msg->connect.nm_flags |= PRUC_HASLADDR;
1161 error = in_pcbbind_remote(inp, nam, td);
1166 if ((msg->connect.nm_flags & PRUC_HASLADDR) == 0) {
1168 * Calculate the correct protocol processing thread. The
1169 * connect operation must run there. Set the forwarding
1170 * port before we forward the message or it will get bounced
1173 error = in_pcbladdr(inp, nam, &if_sin, td);
1177 KKASSERT(inp->inp_socket == so);
1179 hash = tcp_addrhash(sin->sin_addr.s_addr, sin->sin_port,
1180 (inp->inp_laddr.s_addr != INADDR_ANY ?
1181 inp->inp_laddr.s_addr : if_sin->sin_addr.s_addr),
1183 port = netisr_hashport(hash);
1185 if (port != &curthread->td_msgport) {
1186 lwkt_msg_t lmsg = &msg->connect.base.lmsg;
1189 * in_pcbladdr() may have allocated a route entry for us
1190 * on the current CPU, but we need a route entry on the
1191 * inpcb's owner CPU, so free it here.
1193 in_pcbresetroute(inp);
1196 * We are moving the protocol processing port the socket
1197 * is on, we have to unlink here and re-link on the
1200 in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1201 msg->connect.nm_flags |= PRUC_RECONNECT;
1202 msg->connect.base.nm_dispatch = tcp_connect;
1204 TCP_STATE_MIGRATE_START(tp);
1207 * Use message put done receipt to change this socket's
1208 * so_port, i.e. _after_ this message was put onto the
1209 * target netisr's msgport but _before_ the message could
1210 * be pulled from the target netisr's msgport, so that:
1211 * - The upper half (socket code) will not see the new
1212 * msgport before this message reaches the new msgport
1213 * and messages for this socket will be ordered.
1214 * - This message will see the new msgport, when its
1215 * handler is called in the target netisr.
1218 * We MUST use message put done receipt to change this
1220 * If we changed the so_port in this netisr after the
1221 * lwkt_forwardmsg (so messages for this socket will be
1222 * ordered) and changed the so_port in the target netisr
1223 * at the very beginning of this message's handler, we
1224 * would suffer so_port overwritten race, given this
1225 * message might be forwarded again.
1228 * This mechanism depends on that the netisr's msgport
1229 * is spin msgport (currently it is :).
1231 * If the upper half saw the new msgport before this
1232 * message reached the target netisr's msgport, the
1233 * messages sent from the upper half could reach the new
1234 * msgport before this message, thus there would be
1235 * message reordering. The worst case could be soclose()
1236 * saw the new msgport and the detach message could reach
1237 * the new msgport before this message, i.e. the inpcb
1238 * could have been destroyed when this message was still
1239 * pending on or on its way to the new msgport. Other
1240 * weird cases could also happen, e.g. inpcb->inp_pcbinfo,
1241 * since we have unlinked this inpcb from the current
1244 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
1245 lwkt_forwardmsg(port, lmsg);
1246 /* msg invalid now */
1248 } else if (msg->connect.nm_flags & PRUC_HELDTD) {
1250 * The original thread is no longer needed; release it.
1253 msg->connect.nm_flags &= ~PRUC_HELDTD;
1255 error = tcp_connect_oncpu(tp, msg->connect.nm_sndflags,
1256 msg->connect.nm_m, sin, if_sin, hash);
1257 msg->connect.nm_m = NULL;
1259 if (msg->connect.nm_m) {
1260 m_freem(msg->connect.nm_m);
1261 msg->connect.nm_m = NULL;
1263 if (msg->connect.nm_flags & PRUC_HELDTD)
1265 if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
1266 so->so_error = error;
1267 soisdisconnected(so);
1269 lwkt_replymsg(&msg->connect.base.lmsg, error);
1270 /* msg invalid now */
/*
 * Open an IPv6 TCP connection: bind a local port if needed, resolve the
 * local address, then — since IPv6 TCP is owned by cpu0 (tcp6_addrport
 * hack) — forward the message to the owner port if necessary, using the
 * same PRUC_RECONNECT relink protocol as tcp_connect(), and finish with
 * tcp6_connect_oncpu().
 */
1276 tcp6_connect(netmsg_t msg)
1279 struct socket *so = msg->connect.base.nm_so;
1280 struct sockaddr *nam = msg->connect.nm_nam;
1281 struct thread *td = msg->connect.nm_td;
1283 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
1284 struct in6_addr *addr6;
1288 COMMON_START(so, inp, 0);
1291 * Reconnect our pcb if we have to
1293 if (msg->connect.nm_flags & PRUC_RECONNECT) {
1294 msg->connect.nm_flags &= ~PRUC_RECONNECT;
1295 TCP_STATE_MIGRATE_END(tp);
1296 in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1300 * Bind if we have to
1302 if (inp->inp_lport == 0) {
1303 error = in6_pcbbind(inp, NULL, td);
1309 * Cannot simply call in_pcbconnect, because there might be an
1310 * earlier incarnation of this same connection still in
1311 * TIME_WAIT state, creating an ADDRINUSE error.
1313 error = in6_pcbladdr(inp, nam, &addr6, td);
1317 port = tcp6_addrport(); /* XXX hack for now, always cpu0 */
1319 if (port != &curthread->td_msgport) {
1320 lwkt_msg_t lmsg = &msg->connect.base.lmsg;
1323 * in_pcbladdr() may have allocated a route entry for us
1324 * on the current CPU, but we need a route entry on the
1325 * inpcb's owner CPU, so free it here.
1327 in_pcbresetroute(inp);
1329 in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1330 msg->connect.nm_flags |= PRUC_RECONNECT;
1331 msg->connect.base.nm_dispatch = tcp6_connect;
1333 TCP_STATE_MIGRATE_START(tp);
1335 /* See the related comment in tcp_connect() */
1336 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
1337 lwkt_forwardmsg(port, lmsg);
1338 /* msg invalid now */
1341 error = tcp6_connect_oncpu(tp, msg->connect.nm_sndflags,
1342 &msg->connect.nm_m, sin6, addr6);
1343 /* nm_m may still be intact */
1345 if (msg->connect.nm_m) {
1346 m_freem(msg->connect.nm_m);
1347 msg->connect.nm_m = NULL;
1349 lwkt_replymsg(&msg->connect.base.lmsg, error);
1350 /* msg invalid now */
/*
 * IPv6 counterpart of tcp_connect_oncpu(): check for a duplicate
 * 6-tuple (EADDRINUSE), commit addresses/ports and flowinfo, insert
 * into the connection hash, create the timer message, pick the window
 * scale, enter SYN_SENT, queue initial data and emit the SYN.  Takes
 * struct mbuf **mp because on the EADDRINUSE path the mbuf is left
 * intact for the caller to free.
 */
1354 tcp6_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf **mp,
1355 struct sockaddr_in6 *sin6, struct in6_addr *addr6)
1357 struct mbuf *m = *mp;
1358 struct inpcb *inp = tp->t_inpcb;
1359 struct socket *so = inp->inp_socket;
1363 * Cannot simply call in_pcbconnect, because there might be an
1364 * earlier incarnation of this same connection still in
1365 * TIME_WAIT state, creating an ADDRINUSE error.
1367 oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
1368 &sin6->sin6_addr, sin6->sin6_port,
1369 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
1370 addr6 : &inp->in6p_laddr),
1371 inp->inp_lport, 0, NULL);
1373 return (EADDRINUSE);
1375 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
1376 inp->in6p_laddr = *addr6;
1377 inp->in6p_faddr = sin6->sin6_addr;
1378 inp->inp_fport = sin6->sin6_port;
1379 if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
1380 inp->in6p_flowinfo = sin6->sin6_flowinfo;
1381 in_pcbinsconnhash(inp);
1384 * Now that no more errors can occur, change the protocol processing
1385 * port to the current thread (which is the correct thread).
1387 * Create TCP timer message now; we are on the tcpcb's owner
1390 tcp_create_timermsg(tp, &curthread->td_msgport);
1392 /* Compute window scaling to request. */
1393 if (tp->request_r_scale < TCP_MIN_WINSHIFT)
1394 tp->request_r_scale = TCP_MIN_WINSHIFT;
1395 while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1396 (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
1397 tp->request_r_scale++;
1401 tcpstat.tcps_connattempt++;
1402 TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
1403 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
1404 tp->iss = tcp_new_isn(tp);
1405 tcp_sendseqinit(tp);
1407 ssb_appendstream(&so->so_snd, m);
1409 if (flags & PRUS_OOB)
1410 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
1414 * Close the send side of the connection after
1415 * the data is sent if flagged.
1417 if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
1419 tp = tcp_usrclosed(tp);
1421 return (tcp_output(tp));
1427 * The new sockopt interface makes it possible for us to block in the
1428 * copyin/out step (if we take a page fault). Taking a page fault while
1429 * in a critical section is probably a Bad Thing. (Since sockets and pcbs
1430 * both now use TSM, there probably isn't any need for this function to
1431 * run in a critical section any more. This needs more examination.)
/*
 * tcp_ctloutput - [gs]etsockopt handler for TCP sockets, run as a netmsg
 * on the inpcb's owner cpu.  Handles SOL_SOCKET/SO_CPUHINT directly,
 * forwards IPv4/IPv6 level options to ip{,6}_ctloutput, and implements
 * the IPPROTO_TCP options (TCP_NODELAY, TCP_MAXSEG, TCP_KEEP*, etc.).
 * Replies to the message with the resulting error code.
 */
1434 tcp_ctloutput(netmsg_t msg)
1436 struct socket *so = msg->base.nm_so;
1437 struct sockopt *sopt = msg->ctloutput.nm_sopt;
1438 struct thread *td = NULL;
1439 int error, opt, optval, opthz;
/* Asynchronous callers held their thread via tcp_ctloutmsg() */
1443 if (msg->ctloutput.nm_flags & PRCO_HELDTD)
1452 tp = intotcpcb(inp);
1454 /* Get socket's owner cpuid hint */
1455 if (sopt->sopt_level == SOL_SOCKET &&
1456 sopt->sopt_dir == SOPT_GET &&
1457 sopt->sopt_name == SO_CPUHINT) {
1458 if (tp->t_flags & TF_LISTEN) {
1460 * Listen sockets owner cpuid is always 0,
1461 * which does not make sense if SO_REUSEPORT
1464 if (so->so_options & SO_REUSEPORT)
1465 optval = (inp->inp_lgrpindex & ncpus2_mask);
1467 optval = -1; /* no hint */
1471 soopt_from_kbuf(sopt, &optval, sizeof(optval));
/*
 * Non-TCP-level options are delegated to the IP layer; IPv4
 * multicast options are rejected first since they are meaningless
 * on a TCP socket.
 */
1475 if (sopt->sopt_level != IPPROTO_TCP) {
1476 if (sopt->sopt_level == IPPROTO_IP) {
1477 switch (sopt->sopt_name) {
1478 case IP_MULTICAST_IF:
1479 case IP_MULTICAST_VIF:
1480 case IP_MULTICAST_TTL:
1481 case IP_MULTICAST_LOOP:
1482 case IP_ADD_MEMBERSHIP:
1483 case IP_DROP_MEMBERSHIP:
1485 * Multicast does not make sense on
1493 if (INP_CHECK_SOCKAF(so, AF_INET6))
1494 ip6_ctloutput_dispatch(msg);
1498 /* msg invalid now */
/* IPPROTO_TCP options proper: SET then GET direction */
1504 switch (sopt->sopt_dir) {
1506 error = soopt_to_kbuf(sopt, &optval, sizeof optval,
1510 switch (sopt->sopt_name) {
/* Restore keepalive idle time from interval/global default */
1513 tp->t_keepidle = tp->t_keepintvl;
1515 tp->t_keepidle = tcp_keepidle;
1516 tcp_timer_keep_activity(tp, 0);
1518 #ifdef TCP_SIGNATURE
1519 case TCP_SIGNATURE_ENABLE:
/* TCP-MD5 can only be toggled before any segment is sent */
1520 if (tp->t_state == TCPS_CLOSED) {
1522 * This is the only safe state that this
1523 * option could be changed. Some segments
1524 * could already have been sent in other
1528 tp->t_flags |= TF_SIGNATURE;
1530 tp->t_flags &= ~TF_SIGNATURE;
1535 #endif /* TCP_SIGNATURE */
/* Boolean t_flags options set/cleared via 'opt' mask */
1538 switch (sopt->sopt_name) {
1546 opt = 0; /* dead code to fool gcc */
1553 tp->t_flags &= ~opt;
/* TCP_NOPUSH: clearing it flushes pending data immediately */
1557 if (tcp_disable_nopush)
1560 tp->t_flags |= TF_NOPUSH;
1562 tp->t_flags &= ~TF_NOPUSH;
1563 error = tcp_output(tp);
1569 * Must be between 0 and maxseg. If the requested
1570 * maxseg is too small to satisfy the desired minmss,
1571 * pump it up (silently so sysctl modifications of
1572 * minmss do not create unexpected program failures).
1573 * Handle degenerate cases.
1575 if (optval > 0 && optval <= tp->t_maxseg) {
/* +40: account for the IP + TCP header bytes in minmss */
1576 if (optval + 40 < tcp_minmss) {
1577 optval = tcp_minmss - 40;
1581 tp->t_maxseg = optval;
/* TCP_KEEP* set path: user supplies milliseconds, stored in hz ticks */
1588 opthz = ((int64_t)optval * hz) / 1000;
1590 tp->t_keepinit = opthz;
1596 opthz = ((int64_t)optval * hz) / 1000;
1598 tp->t_keepidle = opthz;
1599 tcp_timer_keep_activity(tp, 0);
1606 opthz = ((int64_t)optval * hz) / 1000;
1608 tp->t_keepintvl = opthz;
/* t_maxidle = interval * probe count; recomputed on either change */
1609 tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
1617 tp->t_keepcnt = optval;
1618 tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
1625 error = ENOPROTOOPT;
/* GET direction: report current values (times converted back to ms) */
1631 switch (sopt->sopt_name) {
1632 #ifdef TCP_SIGNATURE
1633 case TCP_SIGNATURE_ENABLE:
1634 optval = (tp->t_flags & TF_SIGNATURE) ? 1 : 0;
1636 #endif /* TCP_SIGNATURE */
1638 optval = tp->t_flags & TF_NODELAY;
1641 optval = tp->t_maxseg;
1644 optval = tp->t_flags & TF_NOOPT;
1647 optval = tp->t_flags & TF_NOPUSH;
1650 optval = ((int64_t)tp->t_keepinit * 1000) / hz;
1653 optval = ((int64_t)tp->t_keepidle * 1000) / hz;
1656 optval = ((int64_t)tp->t_keepintvl * 1000) / hz;
1659 optval = tp->t_keepcnt;
1662 error = ENOPROTOOPT;
1666 soopt_from_kbuf(sopt, &optval, sizeof optval);
/* Reply with the final status; msg must not be touched afterwards */
1672 lwkt_replymsg(&msg->lmsg, error);
/*
 * Self-contained message for asynchronous setsockopt: carries the
 * netmsg, a private copy of the sockopt, and (per the sooptcopyin()
 * use in tcp_ctloutmsg) inline storage for the option value so no
 * userspace pointer is dereferenced on the target cpu.
 */
1675 struct netmsg_tcp_ctloutput {
1676 struct netmsg_pr_ctloutput ctloutput;
1677 struct sockopt sopt;
1682 * Allocate netmsg_pr_ctloutput for asynchronous tcp_ctloutput.
/*
 * tcp_ctloutmsg - build a self-contained netmsg for an asynchronous
 * SOPT_SET tcp_ctloutput.  Returns NULL when the option must be handled
 * synchronously (non-TCP level, unsupported option, allocation or
 * copyin failure).  On success the caller owns the returned message;
 * any held thread (PRCO_HELDTD) is released by tcp_ctloutput.
 */
1684 struct netmsg_pr_ctloutput *
1685 tcp_ctloutmsg(struct sockopt *sopt)
1687 struct netmsg_tcp_ctloutput *msg;
1688 int flags = 0, error;
1690 KASSERT(sopt->sopt_dir == SOPT_SET, ("not from ctloutput"));
1692 /* Only small set of options allows asynchronous setting. */
1693 if (sopt->sopt_level != IPPROTO_TCP)
1695 switch (sopt->sopt_name) {
/* M_NULLOK: allocation may fail; fall back to the synchronous path */
1705 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
1707 /* Fallback to synchronous tcp_ctloutput */
1711 /* Save the sockopt */
1714 /* Fixup the sopt.sopt_val ptr */
/* Copy the user value into the message so it travels with it */
1715 error = sooptcopyin(sopt, &msg->sopt_val,
1716 sizeof(msg->sopt_val), sizeof(msg->sopt_val));
1718 kfree(msg, M_LWKTMSG);
1721 msg->sopt.sopt_val = &msg->sopt_val;
1723 /* Hold the current thread */
/* Keep the requesting thread alive until tcp_ctloutput drops it */
1724 if (msg->sopt.sopt_td != NULL) {
1725 flags |= PRCO_HELDTD;
1726 lwkt_hold(msg->sopt.sopt_td);
1729 msg->ctloutput.nm_flags = flags;
1730 msg->ctloutput.nm_sopt = &msg->sopt;
1732 return &msg->ctloutput;
1736 * tcp_sendspace and tcp_recvspace are the default send and receive window
1737 * sizes, respectively. These are obsolescent (this information should
1738 * be set by the route).
1740 * Use a default that does not require tcp window scaling to be turned
1741 * on. Individual programs or the administrator can increase the default.
/*
 * Default send/receive buffer sizes applied at socket attach time.
 *
 * NOTE(review): the variables are declared u_long but exported with
 * SYSCTL_INT; on LP64 platforms the handler would then read/write only
 * part of the variable — confirm whether SYSCTL_ULONG (or int storage)
 * is intended here.
 */
1743 u_long tcp_sendspace = 57344; /* largest multiple of PAGE_SIZE < 64k */
1744 SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
1745 &tcp_sendspace , 0, "Maximum outgoing TCP datagram size");
1746 u_long tcp_recvspace = 57344; /* largest multiple of PAGE_SIZE < 64k */
1747 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
1748 &tcp_recvspace , 0, "Maximum incoming TCP datagram size");
1751 * Attach TCP protocol to socket, allocating internet protocol control
1752 * block, tcp control block, buffer space, and entering CLOSED state.
/*
 * tcp_attach - allocate the inpcb (and per-family state) for a new TCP
 * socket on the current cpu's pcbinfo, leaving the connection in the
 * CLOSED state.  Buffer reservation is delegated to tcp_usr_preattach().
 */
1755 tcp_attach(struct socket *so, struct pru_attach_info *ai)
1761 boolean_t isipv6 = INP_CHECK_SOCKAF(so, AF_INET6);
/* Reserve socket buffer space etc.; second arg unused by preattach */
1765 error = tcp_usr_preattach(so, 0 /* don't care */, ai);
1769 /* Post attach; do nothing */
/* Place the new pcb on the current cpu's tcbinfo */
1772 cpu = mycpu->gd_cpuid;
1775 * Set the default pcbinfo. This will likely change when we
1778 error = in_pcballoc(so, &tcbinfo[cpu]);
/* -1 selects the system default hop limit for IPv6 sockets */
1784 inp->in6p_hops = -1; /* use kernel default */
1787 /* Keep a reference for asynchronized pru_rcvd */
1793 * Initiate (or continue) disconnect.
1794 * If embryonic state, just send reset (once).
1795 * If in ``let data drain'' option and linger null, just drop.
1796 * Otherwise (hard), mark socket disconnecting and drop
1797 * current input data; switch states based on user close, and
1798 * send segment to peer (with FIN).
1800 static struct tcpcb *
/*
 * tcp_disconnect - initiate (or continue) a disconnect.
 * Embryonic connections (< ESTABLISHED) and hard-linger closes are
 * dropped outright; otherwise flush pending receive data, mark the
 * socket disconnecting, and walk the close state machine via
 * tcp_usrclosed()/tcp_output().  May return NULL if the tcpcb is
 * destroyed along the way (tcp_drop path).
 */
1801 tcp_disconnect(struct tcpcb *tp)
1803 struct socket *so = tp->t_inpcb->inp_socket;
1805 if (tp->t_state < TCPS_ESTABLISHED) {
/* SO_LINGER with zero timeout: abortive close (sends RST) */
1807 } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
1808 tp = tcp_drop(tp, 0);
/* Graceful close: discard undelivered input under the rcv token */
1810 lwkt_gettoken(&so->so_rcv.ssb_token);
1811 soisdisconnecting(so);
1812 sbflush(&so->so_rcv.sb);
1813 tp = tcp_usrclosed(tp);
1816 lwkt_reltoken(&so->so_rcv.ssb_token);
1822 * User issued close, and wish to trail through shutdown states:
1823 * if never received SYN, just forget it. If got a SYN from peer,
1824 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
1825 * If already got a FIN from peer, then almost done; go to LAST_ACK
1826 * state. In all other cases, have already sent FIN to peer (e.g.
1827 * after PRU_SHUTDOWN), and just have to play tedious game waiting
1828 * for peer to send FIN or not respond to keep-alives, etc.
1829 * We can let the user exit from the close as soon as the FIN is acked.
1831 static struct tcpcb *
1832 tcp_usrclosed(struct tcpcb *tp)
1835 switch (tp->t_state) {
1839 TCP_STATE_CHANGE(tp, TCPS_CLOSED);
1844 case TCPS_SYN_RECEIVED:
1845 tp->t_flags |= TF_NEEDFIN;
1848 case TCPS_ESTABLISHED:
1849 TCP_STATE_CHANGE(tp, TCPS_FIN_WAIT_1);
1852 case TCPS_CLOSE_WAIT:
1853 TCP_STATE_CHANGE(tp, TCPS_LAST_ACK);
1856 if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
1857 soisdisconnected(tp->t_inpcb->inp_socket);
1858 /* To prevent the connection hanging in FIN_WAIT_2 forever. */
1859 if (tp->t_state == TCPS_FIN_WAIT_2) {
1860 tcp_callout_reset(tp, tp->tt_2msl, tp->t_maxidle,