/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */
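
/*
 * As an illustrative sketch (not one of this file's interfaces), a typical
 * kernel consumer pairs socreate() with soclose(), with 'td' assumed to be
 * the calling thread:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error == 0) {
 *		... use the socket ...
 *		soclose(so);
 *	}
 *
 * socreate() hands back the single reference that soclose() consumes;
 * soref() and sorele() are needed only in the special cases noted above.
 */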
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/khelp.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/jail.h>
#include <sys/syslog.h>
#include <netinet/in.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif
static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);
static inline int hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
fo_kqfilter_t	soo_kqfilter;
static struct filterops solisten_filtops = {
	.f_detach = filt_sordetach,
	.f_event = filt_solisten,
};
static struct filterops soread_filtops = {
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
#define	V_socket_hhh		VNET(socket_hhh)
/*
 * Limit on the number of connections in the listen queue waiting
 * for acceptance.
 *
 * NB: The original sysctl somaxconn is still available but hidden
 * to prevent confusion about the actual purpose of this number.
 */
static u_int somaxconn = SOMAXCONN;
static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * The purpose of the UINT_MAX / 3 limit is so that the formula
	 * "3 * so_qlimit / 2" in sonewconn() below will not overflow.
	 */
	if (val < 1 || val > UINT_MAX / 3)
		return (EINVAL);

	somaxconn = val;
	return (0);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size");
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size (compat)");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * mechanisms.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * Initialize the socket subsystem and set up the socket
 * memory allocator.
 */
static uma_zone_t socket_zone;
int	maxsockets;
static void
socket_zone_change(void *tag)
{

	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
}
static void
socket_hhook_register(int subtype)
{

	if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
	    &V_socket_hhh[subtype],
	    HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register hook\n", __func__);
}
static void
socket_hhook_deregister(int subtype)
{

	if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
		printf("%s: WARNING: unable to deregister hook\n", __func__);
}
static void
socket_init(void *tag)
{

	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
static void
socket_vnet_init(const void *unused __unused)
{
	int i;

	/* We expect a contiguous range */
	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_register(i);
}
VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_init, NULL);
static void
socket_vnet_uninit(const void *unused __unused)
{
	int i;

	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_deregister(i);
}
VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_uninit, NULL);
/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, maxfiles);
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets &&
		    newmaxsockets <= maxfiles) {
			maxsockets = newmaxsockets;
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */
/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}

	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_snd.sb_aiojobq);
	TAILQ_INIT(&so->so_rcv.sb_aiojobq);
	TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
	TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
#ifdef VIMAGE
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet = vnet;
#endif
	/* We shouldn't need the so_global_mtx */
	if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
		/* Do we need more comprehensive error returns? */
		uma_zfree(socket_zone, so);
		return (NULL);
	}
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	vnet->vnet_sockcnt++;
#endif
	mtx_unlock(&so_global_mtx);

	return (so);
}
/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	/* Remove the accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#ifdef MAC
	mac_socket_destroy(so);
#endif
	hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);

	crfree(so->so_cred);
	khelp_destroy_osd(&so->osd);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}
/*
 * socreate() returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL) {
		/* No support for domain. */
		if (pffinddomain(dom) == NULL)
			return (EAFNOSUPPORT);
		/* No support for socket type. */
		if (proto == 0 && type != 0)
			return (EPROTOTYPE);
		return (EPROTONOSUPPORT);
	}
	if (prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_INET6) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	static struct timeval lastover;
	static struct timeval overinterval = { 60, 0 };
	static int overcount;

	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over) {
#else
	if (over) {
#endif
		overcount++;

		if (ratecheck(&lastover, &overinterval)) {
			log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
			    "%i already in queue awaiting acceptance "
			    "(%d occurrences)\n",
			    __func__, head->so_pcb, head->so_qlen, overcount);

			overcount = 0;
		}

		return (NULL);
	}
	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	/*
	 * The accept socket may be tearing down but we just
	 * won a race on the ACCEPT_LOCK.
	 * However, if sctp_peeloff() is called on a 1-to-many
	 * style socket, the SO_ACCEPTCONN doesn't need to be set.
	 */
	if (!(head->so_options & SO_ACCEPTCONN) &&
	    ((head->so_proto->pr_protocol != IPPROTO_SCTP) ||
	     (head->so_type != SOCK_SEQPACKET))) {
		SOCK_LOCK(so);
		so->so_head = NULL;
		sofree(so);		/* NB: returns ACCEPT_UNLOCK'ed. */
		return (NULL);
	}
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	wakeup_one(&head->so_timeo);
	return (so);
}
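
/*
 * Illustrative sketch (assumed protocol input-path context): a protocol
 * typically creates the child socket for a completed handshake with:
 *
 *	so = sonewconn(head, SS_ISCONNECTED);
 *	if (so == NULL)
 *		... drop the connection; the listen queue is full ...
 *
 * The new socket sits on head's completed queue with a ref count of 0
 * until accept() takes it over.
 */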
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}
int
sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
	CURVNET_RESTORE();
	return (error);
}
/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is
 * required by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
	CURVNET_RESTORE();
	return (error);
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
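
/*
 * A minimal sketch of the callback pattern (assuming a protocol whose
 * pru_listen method has already taken its own pcb lock): the protocol
 * brackets the socket-layer test and set with the socket lock, as advised
 * above:
 *
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0)
 *		solisten_proto(so, backlog);
 *	SOCK_UNLOCK(so);
 */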
/*
 * Evaluate the reference count and named references on a socket; if no
 * references remain, free it.  This should be called whenever a reference is
 * released, such as in sorele(), but also when named reference flags are
 * cleared in socket or protocol code.
 *
 * sofree() will free the socket if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)),
		    ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
		    ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	VNET_SO_ASSERT(so);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	seldrain(&so->so_snd.sb_sel);
	seldrain(&so->so_rcv.sb_sel);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}
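
/*
 * Illustrative sketch of the reference-drop pattern that reaches sofree():
 * sorele() expects the accept and socket locks to be held and consumes one
 * reference:
 *
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	sorele(so);
 *
 * sorele() returns with both locks dropped, calling sofree() when the last
 * reference goes away.
 */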
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket structure
 * will not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	ACCEPT_LOCK();
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		/*
		 * Prevent new additions to the accept queues due
		 * to ACCEPT_LOCK races while we are draining them.
		 */
		so->so_options &= ~SO_ACCEPTCONN;
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		KASSERT((TAILQ_EMPTY(&so->so_comp)),
		    ("%s: so_comp populated", __func__));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
		    ("%s: so_incomp populated", __func__));
	}
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);			/* NB: Returns with ACCEPT_UNLOCK(). */
	CURVNET_RESTORE();
	return (error);
}
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
	VNET_SO_ASSERT(so);

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	CURVNET_RESTORE();
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (soconnectat(AT_FDCWD, so, nam, td));
}
int
soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);

	CURVNET_SET(so->so_vnet);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		if (fd == AT_FDCWD) {
			error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
			    nam, td);
		} else {
			error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
			    so, nam, td);
		}
	}
	CURVNET_RESTORE();

	return (error);
}
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	CURVNET_SET(so1->so_vnet);
	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	CURVNET_RESTORE();
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If resid is 0, which can happen
				 * only if we have control to send, then
				 * a single empty mbuf is returned.  This
				 * is a workaround to prevent protocol send
				 * routines from looping over an empty buffer.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT;	/* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			VNET_SO_ASSERT(so);
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td);
	CURVNET_RESTORE();
	return (error);
}
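
/*
 * Illustrative sketch: a simple kernel caller sending from a uio-described
 * buffer (assumed to have been set up by the caller) over a connected
 * socket:
 *
 *	error = sosend(so, NULL, &uio, NULL, NULL, 0, td);
 *
 * Datagram consumers pass a sockaddr as the second argument instead, and
 * an mbuf chain may be handed in via 'top' in place of the uio.
 */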
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
	VNET_SO_ASSERT(so);

	m = m_get(M_WAITOK, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, error, offset;
	ssize_t len;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	ssize_t orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid) {
		VNET_SO_ASSERT(so);
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
	}

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_DONTWAIT is not set
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    sbavail(&so->so_rcv) < uio->uio_resid) &&
	    sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !sbavail(&so->so_rcv),
		    ("receive: m == %p sbavail == %u",
		    m, sbavail(&so->so_rcv)));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copym(m, 0, m->m_len,
					    M_NOWAIT);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				VNET_SO_ASSERT(so);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
	    && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
			if (type != m->m_type)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					m->m_nextpkt = NULL;
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					if (flags & MSG_DONTWAIT) {
						*mp = m_copym(m, 0, len,
						    M_NOWAIT);
						if (*mp == NULL) {
							/*
							 * m_copym() couldn't
							 * allocate an mbuf.
							 * Adjust uio_resid back
							 * (it was adjusted
							 * down by len bytes,
							 * which we didn't end
							 * up "copying" over).
							 */
							uio->uio_resid += len;
							break;
						}
					} else {
						SOCKBUF_UNLOCK(&so->so_rcv);
						*mp = m_copym(m, 0, len,
						    M_WAITOK);
						SOCKBUF_LOCK(&so->so_rcv);
					}
				}
				sbcut_locked(&so->so_rcv, len);
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error ||
			    so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				VNET_SO_ASSERT(so);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			/*
			 * We could receive some data while we were notifying
			 * the protocol.  Skip blocking in this case.
			 */
			if (so->so_rcv.sb_mb == NULL) {
				error = sbwait(&so->so_rcv);
				if (error) {
					SOCKBUF_UNLOCK(&so->so_rcv);
					goto release;
				}
			}
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then we don't need to generate an ACK to the peer to
		 * update the window, since the ACK will be generated on
		 * return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			VNET_SO_ASSERT(so);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 &&
	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		goto restart;
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	return (error);
}
/*
 * Optimized version of soreceive() for stream (TCP) sockets.
 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
 */
int
soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	int len = 0, error = 0, flags, oresid;
	struct sockbuf *sb;
	struct mbuf *m, *n = NULL;

	/* We only do stream sockets. */
	if (so->so_type != SOCK_STREAM)
		return (EINVAL);
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		return (EINVAL);
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp0 != NULL)
		*mp0 = NULL;

	sb = &so->so_rcv;

	/* Prevent other readers from entering the socket. */
	error = sblock(sb, SBLOCKWAIT(flags));
	if (error)
		goto out;
	SOCKBUF_LOCK(sb);

	/* Easy one, no space to copyout anything. */
	if (uio->uio_resid == 0) {
		error = EINVAL;
		goto out;
	}
	oresid = uio->uio_resid;

	/* We will never ever get anything unless we are or were connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		error = ENOTCONN;
		goto out;
	}

restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	/* Abort if socket has reported problems. */
	if (so->so_error) {
		if (sbavail(sb) > 0)
			goto deliver;
		if (oresid > uio->uio_resid)
			goto out;
		error = so->so_error;
		if (!(flags & MSG_PEEK))
			so->so_error = 0;
		goto out;
	}

	/* Door is closed.  Deliver what is left, if any. */
	if (sb->sb_state & SBS_CANTRCVMORE) {
		if (sbavail(sb) > 0)
			goto deliver;
		else
			goto out;
	}

	/* Socket buffer is empty and we shall not block. */
	if (sbavail(sb) == 0 &&
	    ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
		error = EAGAIN;
		goto out;
	}

	/* Socket buffer got some data that we shall deliver now. */
	if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
	    ((so->so_state & SS_NBIO) ||
	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
	     sbavail(sb) >= sb->sb_lowat ||
	     sbavail(sb) >= uio->uio_resid ||
	     sbavail(sb) >= sb->sb_hiwat)) {
		goto deliver;
	}

	/* On MSG_WAITALL we must wait until all data or error arrives. */
	if ((flags & MSG_WAITALL) &&
	    (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
		goto deliver;

	/*
	 * Wait and block until (more) data comes in.
	 * NB: Drops the sockbuf lock during wait.
	 */
	error = sbwait(sb);
	if (error)
		goto out;
	goto restart;

deliver:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));

	/* Statistics. */
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;

	/* Fill uio until full or current end of socket buffer is reached. */
	len = min(uio->uio_resid, sbavail(sb));
	if (mp0 != NULL) {
		/* Dequeue as many mbufs as possible. */
		if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
			if (*mp0 == NULL)
				*mp0 = sb->sb_mb;
			else
				m_cat(*mp0, sb->sb_mb);
			for (m = sb->sb_mb;
			     m != NULL && m->m_len <= len;
			     m = m->m_next) {
				KASSERT(!(m->m_flags & M_NOTAVAIL),
				    ("%s: m %p not available", __func__, m));
				len -= m->m_len;
				uio->uio_resid -= m->m_len;
				sbfree(sb, m);
				n = m;
			}
			n->m_next = NULL;
			sb->sb_mb = m;
			sb->sb_lastrecord = sb->sb_mb;
			if (sb->sb_mb == NULL)
				SB_EMPTY_FIXUP(sb);
		}
		/* Copy the remainder. */
		if (len > 0) {
			KASSERT(sb->sb_mb != NULL,
			    ("%s: len > 0 && sb->sb_mb empty", __func__));

			m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
			if (m == NULL)
				len = 0;	/* Don't flush data from sockbuf. */
			else
				uio->uio_resid -= len;
			if (*mp0 != NULL)
				m_cat(*mp0, m);
			else
				*mp0 = m;
			if (*mp0 == NULL) {
				error = ENOBUFS;
				goto out;
			}
		}
	} else {
		/* NB: Must unlock socket buffer as uiomove may sleep. */
		SOCKBUF_UNLOCK(sb);
		error = m_mbuftouio(uio, sb->sb_mb, len);
		SOCKBUF_LOCK(sb);
		if (error)
			goto out;
	}
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);

	/*
	 * Remove the delivered data from the socket buffer unless we
	 * were only peeking.
	 */
	if (!(flags & MSG_PEEK)) {
		if (len > 0)
			sbdrop_locked(sb, len);

		/* Notify protocol that we drained some data. */
		if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
		    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
		     !(flags & MSG_SOCALLBCK))) {
			SOCKBUF_UNLOCK(sb);
			VNET_SO_ASSERT(so);
			(*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(sb);
		}
	}

	/*
	 * For MSG_WAITALL we may have to loop again and wait for
	 * more data to come in.
	 */
	if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
		goto restart;
out:
	SOCKBUF_LOCK_ASSERT(sb);
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);
	return (error);
}
/*
 * Optimized version of soreceive() for simple datagram cases from userspace.
 * Unlike in the stream case, we're able to drop a datagram if copyout()
 * fails, and because we handle datagrams atomically, we don't need to use a
 * sleep lock to prevent I/O interlacing.
 */
2146 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2147 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2149 struct mbuf *m, *m2;
2152 struct protosw *pr = so->so_proto;
2153 struct mbuf *nextrecord;
2157 if (controlp != NULL)
2160 flags = *flagsp &~ MSG_EOR;
2165 * For any complicated cases, fall back to the full
2166 * soreceive_generic().
2168 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2169 return (soreceive_generic(so, psa, uio, mp0, controlp,
2173 * Enforce restrictions on use.
2175 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2176 ("soreceive_dgram: wantrcvd"));
2177 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2178 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2179 ("soreceive_dgram: SBS_RCVATMARK"));
2180 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2181 ("soreceive_dgram: P_CONNREQUIRED"));
2184 * Loop blocking while waiting for a datagram.
2186 SOCKBUF_LOCK(&so->so_rcv);
2187 while ((m = so->so_rcv.sb_mb) == NULL) {
2188 KASSERT(sbavail(&so->so_rcv) == 0,
2189 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2190 sbavail(&so->so_rcv)));
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (error);
		}
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
		    uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (0);
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (EWOULDBLOCK);
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (error);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (nextrecord == NULL) {
		KASSERT(so->so_rcv.sb_lastrecord == m,
		    ("soreceive_dgram: lastrecord != m"));
	}
	KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
	    ("soreceive_dgram: m_nextpkt != nextrecord"));

	/*
	 * Pull 'm' and its chain off the front of the packet queue.
	 */
	so->so_rcv.sb_mb = NULL;
	sockbuf_pushsync(&so->so_rcv, nextrecord);

	/*
	 * Walk 'm's chain and free that many bytes from the socket buffer.
	 */
	for (m2 = m; m2 != NULL; m2 = m2->m_next)
		sbfree(&so->so_rcv, m2);

	/*
	 * Do a few last checks before we let go of the lock.
	 */
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		m = m_free(m);
	}
	if (m == NULL) {
		/* XXXRW: Can this happen? */
		return (0);
	}
	/*
	 * Packet to copyout() is now in 'm' and it is disconnected from the
	 * queue.
	 *
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  We call into the
	 * protocol to perform externalization (or freeing if controlp ==
	 * NULL).  In some cases there can be only MT_CONTROL mbufs without
	 * MT_DATA mbufs.
	 */
	if (m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			m2 = m->m_next;
			m->m_next = NULL;
			*cme = m;
			cme = &(*cme)->m_next;
			m = m2;
		} while (m != NULL && m->m_type == MT_CONTROL);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp, flags);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
	}
	KASSERT(m == NULL || m->m_type == MT_DATA,
	    ("soreceive_dgram: !data"));
	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			m_freem(m);
			return (error);
		}
		if (len == m->m_len)
			m = m_free(m);
		else {
			m->m_data += len;
			m->m_len -= len;
		}
	}
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
	    controlp, flagsp));
	CURVNET_RESTORE();
	return (error);
}
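/*
 * Example (illustrative sketch only; the SOCKET_EXAMPLES guard and the
 * helper name are hypothetical, not part of this file): a kernel consumer
 * draining one message from a socket without blocking.
 */
#ifdef SOCKET_EXAMPLES
static int
example_soreceive_once(struct socket *so, struct uio *uio)
{
	struct sockaddr *from = NULL;
	int flags = MSG_DONTWAIT;
	int error;

	/* Returns EWOULDBLOCK rather than sleeping when no data is queued. */
	error = soreceive(so, &from, uio, NULL, NULL, &flags);
	if (from != NULL)
		free(from, M_SONAME);	/* Peer addresses are M_SONAME allocations. */
	return (error);
}
#endif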
int
soshutdown(struct socket *so, int how)
{
	struct protosw *pr = so->so_proto;
	int error;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);
	if ((so->so_state &
	    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0)
		return (ENOTCONN);

	CURVNET_SET(so->so_vnet);
	if (pr->pr_usrreqs->pru_flush != NULL)
		(*pr->pr_usrreqs->pru_flush)(so, how);
	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD) {
		error = (*pr->pr_usrreqs->pru_shutdown)(so);
		wakeup(&so->so_timeo);
		CURVNET_RESTORE();
		return (error);
	}
	wakeup(&so->so_timeo);
	CURVNET_RESTORE();
	return (0);
}
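/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * half-closing the send side from kernel code, e.g. after writing a final
 * message, so the peer sees EOF once the queued data drains.
 */
#ifdef SOCKET_EXAMPLES
static void
example_half_close(struct socket *so)
{

	(void)soshutdown(so, SHUT_WR);
}
#endif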
void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct socket aso;

	/*
	 * In order to avoid calling dom_dispose with the socket buffer mutex
	 * held, and in order to generally avoid holding the lock for a long
	 * time, we make a copy of the socket buffer and clear the original
	 * (except locks, state).  The new socket buffer copy won't have
	 * initialized locks so we can only call routines that won't use or
	 * assert those locks.
	 *
	 * Dislodge threads currently blocked in receive and wait to acquire
	 * a lock against other simultaneous readers before clearing the
	 * socket buffer.  Don't let our acquire be interrupted by a signal
	 * despite any existing socket disposition on interruptible waiting.
	 */
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);

	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.
	 */
	SOCKBUF_LOCK(sb);
	bzero(&aso, sizeof(aso));
	aso.so_pcb = so->so_pcb;
	bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);

	/*
	 * Dispose of special rights and flush the copied socket.  Don't call
	 * any unsafe routines (that rely on locks being initialized) on aso.
	 */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(&aso);
	sbrelease_internal(&aso.so_rcv, so);
}
/*
 * Wrapper for running a socket helper hook (hhook).
 * Parameters: socket, context of the hook point, hook id.
 */
static inline int
hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
{
	struct socket_hhook_data hhook_data = {
		.so = so,
		.hctx = hctx,
		.m = NULL,
		.status = 0
	};

	CURVNET_SET(so->so_vnet);
	HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
	CURVNET_RESTORE();

	/* Ugly but needed, since hhooks return void for now */
	return (hhook_data.status);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
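/*
 * Example (illustrative sketch only; hypothetical helper under the
 * hypothetical SOCKET_EXAMPLES guard): how a protocol's pr_ctloutput()
 * handler might use sooptcopyin() to fetch an integer-sized option.
 */
#ifdef SOCKET_EXAMPLES
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
	int optval, error;

	/* Require at least sizeof(int); any extra bytes are ignored. */
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error != 0)
		return (error);
	if (optval < 0)
		return (EINVAL);
	/* ... apply optval to protocol-private state here ... */
	return (0);
}
#endif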
/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}
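/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * a kernel consumer enabling SO_REUSEADDR on a socket it owns, mirroring
 * what setsockopt(2) does from userspace.
 */
#ifdef SOCKET_EXAMPLES
static int
example_set_reuseaddr(struct socket *so)
{
	int on = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)));
}
#endif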
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	sbintime_t val;
	uint32_t val32;
#ifdef MAC
	struct mac extmac;
#endif

	CURVNET_SET(so->so_vnet);
	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput != NULL) {
			error = (*so->so_proto->pr_ctloutput)(so, sopt);
			CURVNET_RESTORE();
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_USELOOPBACK:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SETFIB:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			if (optval < 0 || optval >= rt_numfibs) {
				error = EINVAL;
				goto bad;
			}
			if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
			    (so->so_proto->pr_domain->dom_family == PF_INET6) ||
			    (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
				so->so_fibnum = optval;
			else
				so->so_fibnum = 0;
			break;

		case SO_USER_COOKIE:
			error = sooptcopyin(sopt, &val32, sizeof val32,
			    sizeof val32);
			if (error)
				goto bad;
			so->so_user_cookie = val32;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these options,
			 * so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				(sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
				break;

			/*
			 * Make sure the low-water is never greater than the
			 * high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
#ifdef COMPAT_FREEBSD32
			if (SV_CURPROC_FLAG(SV_ILP32)) {
				struct timeval32 tv32;

				error = sooptcopyin(sopt, &tv32, sizeof tv32,
				    sizeof tv32);
				CP(tv32, tv, tv_sec);
				CP(tv32, tv, tv_usec);
			} else
#endif
				error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
			if (error)
				goto bad;
			if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
			    tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			if (tv.tv_sec > INT32_MAX)
				val = SBT_MAX;
			else
				val = tvtosbt(tv);
			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

#ifdef MAC
		case SO_LABEL:
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			break;
#endif

		default:
			if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
				error = hhook_run_socket(so, sopt,
				    HHOOK_SOCKET_OPT);
			else
				error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput != NULL)
			(void)(*so->so_proto->pr_ctloutput)(so, sopt);
	}
bad:
	CURVNET_RESTORE();
	return (error);
}
/*
 * Helper routine for getsockopt.
 */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value, possibly
	 * truncated to fit in the user's buffer.  Traditional behavior is
	 * that we always tell the user precisely how much we copied, rather
	 * than something useful like the total amount we had available.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}
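/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * the get-side counterpart of the sooptcopyin() sketch above, returning an
 * integer option value that sooptcopyout() truncates to the caller's buffer.
 */
#ifdef SOCKET_EXAMPLES
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	optval = 0;	/* ... read from protocol-private state here ... */
	return (sooptcopyout(sopt, &optval, sizeof(optval)));
}
#endif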
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
#ifdef MAC
	struct mac extmac;
#endif

	CURVNET_SET(so->so_vnet);
	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput != NULL)
			error = (*so->so_proto->pr_ctloutput)(so, sopt);
		else
			error = ENOPROTOOPT;
		CURVNET_RESTORE();
		return (error);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_getopt_accept_filter(so, sopt);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_PROTOCOL:
			optval = so->so_proto->pr_protocol;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
#ifdef COMPAT_FREEBSD32
			if (SV_CURPROC_FLAG(SV_ILP32)) {
				struct timeval32 tv32;

				CP(tv, tv32, tv_sec);
				CP(tv, tv32, tv_usec);
				error = sooptcopyout(sopt, &tv32, sizeof tv32);
			} else
#endif
				error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

#ifdef MAC
		case SO_LABEL:
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				goto bad;
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				goto bad;
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
			break;

		case SO_PEERLABEL:
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				goto bad;
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				goto bad;
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
			break;
#endif

		case SO_LISTENQLIMIT:
			optval = so->so_qlimit;
			goto integer;

		case SO_LISTENQLEN:
			optval = so->so_qlen;
			goto integer;

		case SO_LISTENINCQLEN:
			optval = so->so_incqlen;
			goto integer;

		default:
			if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
				error = hhook_run_socket(so, sopt,
				    HHOOK_SOCKET_OPT);
			else
				error = ENOPROTOOPT;
			break;
		}
	}
#ifdef MAC
bad:
#endif
	CURVNET_RESTORE();
	return (error);
}
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (ENOBUFS);
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
			    M_NOWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return (ENOBUFS);
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* ip6_sooptmcopyin() should have allocated enough space */
		panic("ip6_sooptmcopyin");
	return (0);
}
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0)
				return (error);
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* userland should have supplied enough option buffer space */
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}
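/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * staging an option value into an mbuf chain, as the IPv6 option code does,
 * by sizing the chain with soopt_getm() and filling it with soopt_mcopyin().
 */
#ifdef SOCKET_EXAMPLES
static int
example_sopt_to_mbuf(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);	/* Chain sized to sopt_valsize. */
	if (error != 0)
		return (error);
	/* soopt_mcopyin() frees the chain itself if the copyin fails. */
	return (soopt_mcopyin(sopt, *mp));
}
#endif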
/*
 * sohasoutofband(): protocol notifies socket layer of the arrival of new
 * out-of-band data, which will then notify socket consumers.
 */
void
sohasoutofband(struct socket *so)
{

	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}
int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{

	/*
	 * We do not need to set or assert curvnet as long as everyone uses
	 * sopoll_generic().
	 */
	return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
	    td));
}
int
sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (events & (POLLIN | POLLRDNORM))
		if (soreadabledata(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if ((events & POLLINIGNEOF) == 0) {
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			revents |= events & (POLLIN | POLLRDNORM);
			if (so->so_snd.sb_state & SBS_CANTSENDMORE)
				revents |= POLLHUP;
		}
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (revents);
}
static int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{

	return (EOPNOTSUPP);
}

int
pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job)
{

	return (EOPNOTSUPP);
}

int
pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{

	return (EOPNOTSUPP);
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_disconnect_notsupp(struct socket *so)
{

	return (EOPNOTSUPP);
}

int
pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{

	return (EOPNOTSUPP);
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{

	return (EOPNOTSUPP);
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{

	return (EOPNOTSUPP);
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
{

	return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one and
 * doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{

	sb->st_blksize = so->so_snd.sb_hiwat;
	return (0);
}

int
pru_shutdown_notsupp(struct socket *so)
{

	return (EOPNOTSUPP);
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{

	return (EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{

	return (EOPNOTSUPP);
}

int
pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}
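/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * a minimal protocol can point the pr_usrreqs entries it does not implement
 * at the stubs above and reuse the generic I/O routines.
 */
#ifdef SOCKET_EXAMPLES
static struct pr_usrreqs example_usrreqs = {
	.pru_connect =	 pru_connect_notsupp,
	.pru_listen =	 pru_listen_notsupp,
	.pru_rcvoob =	 pru_rcvoob_notsupp,
	.pru_sense =	 pru_sense_null,
	.pru_sosend =	 sosend_generic,
	.pru_soreceive = soreceive_generic,
	.pru_sopoll =	 sopoll_generic,
};
#endif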
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);

	if (kn->kn_sfflags & NOTE_LOWAT) {
		if (kn->kn_data >= kn->kn_sdata)
			return (1);
	} else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
		return (1);

	/* This hook returning non-zero indicates an event, not error */
	return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);

	hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);

	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so->so_snd.sb_lowat);
}
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
int
socheckuid(struct socket *so, uid_t uid)
{

	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}
/*
 * These functions are used by protocols to notify the socket layer (and its
 * consumers) of state changes in the sockets driven by protocol-side events.
 */

/*
 * Procedures to manipulate state flags of socket and do appropriate wakeups.
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.  The semantics
 * of these routines are such that connectionless protocols can call
 * soisconnected() and soisdisconnected() only, bypassing the in-progress
 * calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections already
 * made and awaiting user acceptance.  As a protocol is preparing incoming
 * connections, it creates a socket structure queued on so_incomp by calling
 * sonewconn().  When the connection is established, soisconnected() is
 * called, and transfers the socket structure to so_comp, making it available
 * to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher-level protocols are implemented in the kernel, the wakeups done
 * here will sometimes cause software-interrupt process scheduling.
 */
void
soisconnecting(struct socket *so)
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	SOCK_UNLOCK(so);
}

void
soisconnected(struct socket *so)
{
	struct socket *head;
	int ret;

restart:
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	head = so->so_head;
	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
			SOCK_UNLOCK(so);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
			so->so_qstate &= ~SQ_INCOMP;
			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
			head->so_qlen++;
			so->so_qstate |= SQ_COMP;
			ACCEPT_UNLOCK();
			sorwakeup(head);
			wakeup_one(&head->so_timeo);
		} else {
			ACCEPT_UNLOCK();
			soupcall_set(so, SO_RCV,
			    head->so_accf->so_accept_filter->accf_callback,
			    head->so_accf->so_accept_filter_arg);
			so->so_options &= ~SO_ACCEPTFILTER;
			ret = head->so_accf->so_accept_filter->accf_callback(so,
			    head->so_accf->so_accept_filter_arg, M_NOWAIT);
			if (ret == SU_ISCONNECTED)
				soupcall_clear(so, SO_RCV);
			SOCK_UNLOCK(so);
			if (ret == SU_ISCONNECTED)
				goto restart;
		}
		return;
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}
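/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * as described in the comment above, a connectionless protocol may skip the
 * in-progress states and move a socket straight to "connected".
 */
#ifdef SOCKET_EXAMPLES
static void
example_connectionless_connect(struct socket *so)
{

	/* No handshake to wait for: mark connected and wake consumers. */
	soisconnected(so);
}
#endif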
void
soisdisconnecting(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= SS_ISDISCONNECTING;
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	wakeup(&so->so_timeo);
}

void
soisdisconnected(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISDISCONNECTED;
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
	socantsendmore_locked(so);
	wakeup(&so->so_timeo);
}
/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
sodupsockaddr(const struct sockaddr *sa, int mflags)
{
	struct sockaddr *sa2;

	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}
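/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * callers own the returned copy and must free it with the M_SONAME type.
 */
#ifdef SOCKET_EXAMPLES
static void
example_dup_sockaddr(const struct sockaddr *sa)
{
	struct sockaddr *copy;

	copy = sodupsockaddr(sa, M_NOWAIT);	/* May be NULL with M_NOWAIT. */
	if (copy != NULL)
		free(copy, M_SONAME);
}
#endif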
/*
 * Register per-socket buffer upcalls.
 */
void
soupcall_set(struct socket *so, int which,
    int (*func)(struct socket *, void *, int), void *arg)
{
	struct sockbuf *sb;

	switch (which) {
	case SO_RCV:
		sb = &so->so_rcv;
		break;
	case SO_SND:
		sb = &so->so_snd;
		break;
	default:
		panic("soupcall_set: bad which");
	}
	SOCKBUF_LOCK_ASSERT(sb);
#if 0
	/* XXX: accf_http actually wants to do this on purpose. */
	KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
#endif
	sb->sb_upcall = func;
	sb->sb_upcallarg = arg;
	sb->sb_flags |= SB_UPCALL;
}

void
soupcall_clear(struct socket *so, int which)
{
	struct sockbuf *sb;

	switch (which) {
	case SO_RCV:
		sb = &so->so_rcv;
		break;
	case SO_SND:
		sb = &so->so_snd;
		break;
	default:
		panic("soupcall_clear: bad which");
	}
	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
	sb->sb_upcall = NULL;
	sb->sb_upcallarg = NULL;
	sb->sb_flags &= ~SB_UPCALL;
}
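/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * registering a receive upcall.  The callback runs with the buffer lock
 * held and must not sleep; returning SU_OK leaves the upcall installed.
 */
#ifdef SOCKET_EXAMPLES
static int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{

	/* Note the arrival of data, e.g. by scheduling a task. */
	return (SU_OK);
}

static void
example_watch_rcv(struct socket *so, void *arg)
{

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, example_rcv_upcall, arg);
	SOCKBUF_UNLOCK(&so->so_rcv);
}
#endif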
/*
 * Create an external-format (``xsocket'') structure using the information in
 * the kernel-format socket structure pointed to by so.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{

	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}
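/*
 * Example (illustrative sketch only, hypothetical SOCKET_EXAMPLES guard):
 * exporting a socket to userland from a sysctl handler via the stable
 * xsocket format rather than the raw kernel structure.
 */
#ifdef SOCKET_EXAMPLES
static int
example_export_socket(struct socket *so, struct sysctl_req *req)
{
	struct xsocket xso;

	sotoxsocket(so, &xso);
	return (SYSCTL_OUT(req, &xso, sizeof(xso)));
}
#endif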
/*
 * Socket accessor functions to provide external consumers with
 * a safe interface to socket state.
 */

void
so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
    void *arg)
{

	TAILQ_FOREACH(so, &so->so_comp, so_list)
		func(so, arg);
}

struct sockbuf *
so_sockbuf_rcv(struct socket *so)
{

	return (&so->so_rcv);
}

struct sockbuf *
so_sockbuf_snd(struct socket *so)
{

	return (&so->so_snd);
}

int
so_state_get(const struct socket *so)
{

	return (so->so_state);
}

void
so_state_set(struct socket *so, int val)
{

	so->so_state = val;
}

int
so_options_get(const struct socket *so)
{

	return (so->so_options);
}

void
so_options_set(struct socket *so, int val)
{

	so->so_options = val;
}

int
so_error_get(const struct socket *so)
{

	return (so->so_error);
}

void
so_error_set(struct socket *so, int val)
{

	so->so_error = val;
}

int
so_linger_get(const struct socket *so)
{

	return (so->so_linger);
}

void
so_linger_set(struct socket *so, int val)
{

	so->so_linger = val;
}

struct protosw *
so_protosw_get(const struct socket *so)
{

	return (so->so_proto);
}

void
so_protosw_set(struct socket *so, struct protosw *val)
{

	so->so_proto = val;
}

void
so_sorwakeup(struct socket *so)
{

	sorwakeup(so);
}

void
so_sowwakeup(struct socket *so)
{

	sowwakeup(so);
}

void
so_sorwakeup_locked(struct socket *so)
{

	sorwakeup_locked(so);
}

void
so_sowwakeup_locked(struct socket *so)
{

	sowwakeup_locked(so);
}

void
so_lock(struct socket *so)
{

	SOCK_LOCK(so);
}

void
so_unlock(struct socket *so)
{

	SOCK_UNLOCK(so);
}