2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1988, 1990, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
67 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/fcntl.h>
76 #include <sys/malloc.h>
78 #include <sys/domain.h>
79 #include <sys/file.h> /* for struct knote */
80 #include <sys/kernel.h>
81 #include <sys/event.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/socketops.h>
87 #include <sys/resourcevar.h>
88 #include <sys/signalvar.h>
89 #include <sys/sysctl.h>
92 #include <vm/vm_zone.h>
94 #include <net/netmsg2.h>
96 #include <sys/thread2.h>
97 #include <sys/socketvar2.h>
99 #include <machine/limits.h>
101 extern int tcp_sosnd_agglim;
102 extern int tcp_sosnd_async;
105 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
108 static void filt_sordetach(struct knote *kn);
109 static int filt_soread(struct knote *kn, long hint);
110 static void filt_sowdetach(struct knote *kn);
111 static int filt_sowrite(struct knote *kn, long hint);
112 static int filt_solisten(struct knote *kn, long hint);
114 static void sodiscard(struct socket *so);
115 static int soclose_sync(struct socket *so, int fflag);
116 static void soclose_fast(struct socket *so);
118 static struct filterops solisten_filtops =
119 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
120 static struct filterops soread_filtops =
121 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
122 static struct filterops sowrite_filtops =
123 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
124 static struct filterops soexcept_filtops =
125 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
127 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
128 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
129 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
132 static int somaxconn = SOMAXCONN;
133 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
134 &somaxconn, 0, "Maximum pending socket connection queue size");
136 static int use_soclose_fast = 1;
137 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
138 &use_soclose_fast, 0, "Fast socket close");
141 * Socket operation routines.
142 * These routines are called by the routines in
143 * sys_socket.c or from a system process, and
144 * implement the semantics of socket operations by
145 * switching out to the protocol specific routines.
149 * Get a socket structure, and initialize it.
150 * Note that it would probably be better to allocate socket
151 * and PCB at the same time, but I'm not convinced that all
152 * the protocols can be easily modified to do this.
160 waitmask = waitok ? M_WAITOK : M_NOWAIT;
161 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
163 /* XXX race condition for reentrant kernel */
164 TAILQ_INIT(&so->so_aiojobq);
165 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
166 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
167 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
168 lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
169 so->so_state = SS_NOFDREF;
176 socreate(int dom, struct socket **aso, int type,
177 int proto, struct thread *td)
179 struct proc *p = td->td_proc;
182 struct pru_attach_info ai;
186 prp = pffindproto(dom, proto, type);
188 prp = pffindtype(dom, type);
190 if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
191 return (EPROTONOSUPPORT);
193 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
194 prp->pr_domain->dom_family != PF_LOCAL &&
195 prp->pr_domain->dom_family != PF_INET &&
196 prp->pr_domain->dom_family != PF_INET6 &&
197 prp->pr_domain->dom_family != PF_ROUTE) {
198 return (EPROTONOSUPPORT);
201 if (prp->pr_type != type)
203 so = soalloc(p != 0);
208 * Callers of socreate() presumably will connect up a descriptor
209 * and call soclose() if they cannot. This represents our so_refs
210 * (which should be 1) from soalloc().
212 soclrstate(so, SS_NOFDREF);
215 * Set a default port for protocol processing. No action will occur
216 * on the socket on this port until an inpcb is attached to it and
217 * is able to match incoming packets, or until the socket becomes
218 * available to userland.
220 * We normally default the socket to the protocol thread on cpu 0.
221 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
222 * thread and all pr_*()/pru_*() calls are executed synchronously.
224 if (prp->pr_flags & PR_SYNC_PORT)
225 so->so_port = &netisr_sync_port;
227 so->so_port = cpu_portfn(0);
229 TAILQ_INIT(&so->so_incomp);
230 TAILQ_INIT(&so->so_comp);
232 so->so_cred = crhold(p->p_ucred);
234 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
235 ai.p_ucred = p->p_ucred;
236 ai.fd_rdir = p->p_fd->fd_rdir;
239 * Auto-sizing of socket buffers is managed by the protocols and
240 * the appropriate flags must be set in the pru_attach function.
242 error = so_pru_attach(so, proto, &ai);
244 sosetstate(so, SS_NOFDREF);
245 sofree(so); /* from soalloc */
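
/*
 * Illustrative sketch (not part of the original file): the caller pattern
 * described in socreate() above.  A hypothetical syscall-level helper
 * creates the socket, tries to hook it up to a descriptor, and falls back
 * to soclose() on failure so the reference from soalloc() is released.
 * example_install_descriptor() is purely hypothetical; only socreate()
 * and soclose() are taken from this file.
 */
#if 0
static int
example_create_socket(int domain, int type, int protocol, struct thread *td,
    struct socket **out)
{
	struct socket *so;
	int error;

	error = socreate(domain, &so, type, protocol, td);
	if (error)
		return (error);
	error = example_install_descriptor(so, td);	/* hypothetical */
	if (error) {
		soclose(so, 0);		/* drops the descriptor-less ref */
		return (error);
	}
	*out = so;
	return (0);
}
#endif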
250 * NOTE: Returns referenced socket.
257 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
261 error = so_pru_bind(so, nam, td);
266 sodealloc(struct socket *so)
268 if (so->so_rcv.ssb_hiwat)
269 (void)chgsbsize(so->so_cred->cr_uidinfo,
270 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
271 if (so->so_snd.ssb_hiwat)
272 (void)chgsbsize(so->so_cred->cr_uidinfo,
273 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
275 /* remove accept filter if present */
276 if (so->so_accf != NULL)
277 do_setopt_accept_filter(so, NULL);
284 solisten(struct socket *so, int backlog, struct thread *td)
288 short oldopt, oldqlimit;
291 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
295 oldopt = so->so_options;
296 oldqlimit = so->so_qlimit;
299 lwkt_gettoken(&so->so_rcv.ssb_token);
300 if (TAILQ_EMPTY(&so->so_comp))
301 so->so_options |= SO_ACCEPTCONN;
302 lwkt_reltoken(&so->so_rcv.ssb_token);
303 if (backlog < 0 || backlog > somaxconn)
305 so->so_qlimit = backlog;
306 /* SCTP needs to tweak both the inbound backlog parameter AND
307 * the so_options (the UDP model both connects and accepts inbound
308 * connections implicitly).
310 error = so_pru_listen(so, td);
313 /* Restore the params */
314 so->so_options = oldopt;
315 so->so_qlimit = oldqlimit;
323 * Destroy a disconnected socket. This routine is a NOP if entities
324 * still have a reference on the socket:
326 * so_pcb - The protocol stack still has a reference
327 * SS_NOFDREF - There is no longer a file pointer reference
330 sofree(struct socket *so)
335 * This is a bit hackish at the moment. We need to interlock
336 * any accept queue we are on before we potentially lose the
337 * last reference to avoid races against a re-reference from
338 * someone operating on the queue.
340 while ((head = so->so_head) != NULL) {
341 lwkt_getpooltoken(head);
342 if (so->so_head == head)
344 lwkt_relpooltoken(head);
348 * Arbitrate the last free.
350 KKASSERT(so->so_refs > 0);
351 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
353 lwkt_relpooltoken(head);
357 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
358 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);
361 * We're done; remove ourselves from the accept queue we are
362 * on, if we are on one.
365 if (so->so_state & SS_INCOMP) {
366 TAILQ_REMOVE(&head->so_incomp, so, so_list);
368 } else if (so->so_state & SS_COMP) {
370 * We must not decommission a socket that's
371 * on the accept(2) queue. If we do, then
372 * accept(2) may hang after select(2) indicated
373 * that the listening socket was ready.
375 lwkt_relpooltoken(head);
378 panic("sofree: not queued");
380 soclrstate(so, SS_INCOMP);
382 lwkt_relpooltoken(head);
384 ssb_release(&so->so_snd, so);
390 * Close a socket on last file table reference removal.
391 * Initiate disconnect if connected.
392 * Free socket when disconnect complete.
395 soclose(struct socket *so, int fflag)
399 funsetown(&so->so_sigio);
400 if (!use_soclose_fast ||
401 (so->so_proto->pr_flags & PR_SYNC_PORT) ||
402 (so->so_options & SO_LINGER)) {
403 error = soclose_sync(so, fflag);
412 sodiscard(struct socket *so)
414 lwkt_getpooltoken(so);
415 if (so->so_options & SO_ACCEPTCONN) {
418 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
419 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
420 soclrstate(sp, SS_INCOMP);
425 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
426 TAILQ_REMOVE(&so->so_comp, sp, so_list);
427 soclrstate(sp, SS_COMP);
433 lwkt_relpooltoken(so);
435 if (so->so_state & SS_NOFDREF)
436 panic("soclose: NOFDREF");
437 sosetstate(so, SS_NOFDREF); /* take ref */
441 soclose_sync(struct socket *so, int fflag)
445 if (so->so_pcb == NULL)
447 if (so->so_state & SS_ISCONNECTED) {
448 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
449 error = sodisconnect(so);
453 if (so->so_options & SO_LINGER) {
454 if ((so->so_state & SS_ISDISCONNECTING) &&
457 while (so->so_state & SS_ISCONNECTED) {
458 error = tsleep(&so->so_timeo, PCATCH,
459 "soclos", so->so_linger * hz);
469 error2 = so_pru_detach(so);
475 so_pru_sync(so); /* unpend async sending */
476 sofree(so); /* dispose of ref */
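
/*
 * Illustrative userland sketch (not part of the original file): setting
 * SO_LINGER forces the final close() through the synchronous path above,
 * where it blocks in the tsleep() loop for up to l_linger seconds while
 * the connection drains.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void
example_linger_close(int s)
{
	struct linger l;

	l.l_onoff = 1;		/* enable lingering on close */
	l.l_linger = 5;		/* wait at most 5 seconds */
	(void)setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
	close(s);		/* may block until data is sent or timeout */
}
#endif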
482 soclose_sofree_async_handler(netmsg_t msg)
484 sofree(msg->base.nm_so);
488 soclose_sofree_async(struct socket *so)
490 struct netmsg_base *base = &so->so_clomsg;
492 netmsg_init(base, so, &netisr_apanic_rport, 0,
493 soclose_sofree_async_handler);
494 lwkt_sendmsg(so->so_port, &base->lmsg);
498 soclose_disconn_async_handler(netmsg_t msg)
500 struct socket *so = msg->base.nm_so;
502 if ((so->so_state & SS_ISCONNECTED) &&
503 (so->so_state & SS_ISDISCONNECTING) == 0)
504 so_pru_disconnect_direct(so);
507 so_pru_detach_direct(so);
514 soclose_disconn_async(struct socket *so)
516 struct netmsg_base *base = &so->so_clomsg;
518 netmsg_init(base, so, &netisr_apanic_rport, 0,
519 soclose_disconn_async_handler);
520 lwkt_sendmsg(so->so_port, &base->lmsg);
524 soclose_detach_async_handler(netmsg_t msg)
526 struct socket *so = msg->base.nm_so;
529 so_pru_detach_direct(so);
536 soclose_detach_async(struct socket *so)
538 struct netmsg_base *base = &so->so_clomsg;
540 netmsg_init(base, so, &netisr_apanic_rport, 0,
541 soclose_detach_async_handler);
542 lwkt_sendmsg(so->so_port, &base->lmsg);
546 soclose_fast(struct socket *so)
548 if (so->so_pcb == NULL)
551 if ((so->so_state & SS_ISCONNECTED) &&
552 (so->so_state & SS_ISDISCONNECTING) == 0) {
553 soclose_disconn_async(so);
558 soclose_detach_async(so);
564 soclose_sofree_async(so);
568 * Abort and destroy a socket. Only one abort can be in progress
569 * at any given moment.
572 soabort(struct socket *so)
579 soaborta(struct socket *so)
586 soabort_oncpu(struct socket *so)
589 so_pru_abort_oncpu(so);
593 * The socket is passed in ref'd; that reference becomes owned
594 * by the cleared SS_NOFDREF flag.
597 soaccept(struct socket *so, struct sockaddr **nam)
601 if ((so->so_state & SS_NOFDREF) == 0)
602 panic("soaccept: !NOFDREF");
603 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */
604 error = so_pru_accept_direct(so, nam);
609 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
613 if (so->so_options & SO_ACCEPTCONN)
616 * If protocol is connection-based, can only connect once.
617 * Otherwise, if connected, try to disconnect first.
618 * This allows user to disconnect by connecting to, e.g.,
621 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
622 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
623 (error = sodisconnect(so)))) {
627 * Prevent accumulated error from previous connection
631 error = so_pru_connect(so, nam, td);
637 soconnect2(struct socket *so1, struct socket *so2)
641 error = so_pru_connect2(so1, so2);
646 sodisconnect(struct socket *so)
650 if ((so->so_state & SS_ISCONNECTED) == 0) {
654 if (so->so_state & SS_ISDISCONNECTING) {
658 error = so_pru_disconnect(so);
663 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
666 * If send must go all at once and message is larger than
667 * send buffering, then hard error.
668 * Lock against other senders.
669 * If must go all at once and not enough room now, then
670 * inform user that this would block and do nothing.
671 * Otherwise, if nonblocking, send as much as possible.
672 * The data to be sent is described by "uio" if nonzero,
673 * otherwise by the mbuf chain "top" (which must be null
674 * if uio is not). Data provided in mbuf chain must be small
675 * enough to send all at once.
677 * Returns nonzero on error, timeout or signal; callers
678 * must check for short counts if EINTR/ERESTART are returned.
679 * Data and control buffers are freed on return.
682 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
683 struct mbuf *top, struct mbuf *control, int flags,
690 int clen = 0, error, dontroute, mlen;
691 int atomic = sosendallatonce(so) || top;
695 resid = uio->uio_resid;
697 resid = (size_t)top->m_pkthdr.len;
700 for (m = top; m; m = m->m_next)
702 KKASSERT(top->m_pkthdr.len == len);
707 * WARNING! resid is unsigned, space and len are signed. space
708 * can wind up negative if the sockbuf is overcommitted.
710 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
711 * type sockets since that's an error.
713 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
719 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
720 (so->so_proto->pr_flags & PR_ATOMIC);
721 if (td->td_lwp != NULL)
722 td->td_lwp->lwp_ru.ru_msgsnd++;
724 clen = control->m_len;
725 #define gotoerr(errcode) { error = errcode; goto release; }
728 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
733 if (so->so_state & SS_CANTSENDMORE)
736 error = so->so_error;
740 if ((so->so_state & SS_ISCONNECTED) == 0) {
742 * `sendto' and `sendmsg' are allowed on a connection-
743 * based socket if it supports implied connect.
744 * Return ENOTCONN if not connected and no address is
747 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
748 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
749 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
750 !(resid == 0 && clen != 0))
752 } else if (addr == 0)
753 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
754 ENOTCONN : EDESTADDRREQ);
756 if ((atomic && resid > so->so_snd.ssb_hiwat) ||
757 clen > so->so_snd.ssb_hiwat) {
760 space = ssb_space(&so->so_snd);
763 if ((space < 0 || (size_t)space < resid + clen) && uio &&
764 (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
765 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
766 gotoerr(EWOULDBLOCK);
767 ssb_unlock(&so->so_snd);
768 error = ssb_wait(&so->so_snd);
778 * Data is prepackaged in "top".
782 top->m_flags |= M_EOR;
786 m = m_getl((int)resid, MB_WAIT, MT_DATA,
787 top == NULL ? M_PKTHDR : 0, &mlen);
790 m->m_pkthdr.rcvif = NULL;
792 len = imin((int)szmin(mlen, resid), space);
793 if (resid < MINCLSIZE) {
795 * For datagram protocols, leave room
796 * for protocol headers in first mbuf.
798 if (atomic && top == 0 && len < mlen)
802 error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
803 resid = uio->uio_resid;
806 top->m_pkthdr.len += len;
812 top->m_flags |= M_EOR;
815 } while (space > 0 && atomic);
817 so->so_options |= SO_DONTROUTE;
818 if (flags & MSG_OOB) {
819 pru_flags = PRUS_OOB;
820 } else if ((flags & MSG_EOF) &&
821 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
824 * If the user set MSG_EOF, the protocol
825 * understands this flag and nothing left to
826 * send then use PRU_SEND_EOF instead of PRU_SEND.
828 pru_flags = PRUS_EOF;
829 } else if (resid > 0 && space > 0) {
830 /* If there is more to send, set PRUS_MORETOCOME */
831 pru_flags = PRUS_MORETOCOME;
836 * XXX all the SS_CANTSENDMORE checks previously
837 * done could be out of date. We could have received
838 * a reset packet in an interrupt or maybe we slept
839 * while doing page faults in uiomove() etc. We could
840 * probably recheck again inside the splnet() protection
841 * here, but there are probably other places that this
842 * also happens. We must rethink this.
844 error = so_pru_send(so, pru_flags, top, addr, control, td);
846 so->so_options &= ~SO_DONTROUTE;
853 } while (resid && space > 0);
857 ssb_unlock(&so->so_snd);
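
/*
 * Illustrative sketch (not part of the original file): per the header
 * comment above sosend(), callers must check for a short count when
 * EINTR or ERESTART is returned, since part of the uio may already have
 * been transmitted.  The helper name is hypothetical.
 */
#if 0
static int
example_sosend_partial(struct socket *so, struct uio *uio, struct thread *td)
{
	size_t before = uio->uio_resid;
	int error;

	error = sosend(so, NULL, uio, NULL, NULL, 0, td);
	if ((error == EINTR || error == ERESTART) &&
	    uio->uio_resid != before) {
		/* some data went out; report a short write, not an error */
		error = 0;
	}
	return (error);
}
#endif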
867 * A specialization of sosend() for UDP based on protocol-specific knowledge:
868 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
869 * sosendallatonce() returns true,
870 * the "atomic" variable is true,
871 * and sosendudp() blocks until space is available for the entire send.
872 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
873 * PR_IMPLOPCL flags set.
874 * UDP has no out-of-band data.
875 * UDP has no control data.
876 * UDP does not support MSG_EOR.
879 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
880 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
882 boolean_t dontroute; /* temporary SO_DONTROUTE setting */
887 if (td->td_lwp != NULL)
888 td->td_lwp->lwp_ru.ru_msgsnd++;
892 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
893 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;
896 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
900 if (so->so_state & SS_CANTSENDMORE)
903 error = so->so_error;
907 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
908 gotoerr(EDESTADDRREQ);
909 if (resid > so->so_snd.ssb_hiwat)
911 space = ssb_space(&so->so_snd);
912 if (uio && (space < 0 || (size_t)space < resid)) {
913 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
914 gotoerr(EWOULDBLOCK);
915 ssb_unlock(&so->so_snd);
916 error = ssb_wait(&so->so_snd);
923 top = m_uiomove(uio);
928 dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
930 so->so_options |= SO_DONTROUTE;
932 error = so_pru_send(so, 0, top, addr, NULL, td);
933 top = NULL; /* sent or freed in lower layer */
936 so->so_options &= ~SO_DONTROUTE;
939 ssb_unlock(&so->so_snd);
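
/*
 * Illustrative userland sketch (not part of the original file): because
 * UDP sends are atomic (PR_ATOMIC), a datagram that can never fit in the
 * send buffer is rejected outright (typically EMSGSIZE) by the ssb_hiwat
 * check above rather than being sent piecemeal.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t
example_udp_send(int s, const void *buf, size_t len,
    const struct sockaddr *to, socklen_t tolen)
{
	/* fails if len exceeds the socket's send buffer size */
	return (sendto(s, buf, len, 0, to, tolen));
}
#endif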
947 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
948 struct mbuf *top, struct mbuf *control, int flags,
960 KKASSERT(top == NULL);
962 resid = uio->uio_resid;
965 resid = (size_t)top->m_pkthdr.len;
968 for (m = top; m; m = m->m_next)
970 KKASSERT(top->m_pkthdr.len == len);
975 * WARNING! resid is unsigned, space and len are signed. space
976 * can wind up negative if the sockbuf is overcommitted.
978 * Also check to make sure that MSG_EOR isn't used on TCP
980 if (flags & MSG_EOR) {
986 /* TCP doesn't do control messages (rights, creds, etc) */
987 if (control->m_len) {
991 m_freem(control); /* empty control, just free it */
995 if (td->td_lwp != NULL)
996 td->td_lwp->lwp_ru.ru_msgsnd++;
998 #define gotoerr(errcode) { error = errcode; goto release; }
1001 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
1006 if (so->so_state & SS_CANTSENDMORE)
1009 error = so->so_error;
1013 if ((so->so_state & SS_ISCONNECTED) == 0 &&
1014 (so->so_state & SS_ISCONFIRMING) == 0)
1016 if (allatonce && resid > so->so_snd.ssb_hiwat)
1019 space = ssb_space(&so->so_snd);
1020 if (flags & MSG_OOB)
1022 if ((space < 0 || (size_t)space < resid) && !allatonce &&
1023 space < so->so_snd.ssb_lowat) {
1024 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
1025 gotoerr(EWOULDBLOCK);
1026 ssb_unlock(&so->so_snd);
1027 error = ssb_wait(&so->so_snd);
1034 int cnt = 0, async = 0;
1038 * Data is prepackaged in "top".
1042 if (resid > INT_MAX)
1044 m = m_getl((int)resid, MB_WAIT, MT_DATA,
1045 top == NULL ? M_PKTHDR : 0, &mlen);
1047 m->m_pkthdr.len = 0;
1048 m->m_pkthdr.rcvif = NULL;
1050 len = imin((int)szmin(mlen, resid), space);
1052 error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
1053 resid = uio->uio_resid;
1056 top->m_pkthdr.len += len;
1063 } while (space > 0 && cnt < tcp_sosnd_agglim);
1065 if (tcp_sosnd_async)
1068 if (flags & MSG_OOB) {
1069 pru_flags = PRUS_OOB;
1071 } else if ((flags & MSG_EOF) && resid == 0) {
1072 pru_flags = PRUS_EOF;
1073 } else if (resid > 0 && space > 0) {
1074 /* If there is more to send, set PRUS_MORETOCOME */
1075 pru_flags = PRUS_MORETOCOME;
1081 if (flags & MSG_SYNC)
1085 * XXX all the SS_CANTSENDMORE checks previously
1086 * done could be out of date. We could have received
1087 * a reset packet in an interrupt or maybe we slept
1088 * while doing page faults in uiomove() etc. We could
1089 * probably recheck again inside the splnet() protection
1090 * here, but there are probably other places that this
1091 * also happens. We must rethink this.
1094 error = so_pru_send(so, pru_flags, top,
1097 so_pru_send_async(so, pru_flags, top,
1106 } while (resid && space > 0);
1110 ssb_unlock(&so->so_snd);
1120 * Implement receive operations on a socket.
1122 * We depend on the way that records are added to the signalsockbuf
1123 * by sbappend*. In particular, each record (mbufs linked through m_next)
1124 * must begin with an address if the protocol so specifies,
1125 * followed by an optional mbuf or mbufs containing ancillary data,
1126 * and then zero or more mbufs of data.
1128 * Although the signalsockbuf is locked, new data may still be appended.
1129 * A token inside the ssb_lock deals with MP issues and still allows
1130 * the network to access the socket if we block in a uio.
1132 * The caller may receive the data as a single mbuf chain by supplying
1133 * an mbuf **mp0 for use in returning the chain. The uio is then used
1134 * only for the count in uio_resid.
1137 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
1138 struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
1141 struct mbuf *free_chain = NULL;
1142 int flags, len, error, offset;
1143 struct protosw *pr = so->so_proto;
1145 size_t resid, orig_resid;
1148 resid = uio->uio_resid;
1150 resid = (size_t)(sio->sb_climit - sio->sb_cc);
1158 flags = *flagsp &~ MSG_EOR;
1161 if (flags & MSG_OOB) {
1162 m = m_get(MB_WAIT, MT_DATA);
1165 error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
1171 KKASSERT(resid >= (size_t)m->m_len);
1172 resid -= (size_t)m->m_len;
1173 } while (resid > 0 && m);
1176 uio->uio_resid = resid;
1177 error = uiomove(mtod(m, caddr_t),
1178 (int)szmin(resid, m->m_len),
1180 resid = uio->uio_resid;
1182 } while (uio->uio_resid && error == 0 && m);
1189 if ((so->so_state & SS_ISCONFIRMING) && resid)
1193 * The token interlocks against the protocol thread while
1194 * ssb_lock is a blocking lock against other userland entities.
1196 lwkt_gettoken(&so->so_rcv.ssb_token);
1198 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
1202 m = so->so_rcv.ssb_mb;
1204 * If we have less data than requested, block awaiting more
1205 * (subject to any timeout) if:
1206 * 1. the current count is less than the low water mark, or
1207 * 2. MSG_WAITALL is set, and it is possible to do the entire
1208 * receive operation at once if we block (resid <= hiwat), and
1209 * 3. MSG_DONTWAIT is not set.
1210 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1211 * we have to do the receive in sections, and thus risk returning
1212 * a short count if a timeout or signal occurs after we start.
1214 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1215 (size_t)so->so_rcv.ssb_cc < resid) &&
1216 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
1217 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
1218 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
1219 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
1223 error = so->so_error;
1224 if ((flags & MSG_PEEK) == 0)
1228 if (so->so_state & SS_CANTRCVMORE) {
1234 for (; m; m = m->m_next) {
1235 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1236 m = so->so_rcv.ssb_mb;
1240 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1241 (pr->pr_flags & PR_CONNREQUIRED)) {
1247 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
1248 error = EWOULDBLOCK;
1251 ssb_unlock(&so->so_rcv);
1252 error = ssb_wait(&so->so_rcv);
1258 if (uio && uio->uio_td && uio->uio_td->td_proc)
1259 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
1262 * note: m should be == sb_mb here. Cache the next record while
1263 * cleaning up. Note that calling m_free*() will break out critical
1266 KKASSERT(m == so->so_rcv.ssb_mb);
1269 * Skip any address mbufs prepending the record.
1271 if (pr->pr_flags & PR_ADDR) {
1272 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
1275 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
1276 if (flags & MSG_PEEK)
1279 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1283 * Skip any control mbufs prepending the record.
1286 if (pr->pr_flags & PR_ADDR_OPT) {
1288 * For SCTP we may be getting a
1289 * whole message OR a partial delivery.
1291 if (m && m->m_type == MT_SONAME) {
1294 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
1295 if (flags & MSG_PEEK)
1298 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1302 while (m && m->m_type == MT_CONTROL && error == 0) {
1303 if (flags & MSG_PEEK) {
1305 *controlp = m_copy(m, 0, m->m_len);
1306 m = m->m_next; /* XXX race */
1309 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1310 if (pr->pr_domain->dom_externalize &&
1311 mtod(m, struct cmsghdr *)->cmsg_type ==
1313 error = (*pr->pr_domain->dom_externalize)(m);
1317 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1320 if (controlp && *controlp) {
1322 controlp = &(*controlp)->m_next;
1331 if (type == MT_OOBDATA)
1336 * Copy to the UIO or mbuf return chain (*mp).
1340 while (m && resid > 0 && error == 0) {
1341 if (m->m_type == MT_OOBDATA) {
1342 if (type != MT_OOBDATA)
1344 } else if (type == MT_OOBDATA)
1347 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1349 soclrstate(so, SS_RCVATMARK);
1350 len = (resid > INT_MAX) ? INT_MAX : resid;
1351 if (so->so_oobmark && len > so->so_oobmark - offset)
1352 len = so->so_oobmark - offset;
1353 if (len > m->m_len - moff)
1354 len = m->m_len - moff;
1357 * Copy out to the UIO or pass the mbufs back to the SIO.
1358 * The SIO is dealt with when we eat the mbuf, but deal
1359 * with the resid here either way.
1362 uio->uio_resid = resid;
1363 error = uiomove(mtod(m, caddr_t) + moff, len, uio);
1364 resid = uio->uio_resid;
1368 resid -= (size_t)len;
1372 * Eat the entire mbuf or just a piece of it
1374 if (len == m->m_len - moff) {
1375 if (m->m_flags & M_EOR)
1378 if (m->m_flags & M_NOTIFICATION)
1379 flags |= MSG_NOTIFICATION;
1381 if (flags & MSG_PEEK) {
1386 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1390 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1394 if (flags & MSG_PEEK) {
1398 n = m_copym(m, 0, len, MB_WAIT);
1404 so->so_rcv.ssb_cc -= len;
1407 if (so->so_oobmark) {
1408 if ((flags & MSG_PEEK) == 0) {
1409 so->so_oobmark -= len;
1410 if (so->so_oobmark == 0) {
1411 sosetstate(so, SS_RCVATMARK);
1416 if (offset == so->so_oobmark)
1420 if (flags & MSG_EOR)
1423 * If the MSG_WAITALL flag is set (for non-atomic socket),
1424 * we must not quit until resid == 0 or an error
1425 * termination. If a signal/timeout occurs, return
1426 * with a short count but without error.
1427 * Keep signalsockbuf locked against other readers.
1429 while ((flags & MSG_WAITALL) && m == NULL &&
1430 resid > 0 && !sosendallatonce(so) &&
1431 so->so_rcv.ssb_mb == NULL) {
1432 if (so->so_error || so->so_state & SS_CANTRCVMORE)
1435 * The window might have closed to zero; make
1436 * sure we send an ack now that we've drained
1437 * the buffer, or we might end up blocking until
1438 * the idle timer takes over (5 seconds).
1440 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1441 so_pru_rcvd(so, flags);
1442 error = ssb_wait(&so->so_rcv);
1444 ssb_unlock(&so->so_rcv);
1448 m = so->so_rcv.ssb_mb;
1453 * If an atomic read was requested but unread data still remains
1454 * in the record, set MSG_TRUNC.
1456 if (m && pr->pr_flags & PR_ATOMIC)
1460 * Cleanup. If an atomic read was requested drop any unread data.
1462 if ((flags & MSG_PEEK) == 0) {
1463 if (m && (pr->pr_flags & PR_ATOMIC))
1464 sbdroprecord(&so->so_rcv.sb);
1465 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
1466 so_pru_rcvd(so, flags);
1469 if (orig_resid == resid && orig_resid &&
1470 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1471 ssb_unlock(&so->so_rcv);
1478 ssb_unlock(&so->so_rcv);
1480 lwkt_reltoken(&so->so_rcv.ssb_token);
1482 m_freem(free_chain);
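
/*
 * Illustrative userland sketch (not part of the original file): the
 * MSG_WAITALL handling in soreceive() above means a stream recv() does
 * not return until the full request is satisfied, an error occurs, or a
 * signal/timeout leaves a short count.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t
example_recv_exact(int s, void *buf, size_t len)
{
	/* returns len on success, a short count on signal/timeout/EOF */
	return (recv(s, buf, len, MSG_WAITALL));
}
#endif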
1487 * Shut a socket down. Note that we do not get a frontend lock as we
1488 * want to be able to shut the socket down even if another thread is
1489 * blocked in a read(), thus waking it up.
1492 soshutdown(struct socket *so, int how)
1494 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1497 if (how != SHUT_WR) {
1498 /*ssb_lock(&so->so_rcv, M_WAITOK);*/
1500 /*ssb_unlock(&so->so_rcv);*/
1503 return (so_pru_shutdown(so));
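
/*
 * Illustrative userland sketch (not part of the original file): as noted
 * above soshutdown(), no frontend lock is taken, so shutting down the
 * read side wakes another thread blocked in read()/recv() on the same
 * socket, which then sees EOF.
 */
#if 0
#include <sys/socket.h>

static void
example_wake_blocked_reader(int s)
{
	shutdown(s, SHUT_RD);	/* blocked readers return 0 (EOF) */
}
#endif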
1508 sorflush(struct socket *so)
1510 struct signalsockbuf *ssb = &so->so_rcv;
1511 struct protosw *pr = so->so_proto;
1512 struct signalsockbuf asb;
1514 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);
1516 lwkt_gettoken(&ssb->ssb_token);
1521 * Can't just blow up the ssb structure here
1523 bzero(&ssb->sb, sizeof(ssb->sb));
1528 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);
1530 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
1531 (*pr->pr_domain->dom_dispose)(asb.ssb_mb);
1532 ssb_release(&asb, so);
1534 lwkt_reltoken(&ssb->ssb_token);
1539 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
1541 struct accept_filter_arg *afap = NULL;
1542 struct accept_filter *afp;
1543 struct so_accf *af = so->so_accf;
1546 /* do not set/remove accept filters on non listen sockets */
1547 if ((so->so_options & SO_ACCEPTCONN) == 0) {
1552 /* removing the filter */
1555 if (af->so_accept_filter != NULL &&
1556 af->so_accept_filter->accf_destroy != NULL) {
1557 af->so_accept_filter->accf_destroy(so);
1559 if (af->so_accept_filter_str != NULL) {
1560 FREE(af->so_accept_filter_str, M_ACCF);
1565 so->so_options &= ~SO_ACCEPTFILTER;
1568 /* adding a filter */
1569 /* must remove previous filter first */
1574 /* don't put large objects on the kernel stack */
1575 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
1576 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1577 afap->af_name[sizeof(afap->af_name)-1] = '\0';
1578 afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1581 afp = accept_filt_get(afap->af_name);
1586 MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
1587 if (afp->accf_create != NULL) {
1588 if (afap->af_name[0] != '\0') {
1589 int len = strlen(afap->af_name) + 1;
1591 MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
1592 strcpy(af->so_accept_filter_str, afap->af_name);
1594 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
1595 if (af->so_accept_filter_arg == NULL) {
1596 FREE(af->so_accept_filter_str, M_ACCF);
1603 af->so_accept_filter = afp;
1605 so->so_options |= SO_ACCEPTFILTER;
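
/*
 * Illustrative userland sketch (not part of the original file): an accept
 * filter is attached to a listening socket through the SO_ACCEPTFILTER
 * option handled by do_setopt_accept_filter() above.  The filter module
 * (here "dataready", provided by accf_data) must be present in the kernel
 * for accept_filt_get() to find it.
 */
#if 0
#include <sys/socket.h>
#include <string.h>

static int
example_set_accept_filter(int listen_fd)
{
	struct accept_filter_arg afa;

	memset(&afa, 0, sizeof(afa));
	strcpy(afa.af_name, "dataready");	/* accf_data filter */
	/* passing a NULL option value instead removes an installed filter */
	return (setsockopt(listen_fd, SOL_SOCKET, SO_ACCEPTFILTER,
	    &afa, sizeof(afa)));
}
#endif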
1614 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1615 * an additional variant to handle the case where the option value needs
1616 * to be some kind of integer, but not a specific size.
1617 * In addition to their use here, these functions are also called by the
1618 * protocol-level pr_ctloutput() routines.
1621 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1623 return soopt_to_kbuf(sopt, buf, len, minlen);
1627 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1631 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
1632 KKASSERT(kva_p(buf));
1635 * If the user gives us more than we wanted, we ignore it,
1636 * but if we don't get the minimum length the caller
1637 * wants, we return EINVAL. On success, sopt->sopt_valsize
1638 * is set to however much we actually retrieved.
1640 if ((valsize = sopt->sopt_valsize) < minlen)
1643 sopt->sopt_valsize = valsize = len;
1645 bcopy(sopt->sopt_val, buf, valsize);
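
/*
 * Illustrative sketch (not part of the original file): the comment above
 * notes that sooptcopyin()/sooptcopyout() are also used by protocol-level
 * pr_ctloutput() routines.  A hypothetical handler for an integer-valued
 * option would look roughly like this.
 */
#if 0
static int
example_ctloutput_int(struct sockopt *sopt, int *valp)
{
	int optval, error;

	if (sopt->sopt_dir == SOPT_SET) {
		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error == 0)
			*valp = optval;
	} else {
		optval = *valp;
		error = sooptcopyout(sopt, &optval, sizeof(optval));
	}
	return (error);
}
#endif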
1651 sosetopt(struct socket *so, struct sockopt *sopt)
1657 struct signalsockbuf *sotmp;
1660 sopt->sopt_dir = SOPT_SET;
1661 if (sopt->sopt_level != SOL_SOCKET) {
1662 if (so->so_proto && so->so_proto->pr_ctloutput) {
1663 return (so_pr_ctloutput(so, sopt));
1665 error = ENOPROTOOPT;
1667 switch (sopt->sopt_name) {
1669 case SO_ACCEPTFILTER:
1670 error = do_setopt_accept_filter(so, sopt);
1676 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1680 so->so_linger = l.l_linger;
1682 so->so_options |= SO_LINGER;
1684 so->so_options &= ~SO_LINGER;
1690 case SO_USELOOPBACK:
1696 error = sooptcopyin(sopt, &optval, sizeof optval,
1701 so->so_options |= sopt->sopt_name;
1703 so->so_options &= ~sopt->sopt_name;
1710 error = sooptcopyin(sopt, &optval, sizeof optval,
1716 * Values < 1 make no sense for any of these
1717 * options, so disallow them.
1724 switch (sopt->sopt_name) {
1727 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
1728 &so->so_snd : &so->so_rcv, (u_long)optval,
1730 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
1734 sotmp = (sopt->sopt_name == SO_SNDBUF) ?
1735 &so->so_snd : &so->so_rcv;
1736 atomic_clear_int(&sotmp->ssb_flags,
1741 * Make sure the low-water is never greater than
1745 so->so_snd.ssb_lowat =
1746 (optval > so->so_snd.ssb_hiwat) ?
1747 so->so_snd.ssb_hiwat : optval;
1748 atomic_clear_int(&so->so_snd.ssb_flags,
1752 so->so_rcv.ssb_lowat =
1753 (optval > so->so_rcv.ssb_hiwat) ?
1754 so->so_rcv.ssb_hiwat : optval;
1755 atomic_clear_int(&so->so_rcv.ssb_flags,
1763 error = sooptcopyin(sopt, &tv, sizeof tv,
1768 /* assert(hz > 0); */
1769 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
1770 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1774 /* assert(tick > 0); */
1775 /* assert(ULONG_MAX - INT_MAX >= 1000000); */
1776 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
1777 if (val > INT_MAX) {
1781 if (val == 0 && tv.tv_usec != 0)
1784 switch (sopt->sopt_name) {
1786 so->so_snd.ssb_timeo = val;
1789 so->so_rcv.ssb_timeo = val;
1794 error = ENOPROTOOPT;
1797 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
1798 (void) so_pr_ctloutput(so, sopt);
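
/*
 * Worked example (not part of the original file) for the SO_SNDTIMEO /
 * SO_RCVTIMEO conversion above: the timeval is turned into scheduler
 * ticks as val = tv_sec * hz + tv_usec / ustick, where ustick is the
 * number of microseconds per tick (1000000 / hz).  With hz = 100
 * (ustick = 10000), a timeout of { 2, 500000 } becomes
 * 2 * 100 + 500000 / 10000 = 250 ticks; that is what ssb_timeo holds and
 * what sogetopt() later converts back to a timeval.
 */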
1805 /* Helper routine for getsockopt */
1807 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
1809 soopt_from_kbuf(sopt, buf, len);
1814 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
1819 sopt->sopt_valsize = 0;
1823 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
1824 KKASSERT(kva_p(buf));
1827 * Documented get behavior is that we always return a value,
1828 * possibly truncated to fit in the user's buffer.
1829 * Traditional behavior is that we always tell the user
1830 * precisely how much we copied, rather than something useful
1831 * like the total amount we had available for her.
1832 * Note that this interface is not idempotent; the entire answer must
1833 * be generated ahead of time.
1835 valsize = szmin(len, sopt->sopt_valsize);
1836 sopt->sopt_valsize = valsize;
1837 if (sopt->sopt_val != 0) {
1838 bcopy(buf, sopt->sopt_val, valsize);
1843 sogetopt(struct socket *so, struct sockopt *sopt)
1850 struct accept_filter_arg *afap;
1854 sopt->sopt_dir = SOPT_GET;
1855 if (sopt->sopt_level != SOL_SOCKET) {
1856 if (so->so_proto && so->so_proto->pr_ctloutput) {
1857 return (so_pr_ctloutput(so, sopt));
1859 return (ENOPROTOOPT);
1861 switch (sopt->sopt_name) {
1863 case SO_ACCEPTFILTER:
1864 if ((so->so_options & SO_ACCEPTCONN) == 0)
1866 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
1867 M_TEMP, M_WAITOK | M_ZERO);
1868 if ((so->so_options & SO_ACCEPTFILTER) != 0) {
1869 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
1870 if (so->so_accf->so_accept_filter_str != NULL)
1871 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
1873 error = sooptcopyout(sopt, afap, sizeof(*afap));
1879 l.l_onoff = so->so_options & SO_LINGER;
1880 l.l_linger = so->so_linger;
1881 error = sooptcopyout(sopt, &l, sizeof l);
1884 case SO_USELOOPBACK:
1893 optval = so->so_options & sopt->sopt_name;
1895 error = sooptcopyout(sopt, &optval, sizeof optval);
1899 optval = so->so_type;
1903 optval = so->so_error;
1908 optval = so->so_snd.ssb_hiwat;
1912 optval = so->so_rcv.ssb_hiwat;
1916 optval = so->so_snd.ssb_lowat;
1920 optval = so->so_rcv.ssb_lowat;
1925 optval = (sopt->sopt_name == SO_SNDTIMEO ?
1926 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);
1928 tv.tv_sec = optval / hz;
1929 tv.tv_usec = (optval % hz) * ustick;
1930 error = sooptcopyout(sopt, &tv, sizeof tv);
1934 optval_l = ssb_space(&so->so_snd);
1935 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
1939 error = ENOPROTOOPT;
1946 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
1948 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1950 struct mbuf *m, *m_prev;
1951 int sopt_size = sopt->sopt_valsize, msize;
1953 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
1957 m->m_len = min(msize, sopt_size);
1958 sopt_size -= m->m_len;
1962 while (sopt_size > 0) {
1963 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
1964 MT_DATA, 0, &msize);
1969 m->m_len = min(msize, sopt_size);
1970 sopt_size -= m->m_len;
1977 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
1979 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
1981 soopt_to_mbuf(sopt, m);
1986 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
1991 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
1993 if (sopt->sopt_val == NULL)
1995 val = sopt->sopt_val;
1996 valsize = sopt->sopt_valsize;
1997 while (m != NULL && valsize >= m->m_len) {
1998 bcopy(val, mtod(m, char *), m->m_len);
1999 valsize -= m->m_len;
2000 val = (caddr_t)val + m->m_len;
2003 if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
2004 panic("ip6_sooptmcopyin");
2007 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2009 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2011 return soopt_from_mbuf(sopt, m);
2015 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
2017 struct mbuf *m0 = m;
2022 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
2024 if (sopt->sopt_val == NULL)
2026 val = sopt->sopt_val;
2027 maxsize = sopt->sopt_valsize;
2028 while (m != NULL && maxsize >= m->m_len) {
2029 bcopy(mtod(m, char *), val, m->m_len);
2030 maxsize -= m->m_len;
2031 val = (caddr_t)val + m->m_len;
2032 valsize += m->m_len;
2036 /* a sufficiently large soopt buffer should be supplied by userland */
2040 sopt->sopt_valsize = valsize;
2045 sohasoutofband(struct socket *so)
2047 if (so->so_sigio != NULL)
2048 pgsigio(so->so_sigio, SIGURG, 0);
2049 KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
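
/*
 * Illustrative userland sketch (not part of the original file):
 * sohasoutofband() above delivers SIGURG to the socket's owner and posts
 * NOTE_OOB to any knotes.  A process that wants the signal must claim
 * ownership of the socket first and can then pull the urgent byte with
 * MSG_OOB.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
example_arm_sigurg(int s, void (*handler)(int))
{
	signal(SIGURG, handler);
	fcntl(s, F_SETOWN, getpid());	/* direct SIGURG to this process */
}

static ssize_t
example_read_oob(int s, char *cp)
{
	return (recv(s, cp, 1, MSG_OOB));	/* fetch the urgent byte */
}
#endif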
2053 sokqfilter(struct file *fp, struct knote *kn)
2055 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2056 struct signalsockbuf *ssb;
2058 switch (kn->kn_filter) {
2060 if (so->so_options & SO_ACCEPTCONN)
2061 kn->kn_fop = &solisten_filtops;
2063 kn->kn_fop = &soread_filtops;
2067 kn->kn_fop = &sowrite_filtops;
2071 kn->kn_fop = &soexcept_filtops;
2075 return (EOPNOTSUPP);
2078 knote_insert(&ssb->ssb_kq.ki_note, kn);
2079 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
2084 filt_sordetach(struct knote *kn)
2086 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2088 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
2089 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
2090 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
2095 filt_soread(struct knote *kn, long hint)
2097 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2099 if (kn->kn_sfflags & NOTE_OOB) {
2100 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
2101 kn->kn_fflags |= NOTE_OOB;
2106 kn->kn_data = so->so_rcv.ssb_cc;
2108 if (so->so_state & SS_CANTRCVMORE) {
2110 * Only set NODATA if all data has been exhausted.
2112 if (kn->kn_data == 0)
2113 kn->kn_flags |= EV_NODATA;
2114 kn->kn_flags |= EV_EOF;
2115 kn->kn_fflags = so->so_error;
2118 if (so->so_error) /* temporary udp error */
2120 if (kn->kn_sfflags & NOTE_LOWAT)
2121 return (kn->kn_data >= kn->kn_sdata);
2122 return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
2123 !TAILQ_EMPTY(&so->so_comp));
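
/*
 * Illustrative userland sketch (not part of the original file): the
 * NOTE_LOWAT handling in filt_soread() above lets a kevent consumer
 * override the socket's receive low-water mark; the filter then fires
 * only once at least that many bytes are buffered.
 */
#if 0
#include <sys/event.h>

static int
example_watch_readable(int kq, int s)
{
	struct kevent kev;

	/* require at least 1024 bytes before EVFILT_READ triggers */
	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 1024, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif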
2127 filt_sowdetach(struct knote *kn)
2129 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2131 knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
2132 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
2133 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
2138 filt_sowrite(struct knote *kn, long hint)
2140 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2142 kn->kn_data = ssb_space(&so->so_snd);
2143 if (so->so_state & SS_CANTSENDMORE) {
2144 kn->kn_flags |= (EV_EOF | EV_NODATA);
2145 kn->kn_fflags = so->so_error;
2148 if (so->so_error) /* temporary udp error */
2150 if (((so->so_state & SS_ISCONNECTED) == 0) &&
2151 (so->so_proto->pr_flags & PR_CONNREQUIRED))
2153 if (kn->kn_sfflags & NOTE_LOWAT)
2154 return (kn->kn_data >= kn->kn_sdata);
2155 return (kn->kn_data >= so->so_snd.ssb_lowat);
2160 filt_solisten(struct knote *kn, long hint)
2162 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2164 kn->kn_data = so->so_qlen;
2165 return (! TAILQ_EMPTY(&so->so_comp));