 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>

#include <machine/limits.h>
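/* TCP send-path tuning knobs, defined in the TCP code */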
extern int tcp_sosnd_agglim;
extern int tcp_sosnd_async;

static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
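/*
 * Usage sketch (assuming the standard sysctl(8) utility): listen(2)
 * backlogs are clamped to this value, so a busy server host may raise
 * it at runtime with, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 */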
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);

	/* XXX race condition for reentrant kernel */
	TAILQ_INIT(&so->so_aiojobq);
	TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
	TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
	lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
	lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
	so->so_state = SS_NOFDREF;
int
socreate(int dom, struct socket **aso, int type,
    int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL);
	if (so == NULL)
		return (ENOBUFS);
	so->so_proto = prp;

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0.
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else
		so->so_port = cpu_portfn(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return (error);
	}

	*aso = so;
	return (0);
}
/*
 * NOTE: Returns referenced socket.
 */
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;
	short oldopt, oldqlimit;

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	/*
	 * SCTP needs to tweak both the inbound backlog parameter AND
	 * the so_options (in the UDP model a socket implicitly both
	 * connects and accepts inbound connections).
	 */
	error = so_pru_listen(so, td);
	if (error) {
		/* Restore the params */
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
		return (error);
	}
	return (0);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrate the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	/*
	 * We're done, remove ourselves from the accept queue we are
	 * on, if we are on one.
	 */
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		lwkt_relpooltoken(head);
	}
	ssb_release(&so->so_snd, so);
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error = 0, error2;

	funsetown(&so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			soclrstate(sp, SS_INCOMP);
			soaborta(sp);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			soclrstate(sp, SS_COMP);
			soaborta(sp);
		}
	}
	lwkt_relpooltoken(so);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
	sofree(so);			/* dispose of ref */
	return (error);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort(struct socket *so)
{
	so_pru_abort(so);
}

void
soaborta(struct socket *so)
{
	so_pru_aborta(so);
}

void
soabort_oncpu(struct socket *so)
{
	so_pru_abort_oncpu(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
	error = so_pru_accept_direct(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
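/*
 * A minimal caller sketch (hypothetical kernel consumer, error handling
 * trimmed).  Because EINTR/ERESTART may be returned after data has been
 * partially transferred, the caller compares uio_resid before and after
 * the call to tell a short write from a failed one:
 *
 *	size_t before = auio.uio_resid;
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, curthread);
 *	if ((error == EINTR || error == ERESTART) &&
 *	    auio.uio_resid != before)
 *		error = 0;	(a short write, not a failure)
 */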
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;

#define	gotoerr(errcode)	{ error = errcode; goto release; }
restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL) {
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
			}
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				m = m_getl((int)resid, MB_WAIT, MT_DATA,
				    top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			    resid == 0) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	size_t resid;
	int error, space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	ssb_unlock(&so->so_snd);
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space(&so->so_snd);
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}

		int cnt = 0, async = 0;

		if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
		} else do {
			m = m_getl((int)resid, MB_WAIT, MT_DATA,
			    top == NULL ? M_PKTHDR : 0, &mlen);
			if (top == NULL) {
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
				mp = &top;
			}
			len = imin((int)szmin(mlen, resid), space);
			space -= len;
			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid == 0)
				break;
			cnt++;
		} while (space > 0 && cnt < tcp_sosnd_agglim);

		if (flags & MSG_OOB) {
			pru_flags = PRUS_OOB;
		} else if (resid > 0 && space > 0) {
			/* If there is more to send, set PRUS_MORETOCOME */
			pru_flags = PRUS_MORETOCOME;
		} else {
			pru_flags = 0;
		}
		if (tcp_sosnd_async)
			async = 1;
		if (flags & MSG_SYNC)
			async = 0;

		/*
		 * XXX all the SS_CANTSENDMORE checks previously
		 * done could be out of date.  We could have received
		 * a reset packet in an interrupt or maybe we slept
		 * while doing page faults in uiomove() etc.  We could
		 * probably recheck again inside the splnet() protection
		 * here, but there are probably other places that this
		 * also happens.  We must rethink this.
		 */
		if (!async) {
			error = so_pru_send(so, pru_flags, top,
			    addr, control, td);
		} else {
			so_pru_send_async(so, pru_flags, top,
			    addr, control, td);
			error = 0;
		}
		top = NULL;
	} while (resid && space > 0);
release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
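/*
 * A minimal sketch of the mbuf-chain variant (hypothetical caller,
 * assuming the sockbuf helper sbinit()): to receive up to 4096 bytes
 * as an mbuf chain instead of copying through a uio, initialize a
 * struct sockbuf with a climit and pass it in place of the uio:
 *
 *	struct sockbuf sio;
 *	int flags = 0;
 *
 *	sbinit(&sio, 4096);
 *	error = soreceive(so, NULL, NULL, &sio, NULL, &flags);
 *	m = sio.sb_mb;		(the chain is now owned by the caller)
 */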
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * sections.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m && m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (psa)
				*psa = dup_sockaddr(mtod(m, struct sockaddr *));
			if (flags & MSG_PEEK)
				m = m->m_next;
			else
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			controlp = &(*controlp)->m_next;
		}
	}

	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		}
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, MB_WAIT);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;

		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}

	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;

	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}

	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}
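/*
 * Typical use from a protocol's pr_ctloutput() routine (a sketch,
 * assuming an integer-valued option): copy the value in, validate,
 * then act on it.
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 */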
int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_USELOOPBACK:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
				    &so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
				    SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
				    SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
				    SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
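			/*
			 * Worked example (assuming hz = 100, hence ustick =
			 * 10000 us per tick): a timeout of { tv_sec = 2,
			 * tv_usec = 500000 } converts to 2 * 100 +
			 * 500000 / 10000 = 250 ticks.  Sub-tick amounts
			 * round down, and the val == 0 check below keeps a
			 * nonzero request from turning into "no timeout".
			 */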
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return (0);
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		return (ENOPROTOOPT);
	}
	switch (sopt->sopt_name) {
	case SO_ACCEPTFILTER:
		if ((so->so_options & SO_ACCEPTCONN) == 0)
			return (EINVAL);
		MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
		    M_TEMP, M_WAITOK | M_ZERO);
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
			if (so->so_accf->so_accept_filter_str != NULL)
				strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
		}
		error = sooptcopyout(sopt, afap, sizeof(*afap));
		FREE(afap, M_TEMP);
		break;

	case SO_LINGER:
		l.l_onoff = so->so_options & SO_LINGER;
		l.l_linger = so->so_linger;
		error = sooptcopyout(sopt, &l, sizeof l);
		break;

	case SO_USELOOPBACK:
		optval = so->so_options & sopt->sopt_name;
integer:
		error = sooptcopyout(sopt, &optval, sizeof optval);
		break;

	case SO_TYPE:
		optval = so->so_type;
		goto integer;

	case SO_ERROR:
		optval = so->so_error;
		so->so_error = 0;
		goto integer;

	case SO_SNDBUF:
		optval = so->so_snd.ssb_hiwat;
		goto integer;

	case SO_RCVBUF:
		optval = so->so_rcv.ssb_hiwat;
		goto integer;

	case SO_SNDLOWAT:
		optval = so->so_snd.ssb_lowat;
		goto integer;

	case SO_RCVLOWAT:
		optval = so->so_rcv.ssb_lowat;
		goto integer;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		optval = (sopt->sopt_name == SO_SNDTIMEO ?
		    so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * ustick;
		error = sooptcopyout(sopt, &tv, sizeof tv);
		break;

	case SO_SNDSPACE:
		optval_l = ssb_space(&so->so_snd);
		error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}
	return (error);
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
	    0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
		    MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be supplied from userland */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
	    !TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
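/*
 * A listening socket is considered readable when its completed-connection
 * queue is non-empty; kn_data reports the current queue length.
 */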
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}