/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.44 2007/04/20 05:42:20 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <sys/thread2.h>

#include <machine/limits.h>

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

struct	vm_zone *socket_zone;

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
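
/*
 * Editor's sketch (not part of the original file): the knob above is
 * adjusted from userland through the standard sysctl(3) interface:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int newmax = 256;
 *	sysctlbyname("kern.ipc.somaxconn", NULL, NULL,
 *	    &newmax, sizeof(newmax));
 */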

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
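
/*
 * Editor's sketch (not part of the original file): the userland calls
 * that funnel into the routines below, in the usual server order:
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);	-> socreate()
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));	-> sobind()
 *	listen(s, 128);					-> solisten()
 *	int c = accept(s, NULL, NULL);			-> soaccept()
 */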

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok)
{
	struct socket *so;

	so = zalloc(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.sb_sel.si_mlist);
		TAILQ_INIT(&so->so_snd.sb_sel.si_mlist);
	}
	return (so);
}

int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	so->so_proto = prp;
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	crit_enter();
	error = so_pru_bind(so, nam, td);
	crit_exit();
	return (error);
}

void
sodealloc(struct socket *so)
{
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
	crfree(so->so_cred);
	zfree(socket_zone, so);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;
#ifdef SCTP
	short oldopt, oldqlimit;
#endif /* SCTP */

	crit_enter();
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) {
		crit_exit();
		return (EINVAL);
	}

#ifdef SCTP
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
#endif /* SCTP */

	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	/*
	 * SCTP needs to look at and tweak both the inbound backlog
	 * parameter AND the so_options (the UDP model both connects
	 * and gets inbound connections .. implicitly).
	 */
	error = so_pru_listen(so, td);
	if (error) {
#ifdef SCTP
		/* Restore the params */
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
#endif /* SCTP */
		crit_exit();
		return (error);
	}
	crit_exit();
	return (0);
}
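
/*
 * Editor's note: the clamp above means any backlog argument is accepted
 * from userland; out-of-range values are silently bounded by somaxconn
 * rather than rejected:
 *
 *	listen(s, 1000000);	// qlimit becomes somaxconn, not an error
 *	listen(s, -5);		// likewise clamped; listen() succeeds
 */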

void
sofree(struct socket *so)
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error = 0;

	crit_enter();
	funsetown(so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	crit_exit();
	return (error);
}

/*
 * Must be called from a critical section.
 */
int
soabort(struct socket *so)
{
	int error;

	error = so_pru_abort(so);
	return (error);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	crit_enter();
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = so_pru_accept(so, nam);
	crit_exit();
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	crit_enter();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		error = so_pru_connect(so, nam, td);
	}
	crit_exit();
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	crit_enter();
	error = so_pru_connect2(so1, so2);
	crit_exit();
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	crit_enter();
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	crit_exit();
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len, resid;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; crit_exit(); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		crit_enter();
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == 0)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			gotoerr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			crit_exit();
			if (error)
				goto out;
			goto restart;
		}
		crit_exit();
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			m = m_getl(resid, MB_WAIT, MT_DATA,
				   top == NULL ? M_PKTHDR : 0, &mlen);
			if (top == NULL) {
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			}
			len = min(min(mlen, resid), space);
			if (resid < MINCLSIZE) {
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    if (flags & MSG_OOB) {
			    pru_flags = PRUS_OOB;
		    } else if ((flags & MSG_EOF) &&
			       (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			       (resid <= 0)) {
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag and nothing left to
			     * send then use PRU_SEND_EOF instead of PRU_SEND.
			     */
			    pru_flags = PRUS_EOF;
		    } else if (resid > 0 && space > 0) {
			    /* If there is more to send, set PRUS_MORETOCOME */
			    pru_flags = PRUS_MORETOCOME;
		    } else {
			    pru_flags = 0;
		    }
		    crit_enter();
		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the critical section
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = so_pru_send(so, pru_flags, top, addr, control, td);
		    crit_exit();
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
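
/*
 * Editor's sketch of the caller-side contract documented above: on a
 * stream socket a short write is normal and EINTR/ERESTART can leave a
 * short count behind (hypothetical userland helper, not from this file):
 *
 *	ssize_t
 *	write_all(int fd, const char *buf, size_t len)
 *	{
 *		size_t off = 0;
 *		while (off < len) {
 *			ssize_t n = send(fd, buf + off, len - off, 0);
 *			if (n < 0) {
 *				if (errno == EINTR)
 *					continue;	// retry, count kept
 *				return (-1);		// EWOULDBLOCK etc.
 *			}
 *			off += n;
 *		}
 *		return (off);
 *	}
 */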

/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int resid, error;
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : top->m_pkthdr.len;

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	crit_enter();
	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		crit_exit();
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.sb_hiwat)
		gotoerr(EMSGSIZE);
	if (uio && sbspace(&so->so_snd) < resid) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
		crit_exit();
		if (error)
			goto out;
		goto restart;
	}
	crit_exit();

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
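
/*
 * Editor's note: because this path is atomic (PR_ATOMIC), a datagram
 * either fits in the send buffer or fails as a whole; there are no
 * partial UDP writes.  Userland sketch:
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	if (sendto(s, buf, len, 0,
 *	    (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
 *	    errno == EMSGSIZE)
 *		;	// len exceeded sb_hiwat; shrink the datagram
 */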

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we exit the critical section while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * a struct sorecv_direct (sio) for use in returning the chain.  The uio
 * is then used only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sorecv_direct *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	int resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = sio->maxlen - sio->len;
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				*sio->mapp = m;
				sio->mapp = &m->m_next;
				resid -= m->m_len;
				sio->len += m->m_len;
				m = m->m_next;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)min(resid, m->m_len), uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (so->so_state & SS_ISCONFIRMING && resid)
		so_pru_rcvd(so, 0);

	crit_enter();
restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out of the
	 * critical section.
	 */
	KKASSERT(m == so->so_rcv.sb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
#ifdef SCTP
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m && m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (psa)
				*psa = dup_sockaddr(mtod(m, struct sockaddr *));
			if (flags & MSG_PEEK)
				m = m->m_next;
			else
				m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
		}
	}
#endif /* SCTP */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * Flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or the mbuf return chain (sio).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif /* SCTP */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv, m, NULL);
					*sio->mapp = m;
					sio->mapp = &m->m_next;
					sio->len += m->m_len;
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					*sio->mapp = m_copym(m, 0, len, MB_WAIT);
					sio->mapp = &m->m_next;
					sio->len += len;
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.sb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
done:
	crit_exit();
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
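
/*
 * Editor's sketch of the receive-side flags handled above, from
 * userland:
 *
 *	char buf[512];
 *	recv(s, buf, sizeof(buf), MSG_PEEK);	// copy, don't consume
 *	recv(s, buf, sizeof(buf), MSG_WAITALL);	// block for the full count
 *	// On a datagram socket a short buffer sets MSG_TRUNC in
 *	// recvmsg(2)'s msg_flags, matching the PR_ATOMIC cleanup above.
 */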

int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
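
/*
 * Editor's note: the `how' values map to shutdown(2) from userland:
 *
 *	shutdown(s, SHUT_WR);	// send FIN, keep reading (no sorflush)
 *	shutdown(s, SHUT_RD);	// discard the receive queue via sorflush()
 *	shutdown(s, SHUT_RDWR);	// both
 */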

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);

	crit_enter();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	crit_exit();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}

static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non-listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
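
/*
 * Editor's sketch: installing an accept filter from userland.  The
 * "httpready" name assumes the accf_http(9) module is loaded:
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "httpready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 */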

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
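
/*
 * Editor's sketch of the typical caller pattern: a protocol-level
 * pr_ctloutput() handler pulls a fixed-size option with sooptcopyin()
 * (hypothetical option shown, not from this file):
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	// optval now holds exactly sizeof(optval) bytes from the caller
 */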

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
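
/*
 * Editor's note: the tv -> ticks conversion above bounds timeouts to
 * SHRT_MAX ticks; with hz = 100 that is roughly 5.5 minutes.  Userland
 * sketch:
 *
 *	struct timeval tv = { 5, 0 };	// 5 second receive timeout
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */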

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK);
			bzero(afap, sizeof(*afap));
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
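
/*
 * Editor's sketch: SO_ERROR above returns and clears so_error, which is
 * how userland collects the result of a nonblocking connect(2):
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len);
 *	// err == 0 means the connect completed successfully
 */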

/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
	    0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
		    MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return (0);
}

/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the caller should have supplied a large enough buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}
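
/*
 * Editor's note: for the SIGURG above to reach a process, userland must
 * claim ownership of the socket first; the mark tracked by so_oobmark
 * is then consumed with MSG_OOB:
 *
 *	fcntl(s, F_SETOWN, getpid());	// direct SIGURG here
 *	char c;
 *	recv(s, &c, 1, MSG_OOB);	// fetch the out-of-band byte
 */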

int
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
{
	int revents = 0;

	crit_enter();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	crit_exit();
	return (revents);
}
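
/*
 * Editor's sketch of the userland side of sopoll():
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLIN | POLLPRI };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLPRI))
 *		;	// at/approaching the OOB mark (see SS_RCVATMARK)
 */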

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	crit_enter();
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	crit_exit();
	return (0);
}
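
/*
 * Editor's sketch: registering the filters above through kqueue(2).
 * NOTE_LOWAT feeds filt_soread()'s kn_sdata comparison; on a listening
 * socket kn_data reports the completed-connection backlog instead:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */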

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	crit_enter();
	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	crit_exit();
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	crit_enter();
	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	crit_exit();
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}