2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1988, 1990, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
67 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
68 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.43 2007/03/04 18:51:59 swildner Exp $
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/fcntl.h>
77 #include <sys/malloc.h>
79 #include <sys/domain.h>
80 #include <sys/file.h> /* for struct knote */
81 #include <sys/kernel.h>
82 #include <sys/malloc.h>
83 #include <sys/event.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/socketvar.h>
89 #include <sys/socketops.h>
90 #include <sys/resourcevar.h>
91 #include <sys/signalvar.h>
92 #include <sys/sysctl.h>
95 #include <vm/vm_zone.h>
97 #include <sys/thread2.h>
99 #include <machine/limits.h>
102 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
105 static void filt_sordetach(struct knote *kn);
106 static int filt_soread(struct knote *kn, long hint);
107 static void filt_sowdetach(struct knote *kn);
108 static int filt_sowrite(struct knote *kn, long hint);
109 static int filt_solisten(struct knote *kn, long hint);
111 static struct filterops solisten_filtops =
112 { 1, NULL, filt_sordetach, filt_solisten };
113 static struct filterops soread_filtops =
114 { 1, NULL, filt_sordetach, filt_soread };
115 static struct filterops sowrite_filtops =
116 { 1, NULL, filt_sowdetach, filt_sowrite };
118 struct vm_zone *socket_zone;
120 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
121 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
124 static int somaxconn = SOMAXCONN;
125 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
126 &somaxconn, 0, "Maximum pending socket connection queue size");
129 * Socket operation routines.
130 * These routines are called by the routines in
131 * sys_socket.c or from a system process, and
132 * implement the semantics of socket operations by
133 * switching out to the protocol specific routines.
137 * Get a socket structure from our zone, and initialize it.
138 * We don't implement `waitok' yet (see comments in uipc_domain.c).
139 * Note that it would probably be better to allocate socket
140 * and PCB at the same time, but I'm not convinced that all
141 * the protocols can be easily modified to do this.
148 so = zalloc(socket_zone);
150 /* XXX race condition for reentrant kernel */
151 bzero(so, sizeof *so);
152 TAILQ_INIT(&so->so_aiojobq);
153 TAILQ_INIT(&so->so_rcv.sb_sel.si_mlist);
154 TAILQ_INIT(&so->so_snd.sb_sel.si_mlist);
160 socreate(int dom, struct socket **aso, int type,
161 int proto, struct thread *td)
163 struct proc *p = td->td_proc;
166 struct pru_attach_info ai;
170 prp = pffindproto(dom, proto, type);
172 prp = pffindtype(dom, type);
174 if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
175 return (EPROTONOSUPPORT);
177 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
178 prp->pr_domain->dom_family != PF_LOCAL &&
179 prp->pr_domain->dom_family != PF_INET &&
180 prp->pr_domain->dom_family != PF_INET6 &&
181 prp->pr_domain->dom_family != PF_ROUTE) {
182 return (EPROTONOSUPPORT);
185 if (prp->pr_type != type)
187 so = soalloc(p != 0);
191 TAILQ_INIT(&so->so_incomp);
192 TAILQ_INIT(&so->so_comp);
194 so->so_cred = crhold(p->p_ucred);
196 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
197 ai.p_ucred = p->p_ucred;
198 ai.fd_rdir = p->p_fd->fd_rdir;
199 error = so_pru_attach(so, proto, &ai);
201 so->so_state |= SS_NOFDREF;
210 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
215 error = so_pru_bind(so, nam, td);
221 sodealloc(struct socket *so)
223 if (so->so_rcv.sb_hiwat)
224 (void)chgsbsize(so->so_cred->cr_uidinfo,
225 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
226 if (so->so_snd.sb_hiwat)
227 (void)chgsbsize(so->so_cred->cr_uidinfo,
228 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
230 /* remove accept filter if present */
231 if (so->so_accf != NULL)
232 do_setopt_accept_filter(so, NULL);
235 zfree(socket_zone, so);
239 solisten(struct socket *so, int backlog, struct thread *td)
243 short oldopt, oldqlimit;
247 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) {
253 oldopt = so->so_options;
254 oldqlimit = so->so_qlimit;
257 if (TAILQ_EMPTY(&so->so_comp))
258 so->so_options |= SO_ACCEPTCONN;
259 if (backlog < 0 || backlog > somaxconn)
261 so->so_qlimit = backlog;
262	/* SCTP needs to tweak both the inbound backlog parameter AND
263	 * the so_options (in the UDP model a socket both connects and
264	 * accepts inbound connections implicitly).
266 error = so_pru_listen(so, td);
269 /* Restore the params */
270 so->so_options = oldopt;
271 so->so_qlimit = oldqlimit;
281 sofree(struct socket *so)
283 struct socket *head = so->so_head;
285 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
288 if (so->so_state & SS_INCOMP) {
289 TAILQ_REMOVE(&head->so_incomp, so, so_list);
291 } else if (so->so_state & SS_COMP) {
293 * We must not decommission a socket that's
294 * on the accept(2) queue. If we do, then
295 * accept(2) may hang after select(2) indicated
296 * that the listening socket was ready.
300 panic("sofree: not queued");
302 so->so_state &= ~SS_INCOMP;
305 sbrelease(&so->so_snd, so);
311 * Close a socket on last file table reference removal.
312 * Initiate disconnect if connected.
313 * Free socket when disconnect complete.
316 soclose(struct socket *so, int fflag)
321 funsetown(so->so_sigio);
322 if (so->so_pcb == NULL)
324 if (so->so_state & SS_ISCONNECTED) {
325 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
326 error = sodisconnect(so);
330 if (so->so_options & SO_LINGER) {
331 if ((so->so_state & SS_ISDISCONNECTING) &&
334 while (so->so_state & SS_ISCONNECTED) {
335 error = tsleep((caddr_t)&so->so_timeo,
336 PCATCH, "soclos", so->so_linger * hz);
346 error2 = so_pru_detach(so);
351 if (so->so_options & SO_ACCEPTCONN) {
352 struct socket *sp, *sonext;
354 sp = TAILQ_FIRST(&so->so_incomp);
355 for (; sp != NULL; sp = sonext) {
356 sonext = TAILQ_NEXT(sp, so_list);
359 for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
360 sonext = TAILQ_NEXT(sp, so_list);
361 /* Dequeue from so_comp since sofree() won't do it */
362 TAILQ_REMOVE(&so->so_comp, sp, so_list);
364 sp->so_state &= ~SS_COMP;
369 if (so->so_state & SS_NOFDREF)
370 panic("soclose: NOFDREF");
371 so->so_state |= SS_NOFDREF;
378 * Must be called from a critical section.
381 soabort(struct socket *so)
385 error = so_pru_abort(so);
394 soaccept(struct socket *so, struct sockaddr **nam)
399 if ((so->so_state & SS_NOFDREF) == 0)
400 panic("soaccept: !NOFDREF");
401 so->so_state &= ~SS_NOFDREF;
402 error = so_pru_accept(so, nam);
408 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
412 if (so->so_options & SO_ACCEPTCONN)
416 * If protocol is connection-based, can only connect once.
417 * Otherwise, if connected, try to disconnect first.
418	 * This allows a user to disconnect by connecting to, e.g., a null address.
421 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
422 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
423 (error = sodisconnect(so)))) {
427	 * Prevent accumulated error from previous connection from biasing the new connection attempt.
431 error = so_pru_connect(so, nam, td);
438 soconnect2(struct socket *so1, struct socket *so2)
443 error = so_pru_connect2(so1, so2);
449 sodisconnect(struct socket *so)
454 if ((so->so_state & SS_ISCONNECTED) == 0) {
458 if (so->so_state & SS_ISDISCONNECTING) {
462 error = so_pru_disconnect(so);
468 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
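/*
 * For example, sblock(&so->so_snd, SBLOCKWAIT(flags)) below requests a
 * non-sleeping lock when the caller passed MSG_DONTWAIT and a sleeping
 * lock otherwise.
 */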
471 * If send must go all at once and message is larger than
472 * send buffering, then hard error.
473 * Lock against other senders.
474 * If must go all at once and not enough room now, then
475 * inform user that this would block and do nothing.
476 * Otherwise, if nonblocking, send as much as possible.
477 * The data to be sent is described by "uio" if nonzero,
478 * otherwise by the mbuf chain "top" (which must be null
479 * if uio is not). Data provided in mbuf chain must be small
480 * enough to send all at once.
482 * Returns nonzero on error, timeout or signal; callers
483 * must check for short counts if EINTR/ERESTART are returned.
484 * Data and control buffers are freed on return.
487 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
488 struct mbuf *top, struct mbuf *control, int flags,
493 long space, len, resid;
494 int clen = 0, error, dontroute, mlen;
495 int atomic = sosendallatonce(so) || top;
499 resid = uio->uio_resid;
501 resid = top->m_pkthdr.len;
503 * In theory resid should be unsigned.
504 * However, space must be signed, as it might be less than 0
505 * if we over-committed, and we must use a signed comparison
506 * of space and resid. On the other hand, a negative resid
507 * causes us to loop sending 0-length segments to the protocol.
509 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
510 * type sockets since that's an error.
512 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
518 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
519 (so->so_proto->pr_flags & PR_ATOMIC);
520 if (td->td_lwp != NULL)
521 td->td_lwp->lwp_ru.ru_msgsnd++;
523 clen = control->m_len;
524 #define gotoerr(errcode) { error = errcode; crit_exit(); goto release; }
527 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
532 if (so->so_state & SS_CANTSENDMORE)
535 error = so->so_error;
540 if ((so->so_state & SS_ISCONNECTED) == 0) {
542	 * `sendto' and `sendmsg' are allowed on a connection-
543	 * based socket if it supports implied connect.
544	 * Return ENOTCONN if not connected and no address is supplied.
547 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
548 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
549 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
550 !(resid == 0 && clen != 0))
552 } else if (addr == 0)
553 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
554 ENOTCONN : EDESTADDRREQ);
556 space = sbspace(&so->so_snd);
559 if ((atomic && resid > so->so_snd.sb_hiwat) ||
560 clen > so->so_snd.sb_hiwat)
562 if (space < resid + clen && uio &&
563 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
564 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
565 gotoerr(EWOULDBLOCK);
566 sbunlock(&so->so_snd);
567 error = sbwait(&so->so_snd);
579 * Data is prepackaged in "top".
583 top->m_flags |= M_EOR;
585 m = m_getl(resid, MB_WAIT, MT_DATA,
586 top == NULL ? M_PKTHDR : 0, &mlen);
589 m->m_pkthdr.rcvif = (struct ifnet *)0;
591 len = min(min(mlen, resid), space);
592 if (resid < MINCLSIZE) {
594 * For datagram protocols, leave room
595 * for protocol headers in first mbuf.
597 if (atomic && top == 0 && len < mlen)
601 error = uiomove(mtod(m, caddr_t), (int)len, uio);
602 resid = uio->uio_resid;
605 top->m_pkthdr.len += len;
611 top->m_flags |= M_EOR;
614 } while (space > 0 && atomic);
616 so->so_options |= SO_DONTROUTE;
617 if (flags & MSG_OOB) {
618 pru_flags = PRUS_OOB;
619 } else if ((flags & MSG_EOF) &&
620 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
623	 * If the user set MSG_EOF, the protocol
624	 * understands this flag, and there is nothing left to
625	 * send, then use PRU_SEND_EOF instead of PRU_SEND.
627 pru_flags = PRUS_EOF;
628 } else if (resid > 0 && space > 0) {
629 /* If there is more to send, set PRUS_MORETOCOME */
630 pru_flags = PRUS_MORETOCOME;
636 * XXX all the SS_CANTSENDMORE checks previously
637	 * done could be out of date.  We could have received
638 * a reset packet in an interrupt or maybe we slept
639 * while doing page faults in uiomove() etc. We could
640 * probably recheck again inside the splnet() protection
641 * here, but there are probably other places that this
642 * also happens. We must rethink this.
644 error = so_pru_send(so, pru_flags, top, addr, control, td);
647 so->so_options &= ~SO_DONTROUTE;
654 } while (resid && space > 0);
658 sbunlock(&so->so_snd);
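/*
 * Illustrative sketch, not part of the original file: one way a kernel
 * caller might drive sosend() with a uio describing a single kernel
 * buffer.  It assumes the socket is already connected (or the protocol
 * does not require a destination address).  The helper name
 * example_sosend_buf() is hypothetical.
 */
#if 0
static int
example_sosend_buf(struct socket *so, void *buf, size_t len, struct thread *td)
{
	struct iovec aiov;
	struct uio auio;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/* no destination address, no control mbufs, no MSG_* flags */
	return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif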
668 * A specialization of sosend() for UDP based on protocol-specific knowledge:
669 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
670 * sosendallatonce() returns true,
671 * the "atomic" variable is true,
672 * and sosendudp() blocks until space is available for the entire send.
673 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
674 * PR_IMPLOPCL flags set.
675 * UDP has no out-of-band data.
676 * UDP has no control data.
677 * UDP does not support MSG_EOR.
680 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
681 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
684 boolean_t dontroute; /* temporary SO_DONTROUTE setting */
686 if (td->td_lwp != NULL)
687 td->td_lwp->lwp_ru.ru_msgsnd++;
691 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
692 resid = uio ? uio->uio_resid : top->m_pkthdr.len;
695 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
700 if (so->so_state & SS_CANTSENDMORE)
703 error = so->so_error;
708 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
709 gotoerr(EDESTADDRREQ);
710 if (resid > so->so_snd.sb_hiwat)
712 if (uio && sbspace(&so->so_snd) < resid) {
713 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
714 gotoerr(EWOULDBLOCK);
715 sbunlock(&so->so_snd);
716 error = sbwait(&so->so_snd);
725 top = m_uiomove(uio);
730 dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
732 so->so_options |= SO_DONTROUTE;
734 error = so_pru_send(so, 0, top, addr, NULL, td);
735 top = NULL; /* sent or freed in lower layer */
738 so->so_options &= ~SO_DONTROUTE;
741 sbunlock(&so->so_snd);
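/*
 * A protocol is expected to elect this specialized path by installing
 * sosendudp() as the pru_sosend handler in its struct pr_usrreqs (UDP
 * presumably does so in its usrreqs table), while the generic sosend()
 * above remains the default for protocols without such a shortcut.
 */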
749 * Implement receive operations on a socket.
750 * We depend on the way that records are added to the sockbuf
751 * by sbappend*. In particular, each record (mbufs linked through m_next)
752 * must begin with an address if the protocol so specifies,
753 * followed by an optional mbuf or mbufs containing ancillary data,
754 * and then zero or more mbufs of data.
755 * In order to avoid blocking network interrupts for the entire time here,
756 * we exit the critical section while doing the actual copy to user space.
757 * Although the sockbuf is locked, new data may still be appended,
758 * and thus we must maintain consistency of the sockbuf during that time.
760 * The caller may receive the data as a single mbuf chain by supplying
761 * an mbuf **mp0 for use in returning the chain. The uio is then used
762 * only for the count in uio_resid.
765 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
766 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
768 struct mbuf *m, *n, **mp;
769 struct mbuf *free_chain = NULL;
770 int flags, len, error, offset;
771 struct protosw *pr = so->so_proto;
773 int orig_resid = uio->uio_resid;
781 flags = *flagsp &~ MSG_EOR;
784 if (flags & MSG_OOB) {
785 m = m_get(MB_WAIT, MT_DATA);
788 error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
792 error = uiomove(mtod(m, caddr_t),
793 (int) min(uio->uio_resid, m->m_len), uio);
795 } while (uio->uio_resid && error == 0 && m);
803 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
808 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
812 m = so->so_rcv.sb_mb;
814 * If we have less data than requested, block awaiting more
815 * (subject to any timeout) if:
816 * 1. the current count is less than the low water mark, or
817 * 2. MSG_WAITALL is set, and it is possible to do the entire
818	 *	receive operation at once if we block (resid <= hiwat), and
819	 *   3. MSG_DONTWAIT is not set.
820 * If MSG_WAITALL is set but resid is larger than the receive buffer,
821 * we have to do the receive in sections, and thus risk returning
822 * a short count if a timeout or signal occurs after we start.
824 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
825 so->so_rcv.sb_cc < uio->uio_resid) &&
826 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
827 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
828 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
829 KASSERT(m != NULL || !so->so_rcv.sb_cc, ("receive 1"));
833 error = so->so_error;
834 if ((flags & MSG_PEEK) == 0)
838 if (so->so_state & SS_CANTRCVMORE) {
844 for (; m; m = m->m_next) {
845 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
846 m = so->so_rcv.sb_mb;
850 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
851 (pr->pr_flags & PR_CONNREQUIRED)) {
855 if (uio->uio_resid == 0)
857 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
861 sbunlock(&so->so_rcv);
862 error = sbwait(&so->so_rcv);
869 if (uio->uio_td && uio->uio_td->td_proc)
870 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
873 * note: m should be == sb_mb here. Cache the next record while
874	 * cleaning up.  Note that calling m_free*() will break out of the critical section.
877 KKASSERT(m == so->so_rcv.sb_mb);
880 * Skip any address mbufs prepending the record.
882 if (pr->pr_flags & PR_ADDR) {
883 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
886 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
887 if (flags & MSG_PEEK)
890 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
894 * Skip any control mbufs prepending the record.
897 if (pr->pr_flags & PR_ADDR_OPT) {
899 * For SCTP we may be getting a
900 * whole message OR a partial delivery.
902 if (m && m->m_type == MT_SONAME) {
905 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
906 if (flags & MSG_PEEK)
909 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
913 while (m && m->m_type == MT_CONTROL && error == 0) {
914 if (flags & MSG_PEEK) {
916 *controlp = m_copy(m, 0, m->m_len);
917 m = m->m_next; /* XXX race */
920 n = sbunlinkmbuf(&so->so_rcv, m, NULL);
921 if (pr->pr_domain->dom_externalize &&
922 mtod(m, struct cmsghdr *)->cmsg_type ==
924 error = (*pr->pr_domain->dom_externalize)(m);
928 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
931 if (controlp && *controlp) {
933 controlp = &(*controlp)->m_next;
942 if (type == MT_OOBDATA)
947 * Copy to the UIO or mbuf return chain (*mp).
951 while (m && uio->uio_resid > 0 && error == 0) {
952 if (m->m_type == MT_OOBDATA) {
953 if (type != MT_OOBDATA)
955 } else if (type == MT_OOBDATA)
958 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
960 so->so_state &= ~SS_RCVATMARK;
961 len = uio->uio_resid;
962 if (so->so_oobmark && len > so->so_oobmark - offset)
963 len = so->so_oobmark - offset;
964 if (len > m->m_len - moff)
965 len = m->m_len - moff;
967 * If mp is set, just pass back the mbufs.
968 * Otherwise copy them out via the uio, then free.
969 * Sockbuf must be consistent here (points to current mbuf,
970 * it points to next record) when we drop priority;
971 * we must note any additions to the sockbuf when we
972 * block interrupts again.
976 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
981 uio->uio_resid -= len;
985 * Eat the entire mbuf or just a piece of it
987 if (len == m->m_len - moff) {
988 if (m->m_flags & M_EOR)
991 if (m->m_flags & M_NOTIFICATION)
992 flags |= MSG_NOTIFICATION;
994 if (flags & MSG_PEEK) {
999 n = sbunlinkmbuf(&so->so_rcv, m, NULL);
1004 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
1008 if (flags & MSG_PEEK) {
1012 *mp = m_copym(m, 0, len, MB_WAIT);
1015 so->so_rcv.sb_cc -= len;
1018 if (so->so_oobmark) {
1019 if ((flags & MSG_PEEK) == 0) {
1020 so->so_oobmark -= len;
1021 if (so->so_oobmark == 0) {
1022 so->so_state |= SS_RCVATMARK;
1027 if (offset == so->so_oobmark)
1031 if (flags & MSG_EOR)
1034 * If the MSG_WAITALL flag is set (for non-atomic socket),
1035 * we must not quit until "uio->uio_resid == 0" or an error
1036 * termination. If a signal/timeout occurs, return
1037 * with a short count but without error.
1038 * Keep sockbuf locked against other readers.
1040 while (flags & MSG_WAITALL && m == NULL &&
1041 uio->uio_resid > 0 && !sosendallatonce(so) &&
1042 so->so_rcv.sb_mb == NULL) {
1043 if (so->so_error || so->so_state & SS_CANTRCVMORE)
1046 * The window might have closed to zero, make
1047 * sure we send an ack now that we've drained
1048 * the buffer or we might end up blocking until
1049 * the idle takes over (5 seconds).
1051 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1052 so_pru_rcvd(so, flags);
1053 error = sbwait(&so->so_rcv);
1055 sbunlock(&so->so_rcv);
1059 m = so->so_rcv.sb_mb;
1064 * If an atomic read was requested but unread data still remains
1065 * in the record, set MSG_TRUNC.
1067 if (m && pr->pr_flags & PR_ATOMIC)
1071 * Cleanup. If an atomic read was requested drop any unread data.
1073 if ((flags & MSG_PEEK) == 0) {
1074 if (m && (pr->pr_flags & PR_ATOMIC))
1075 sbdroprecord(&so->so_rcv);
1076 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
1077 so_pru_rcvd(so, flags);
1080 if (orig_resid == uio->uio_resid && orig_resid &&
1081 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1082 sbunlock(&so->so_rcv);
1090 sbunlock(&so->so_rcv);
1094 m_freem(free_chain);
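/*
 * Illustrative sketch, not part of the original file: a kernel caller
 * reading up to 'len' bytes into a kernel buffer with soreceive(),
 * without collecting the source address or control messages.  The
 * helper name example_soreceive_buf() is hypothetical.
 */
#if 0
static int
example_soreceive_buf(struct socket *so, void *buf, size_t len, struct thread *td)
{
	struct iovec aiov;
	struct uio auio;
	int flags = 0;		/* in/out MSG_* flags, e.g. MSG_DONTWAIT */

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}
#endif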
1099 soshutdown(struct socket *so, int how)
1101 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1107 return (so_pru_shutdown(so));
1112 sorflush(struct socket *so)
1114 struct sockbuf *sb = &so->so_rcv;
1115 struct protosw *pr = so->so_proto;
1118 sb->sb_flags |= SB_NOINTR;
1119 (void) sblock(sb, M_WAITOK);
1125 bzero((caddr_t)sb, sizeof (*sb));
1126 if (asb.sb_flags & SB_KNOTE) {
1127 sb->sb_sel.si_note = asb.sb_sel.si_note;
1128 sb->sb_flags = SB_KNOTE;
1132 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
1133 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
1134 sbrelease(&asb, so);
1139 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
1141 struct accept_filter_arg *afap = NULL;
1142 struct accept_filter *afp;
1143 struct so_accf *af = so->so_accf;
1146	/* do not set/remove accept filters on non-listening sockets */
1147 if ((so->so_options & SO_ACCEPTCONN) == 0) {
1152 /* removing the filter */
1155 if (af->so_accept_filter != NULL &&
1156 af->so_accept_filter->accf_destroy != NULL) {
1157 af->so_accept_filter->accf_destroy(so);
1159 if (af->so_accept_filter_str != NULL) {
1160 FREE(af->so_accept_filter_str, M_ACCF);
1165 so->so_options &= ~SO_ACCEPTFILTER;
1168 /* adding a filter */
1169 /* must remove previous filter first */
1174 /* don't put large objects on the kernel stack */
1175 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
1176 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1177 afap->af_name[sizeof(afap->af_name)-1] = '\0';
1178 afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1181 afp = accept_filt_get(afap->af_name);
1186 MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
1187 bzero(af, sizeof(*af));
1188 if (afp->accf_create != NULL) {
1189 if (afap->af_name[0] != '\0') {
1190 int len = strlen(afap->af_name) + 1;
1192 MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
1193 strcpy(af->so_accept_filter_str, afap->af_name);
1195 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
1196 if (af->so_accept_filter_arg == NULL) {
1197 FREE(af->so_accept_filter_str, M_ACCF);
1204 af->so_accept_filter = afp;
1206 so->so_options |= SO_ACCEPTFILTER;
1215 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1216 * an additional variant to handle the case where the option value needs
1217 * to be some kind of integer, but not a specific size.
1218 * In addition to their use here, these functions are also called by the
1219 * protocol-level pr_ctloutput() routines.
1222 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1227 * If the user gives us more than we wanted, we ignore it,
1228 * but if we don't get the minimum length the caller
1229 * wants, we return EINVAL. On success, sopt->sopt_valsize
1230 * is set to however much we actually retrieved.
1232 if ((valsize = sopt->sopt_valsize) < minlen)
1235 sopt->sopt_valsize = valsize = len;
1237 if (sopt->sopt_td != NULL)
1238 return (copyin(sopt->sopt_val, buf, valsize));
1240 bcopy(sopt->sopt_val, buf, valsize);
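/*
 * Illustrative sketch, not part of the original file: a protocol-level
 * pr_ctloutput()-style handler fetching an integer-sized option value
 * with sooptcopyin().  The handler name example_ctloutput_set() is
 * hypothetical.
 */
#if 0
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
	int optval, error;

	/* reject values shorter than an int; ignore any excess length */
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
	/* ... apply optval to the protocol control block ... */
	return (0);
}
#endif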
1245 sosetopt(struct socket *so, struct sockopt *sopt)
1253 sopt->sopt_dir = SOPT_SET;
1254 if (sopt->sopt_level != SOL_SOCKET) {
1255 if (so->so_proto && so->so_proto->pr_ctloutput) {
1256 return (so_pr_ctloutput(so, sopt));
1258 error = ENOPROTOOPT;
1260 switch (sopt->sopt_name) {
1262 case SO_ACCEPTFILTER:
1263 error = do_setopt_accept_filter(so, sopt);
1269 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1273 so->so_linger = l.l_linger;
1275 so->so_options |= SO_LINGER;
1277 so->so_options &= ~SO_LINGER;
1283 case SO_USELOOPBACK:
1289 error = sooptcopyin(sopt, &optval, sizeof optval,
1294 so->so_options |= sopt->sopt_name;
1296 so->so_options &= ~sopt->sopt_name;
1303 error = sooptcopyin(sopt, &optval, sizeof optval,
1309 * Values < 1 make no sense for any of these
1310 * options, so disallow them.
1317 switch (sopt->sopt_name) {
1320 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1321 &so->so_snd : &so->so_rcv, (u_long)optval,
1323 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
1330 * Make sure the low-water is never greater than
1334 so->so_snd.sb_lowat =
1335 (optval > so->so_snd.sb_hiwat) ?
1336 so->so_snd.sb_hiwat : optval;
1339 so->so_rcv.sb_lowat =
1340 (optval > so->so_rcv.sb_hiwat) ?
1341 so->so_rcv.sb_hiwat : optval;
1348 error = sooptcopyin(sopt, &tv, sizeof tv,
1353 /* assert(hz > 0); */
1354 if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
1355 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1359 /* assert(tick > 0); */
1360 /* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
1361 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
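		/*
		 * Worked example: with hz = 100 (so tick, the length of one
		 * tick in microseconds, is 10000), a requested timeout of
		 * 2.5 seconds becomes 2 * 100 + 500000 / 10000 = 250 ticks,
		 * which is then range-checked against SHRT_MAX below.
		 */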
1362 if (val > SHRT_MAX) {
1366 if (val == 0 && tv.tv_usec != 0)
1369 switch (sopt->sopt_name) {
1371 so->so_snd.sb_timeo = val;
1374 so->so_rcv.sb_timeo = val;
1379 error = ENOPROTOOPT;
1382 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
1383 (void) so_pr_ctloutput(so, sopt);
1390 /* Helper routine for getsockopt */
1392 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
1400 * Documented get behavior is that we always return a value,
1401 * possibly truncated to fit in the user's buffer.
1402 * Traditional behavior is that we always tell the user
1403 * precisely how much we copied, rather than something useful
1404 * like the total amount we had available for her.
1405 * Note that this interface is not idempotent; the entire answer must
1406	 * be generated ahead of time.
1408 valsize = min(len, sopt->sopt_valsize);
1409 sopt->sopt_valsize = valsize;
1410 if (sopt->sopt_val != 0) {
1411 if (sopt->sopt_td != NULL)
1412 error = copyout(buf, sopt->sopt_val, valsize);
1414 bcopy(buf, sopt->sopt_val, valsize);
1420 sogetopt(struct socket *so, struct sockopt *sopt)
1426 struct accept_filter_arg *afap;
1430 sopt->sopt_dir = SOPT_GET;
1431 if (sopt->sopt_level != SOL_SOCKET) {
1432 if (so->so_proto && so->so_proto->pr_ctloutput) {
1433 return (so_pr_ctloutput(so, sopt));
1435 return (ENOPROTOOPT);
1437 switch (sopt->sopt_name) {
1439 case SO_ACCEPTFILTER:
1440 if ((so->so_options & SO_ACCEPTCONN) == 0)
1442 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
1444 bzero(afap, sizeof(*afap));
1445 if ((so->so_options & SO_ACCEPTFILTER) != 0) {
1446 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
1447 if (so->so_accf->so_accept_filter_str != NULL)
1448 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
1450 error = sooptcopyout(sopt, afap, sizeof(*afap));
1456 l.l_onoff = so->so_options & SO_LINGER;
1457 l.l_linger = so->so_linger;
1458 error = sooptcopyout(sopt, &l, sizeof l);
1461 case SO_USELOOPBACK:
1470 optval = so->so_options & sopt->sopt_name;
1472 error = sooptcopyout(sopt, &optval, sizeof optval);
1476 optval = so->so_type;
1480 optval = so->so_error;
1485 optval = so->so_snd.sb_hiwat;
1489 optval = so->so_rcv.sb_hiwat;
1493 optval = so->so_snd.sb_lowat;
1497 optval = so->so_rcv.sb_lowat;
1502 optval = (sopt->sopt_name == SO_SNDTIMEO ?
1503 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1505 tv.tv_sec = optval / hz;
1506 tv.tv_usec = (optval % hz) * tick;
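		/* e.g. 250 ticks with hz = 100 map back to tv_sec = 2, tv_usec = 500000 */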
1507 error = sooptcopyout(sopt, &tv, sizeof tv);
1511 error = ENOPROTOOPT;
1518 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
1520 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1522 struct mbuf *m, *m_prev;
1523 int sopt_size = sopt->sopt_valsize, msize;
1525 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
1529 m->m_len = min(msize, sopt_size);
1530 sopt_size -= m->m_len;
1534 while (sopt_size > 0) {
1535 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
1536 MT_DATA, 0, &msize);
1541 m->m_len = min(msize, sopt_size);
1542 sopt_size -= m->m_len;
1549 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
1551 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
1553 struct mbuf *m0 = m;
1555 if (sopt->sopt_val == NULL)
1557 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1558 if (sopt->sopt_td != NULL) {
1561 error = copyin(sopt->sopt_val, mtod(m, char *),
1568 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
1569 sopt->sopt_valsize -= m->m_len;
1570 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1573	if (m != NULL) /* chain should be allocated large enough at ip6_sooptmcopyin() */
1574 panic("ip6_sooptmcopyin");
1578 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
1580 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
1582 struct mbuf *m0 = m;
1585 if (sopt->sopt_val == NULL)
1587 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1588 if (sopt->sopt_td != NULL) {
1591 error = copyout(mtod(m, char *), sopt->sopt_val,
1598 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
1599 sopt->sopt_valsize -= m->m_len;
1600 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1601 valsize += m->m_len;
1605	/* a large enough soopt buffer should be supplied from user-land */
1609 sopt->sopt_valsize = valsize;
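/*
 * Illustrative sketch, not part of the original file: the usual SOPT_SET
 * direction through these compatibility helpers -- size an mbuf chain to
 * sopt->sopt_valsize with soopt_getm(), fill it from the user's buffer
 * with soopt_mcopyin(), and hand it to the protocol; soopt_mcopyout() is
 * the reverse path used when answering a SOPT_GET.  The helper name
 * example_soopt_set() is hypothetical.
 */
#if 0
static int
example_soopt_set(struct sockopt *sopt)
{
	struct mbuf *m = NULL;
	int error;

	error = soopt_getm(sopt, &m);		/* chain sized to sopt_valsize */
	if (error)
		return (error);
	error = soopt_mcopyin(sopt, m);		/* user buffer -> mbuf chain */
	if (error)
		return (error);			/* chain is freed on copyin error */
	/* ... pass 'm' to the protocol's option processing, which consumes it ... */
	return (0);
}
#endif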
1614 sohasoutofband(struct socket *so)
1616 if (so->so_sigio != NULL)
1617 pgsigio(so->so_sigio, SIGURG, 0);
1618 selwakeup(&so->so_rcv.sb_sel);
1622 sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
1628 if (events & (POLLIN | POLLRDNORM))
1630 revents |= events & (POLLIN | POLLRDNORM);
1632 if (events & POLLINIGNEOF)
1633 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
1634 !TAILQ_EMPTY(&so->so_comp) || so->so_error)
1635 revents |= POLLINIGNEOF;
1637 if (events & (POLLOUT | POLLWRNORM))
1638 if (sowriteable(so))
1639 revents |= events & (POLLOUT | POLLWRNORM);
1641 if (events & (POLLPRI | POLLRDBAND))
1642 if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
1643 revents |= events & (POLLPRI | POLLRDBAND);
1647 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
1649 selrecord(td, &so->so_rcv.sb_sel);
1650 so->so_rcv.sb_flags |= SB_SEL;
1653 if (events & (POLLOUT | POLLWRNORM)) {
1654 selrecord(td, &so->so_snd.sb_sel);
1655 so->so_snd.sb_flags |= SB_SEL;
1664 sokqfilter(struct file *fp, struct knote *kn)
1666 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1669 switch (kn->kn_filter) {
1671 if (so->so_options & SO_ACCEPTCONN)
1672 kn->kn_fop = &solisten_filtops;
1674 kn->kn_fop = &soread_filtops;
1678 kn->kn_fop = &sowrite_filtops;
1686 SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
1687 sb->sb_flags |= SB_KNOTE;
1693 filt_sordetach(struct knote *kn)
1695 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1698 SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
1699 if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
1700 so->so_rcv.sb_flags &= ~SB_KNOTE;
1706 filt_soread(struct knote *kn, long hint)
1708 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1710 kn->kn_data = so->so_rcv.sb_cc;
1711 if (so->so_state & SS_CANTRCVMORE) {
1712 kn->kn_flags |= EV_EOF;
1713 kn->kn_fflags = so->so_error;
1716 if (so->so_error) /* temporary udp error */
1718 if (kn->kn_sfflags & NOTE_LOWAT)
1719 return (kn->kn_data >= kn->kn_sdata);
1720 return (kn->kn_data >= so->so_rcv.sb_lowat);
1724 filt_sowdetach(struct knote *kn)
1726 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1729 SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
1730 if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
1731 so->so_snd.sb_flags &= ~SB_KNOTE;
1737 filt_sowrite(struct knote *kn, long hint)
1739 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1741 kn->kn_data = sbspace(&so->so_snd);
1742 if (so->so_state & SS_CANTSENDMORE) {
1743 kn->kn_flags |= EV_EOF;
1744 kn->kn_fflags = so->so_error;
1747 if (so->so_error) /* temporary udp error */
1749 if (((so->so_state & SS_ISCONNECTED) == 0) &&
1750 (so->so_proto->pr_flags & PR_CONNREQUIRED))
1752 if (kn->kn_sfflags & NOTE_LOWAT)
1753 return (kn->kn_data >= kn->kn_sdata);
1754 return (kn->kn_data >= so->so_snd.sb_lowat);
1759 filt_solisten(struct knote *kn, long hint)
1761 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1763 kn->kn_data = so->so_qlen;
1764 return (! TAILQ_EMPTY(&so->so_comp));