 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.22 2004/06/06 19:16:06 dillon Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/file.h>		/* for struct knote */
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <vm/vm_zone.h>
#include <machine/limits.h>
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
static void filt_sordetach(struct knote *kn);
static int filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);
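/*
 * Initializer layout below is { f_isfd, f_attach, f_detach, f_event }: the
 * leading 1 marks these as file-descriptor based filters, and no f_attach
 * hook is needed because knotes are attached in sokqfilter() further down.
 */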
static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };
struct vm_zone *socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
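/*
 * For illustration: the backlog limit enforced in solisten() below is
 * tunable at run time, e.g. "sysctl kern.ipc.somaxconn=256" from the shell,
 * or from C with sysctlbyname(3):
 *
 *	int v = 256;
 *	sysctlbyname("kern.ipc.somaxconn", NULL, NULL, &v, sizeof(v));
 */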
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
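/*
 * Illustrative sketch of the dispatch described above: conceptually each
 * so_pru_*() wrapper used by these routines reduces to an indirect call
 * through the protocol's pr_usrreqs vector, e.g.
 *
 *	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
 *
 * The real wrappers live elsewhere (kern/uipc_msg.c) and may hand the
 * request to a protocol thread instead of calling the function directly.
 */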
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
	so = zalloc(socket_zone);
	/* XXX race condition for reentrant kernel */
	bzero(so, sizeof *so);
	so->so_gencnt = ++so_gencnt;
	TAILQ_INIT(&so->so_aiojobq);
	TAILQ_INIT(&so->so_rcv.sb_sel.si_mlist);
	TAILQ_INIT(&so->so_snd.sb_sel.si_mlist);
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
	struct proc *p = td->td_proc;
	struct pru_attach_info ai;
	prp = pffindproto(dom, proto, type);
	prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);
	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
	so = soalloc(p != 0);
	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;
	error = so_pru_attach(so, proto, &ai);
	so->so_state |= SS_NOFDREF;
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
	error = so_pru_bind(so, nam, td);
sodealloc(struct socket *so)
	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
	zfree(socket_zone, so);
solisten(struct socket *so, int backlog, struct thread *td)
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) {
	error = so_pru_listen(so, td);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
	so->so_qlimit = backlog;
sofree(struct socket *so)
	struct socket *head = so->so_head;
	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
	if (so->so_state & SS_INCOMP) {
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
	} else if (so->so_state & SS_COMP) {
		 * We must not decommission a socket that's
		 * on the accept(2) queue. If we do, then
		 * accept(2) may hang after select(2) indicated
		 * that the listening socket was ready.
		panic("sofree: not queued");
	so->so_state &= ~SS_INCOMP;
	sbrelease(&so->so_snd, so);
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
soclose(struct socket *so)
	int s = splnet();		/* conservative */
	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;
		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			sp->so_state &= ~SS_COMP;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PCATCH, "soclos", so->so_linger * hz);
	error2 = so_pru_detach(so);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
 * Must be called at splnet...
	error = so_pru_abort(so);
soaccept(struct socket *so, struct sockaddr **nam)
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = so_pru_accept(so, nam);
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
	if (so->so_options & SO_ACCEPTCONN)
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
	error = so_pru_connect(so, nam, td);
soconnect2(struct socket *so1, struct socket *so2)
	error = so_pru_connect2(so1, so2);
sodisconnect(struct socket *so)
	if ((so->so_state & SS_ISCONNECTED) == 0) {
	if (so->so_state & SS_ISDISCONNECTING) {
	error = so_pru_disconnect(so);
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
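/*
 * SBLOCKWAIT() maps the caller's MSG_DONTWAIT flag onto the sockbuf locking
 * mode: SBLOCKWAIT(MSG_DONTWAIT) is M_NOWAIT and SBLOCKWAIT(0) is M_WAITOK,
 * so a non-blocking request never sleeps in sblock().
 */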
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not). Data provided in mbuf chain must be small
 * enough to send all at once.
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
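/*
 * Illustrative sketch: an in-kernel caller may pass either a uio or a
 * prebuilt mbuf chain. Assuming a connected socket `so' and an mbuf chain
 * `m' built by the caller, a typical "top"-style call is
 *
 *	error = sosend(so, NULL, NULL, m, NULL, 0, curthread);
 *
 * and, per the rules above, `m' is consumed even when an error is returned.
 */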
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	resid = uio->uio_resid;
	resid = top->m_pkthdr.len;
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid. On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_proc && td->td_proc->p_stats)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	clen = control->m_len;
#define	gotoerr(errno)	{ error = errno; splx(s); goto release; }
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (so->so_state & SS_CANTSENDMORE)
	error = so->so_error;
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		 * `sendto' and `sendmsg' are allowed on a connection-
		 * based socket if it supports implied connect.
		 * Return ENOTCONN if not connected and no address is
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0))
		} else if (addr == 0)
			gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
			    ENOTCONN : EDESTADDRREQ);
	space = sbspace(&so->so_snd);
	if ((atomic && resid > so->so_snd.sb_hiwat) ||
	    clen > so->so_snd.sb_hiwat)
	if (space < resid + clen && uio &&
	    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
		if (so->so_state & SS_NBIO)
			gotoerr(EWOULDBLOCK);
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
		 * Data is prepackaged in "top".
		top->m_flags |= M_EOR;
		MGETHDR(m, MB_WAIT, MT_DATA);
		m->m_pkthdr.rcvif = (struct ifnet *)0;
		MGET(m, MB_WAIT, MT_DATA);
		if (resid >= MINCLSIZE) {
			if ((m->m_flags & M_EXT) == 0)
			len = min(min(mlen, resid), space);
			len = min(min(mlen, resid), space);
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			if (atomic && top == 0 && len < mlen)
		error = uiomove(mtod(m, caddr_t), (int)len, uio);
		resid = uio->uio_resid;
		top->m_pkthdr.len += len;
			top->m_flags |= M_EOR;
	} while (space > 0 && atomic);
		so->so_options |= SO_DONTROUTE;
	if (flags & MSG_OOB) {
		pru_flags = PRUS_OOB;
	} else if ((flags & MSG_EOF) &&
	    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
		 * If the user set MSG_EOF, the protocol
		 * understands this flag and there is nothing left to
		 * send, then use PRU_SEND_EOF instead of PRU_SEND.
		pru_flags = PRUS_EOF;
	} else if (resid > 0 && space > 0) {
		/* If there is more to send, set PRUS_MORETOCOME */
		pru_flags = PRUS_MORETOCOME;
	s = splnet();			/* XXX */
	 * XXX all the SS_CANTSENDMORE checks previously
	 * done could be out of date. We could have received
	 * a reset packet in an interrupt or maybe we slept
	 * while doing page faults in uiomove() etc. We could
	 * probably recheck again inside the splnet() protection
	 * here, but there are probably other places that this
	 * also happens. We must rethink this.
	error = so_pru_send(so, pru_flags, top, addr, control, td);
		so->so_options &= ~SO_DONTROUTE;
	} while (resid && space > 0);
	sbunlock(&so->so_snd);
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	if (td->td_proc && td->td_proc->p_stats)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : top->m_pkthdr.len;
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (so->so_state & SS_CANTSENDMORE)
	error = so->so_error;
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.sb_hiwat)
	if (uio && sbspace(&so->so_snd) < resid) {
		if (so->so_state & SS_NBIO)
			gotoerr(EWOULDBLOCK);
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
	top = m_uiomove(uio, MB_WAIT, 0);
	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
		so->so_options |= SO_DONTROUTE;
	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;	/* sent or freed in lower layer */
		so->so_options &= ~SO_DONTROUTE;
	sbunlock(&so->so_snd);
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*. In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain. The uio is then used
 * only for the count in uio_resid.
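/*
 * Illustrative sketch: a kernel consumer that wants the data as mbufs
 * rather than copied through the uio supplies mp0. Assuming `so' is a
 * connected socket and `auio' only carries the requested byte count in
 * uio_resid:
 *
 *	struct mbuf *m = NULL;
 *	int flags = 0;
 *	error = soreceive(so, NULL, &auio, &m, NULL, &flags);
 *
 * On return the received chain, if any, is in `m' and must be freed by the
 * caller with m_freem().
 */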
soreceive(so, psa, uio, mp0, controlp, flagsp)
	struct sockaddr **psa;
	struct mbuf **controlp;
	struct mbuf *m, **mp;
	int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int orig_resid = uio->uio_resid;
	flags = *flagsp &~ MSG_EOR;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
		} while (uio->uio_resid && error == 0 && m);
	*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	m = so->so_rcv.sb_mb;
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		error = so->so_error;
		if ((flags & MSG_PEEK) == 0)
		if (so->so_state & SS_CANTRCVMORE) {
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
		if (uio->uio_resid == 0)
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
	if (uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK) {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
				*controlp = m_copy(m, 0, m->m_len);
			sbfree(&so->so_rcv, m);
			if (pr->pr_domain->dom_externalize &&
			    mtod(m, struct cmsghdr *)->cmsg_type ==
				error = (*pr->pr_domain->dom_externalize)(m);
				so->so_rcv.sb_mb = m->m_next;
				m = so->so_rcv.sb_mb;
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			controlp = &(*controlp)->m_next;
	if ((flags & MSG_PEEK) == 0)
		m->m_nextpkt = nextrecord;
	if (type == MT_OOBDATA)
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
		} else if (type == MT_OOBDATA)
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
			if (flags & MSG_PEEK) {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
					so->so_rcv.sb_mb = m = m_free(m);
					m->m_nextpkt = nextrecord;
			if (flags & MSG_PEEK)
					*mp = m_copym(m, 0, len, MB_WAIT);
				so->so_rcv.sb_cc -= len;
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
				if (offset == so->so_oobmark)
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination. If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = sbwait(&so->so_rcv);
				sbunlock(&so->so_rcv);
			m = so->so_rcv.sb_mb;
				nextrecord = m->m_nextpkt;
	if (m && pr->pr_flags & PR_ATOMIC) {
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	if ((flags & MSG_PEEK) == 0) {
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			so_pru_rcvd(so, flags);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
	sbunlock(&so->so_rcv);
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (so_pru_shutdown(so));
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
do_setopt_accept_filter(so, sopt)
	struct sockopt *sopt;
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
	/* removing the filter */
		if (af->so_accept_filter != NULL &&
		    af->so_accept_filter->accf_destroy != NULL) {
			af->so_accept_filter->accf_destroy(so);
		if (af->so_accept_filter_str != NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
		so->so_options &= ~SO_ACCEPTFILTER;
	/* adding a filter */
	/* must remove previous filter first */
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	afp = accept_filt_get(afap->af_name);
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;
			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
	af->so_accept_filter = afp;
	so->so_options |= SO_ACCEPTFILTER;
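/*
 * Illustrative sketch: from user space an accept filter is attached to a
 * listening socket with setsockopt(2), here using the "httpready" filter
 * (assuming its accf module is loaded):
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "httpready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * Setting SO_ACCEPTFILTER with a NULL option value removes the filter again.
 */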
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
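/*
 * Illustrative sketch: a protocol's pr_ctloutput() handler typically uses
 * sooptcopyin() to fetch a fixed-size value, e.g. an int-valued option:
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *
 * With minlen == sizeof(optval), short user buffers fail with EINVAL while
 * longer ones are silently truncated to len.
 */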
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL. On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	if ((valsize = sopt->sopt_valsize) < minlen)
	sopt->sopt_valsize = valsize = len;
	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));
	bcopy(sopt->sopt_val, buf, valsize);
	struct sockopt *sopt;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		error = ENOPROTOOPT;
	switch (sopt->sopt_name) {
	case SO_ACCEPTFILTER:
		error = do_setopt_accept_filter(so, sopt);
		error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
		so->so_linger = l.l_linger;
			so->so_options |= SO_LINGER;
			so->so_options &= ~SO_LINGER;
	case SO_USELOOPBACK:
		error = sooptcopyin(sopt, &optval, sizeof optval,
			so->so_options |= sopt->sopt_name;
			so->so_options &= ~sopt->sopt_name;
		error = sooptcopyin(sopt, &optval, sizeof optval,
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		switch (sopt->sopt_name) {
			if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
			    &so->so_snd : &so->so_rcv, (u_long)optval,
			    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
			 * Make sure the low-water is never greater than
			so->so_snd.sb_lowat =
			    (optval > so->so_snd.sb_hiwat) ?
			    so->so_snd.sb_hiwat : optval;
			so->so_rcv.sb_lowat =
			    (optval > so->so_rcv.sb_hiwat) ?
			    so->so_rcv.sb_hiwat : optval;
		error = sooptcopyin(sopt, &tv, sizeof tv,
		/* assert(hz > 0); */
		if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
		    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
		/* assert(tick > 0); */
		/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
		val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
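		/*
		 * For illustration: with hz = 100 (tick = 10000 us), a
		 * timeout of { tv_sec = 1, tv_usec = 500000 } becomes
		 * 1 * 100 + 500000 / 10000 = 150 ticks; the SHRT_MAX checks
		 * cap the timeout at roughly 327 seconds at that hz.
		 */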
		if (val > SHRT_MAX) {
		if (val == 0 && tv.tv_usec != 0)
		switch (sopt->sopt_name) {
			so->so_snd.sb_timeo = val;
			so->so_rcv.sb_timeo = val;
		error = ENOPROTOOPT;
	if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
		(void) so_pr_ctloutput(so, sopt);
/* Helper routine for getsockopt */
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
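	/*
	 * Illustrative sketch: the getsockopt(2) side therefore builds the
	 * complete answer first and lets sooptcopyout() truncate it, e.g.
	 * for an int-valued option:
	 *
	 *	int optval = so->so_error;	(example value)
	 *
	 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
	 *
	 * sopt->sopt_valsize then holds the number of bytes actually copied.
	 */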
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
			bcopy(buf, sopt->sopt_val, valsize);
	struct sockopt *sopt;
	struct accept_filter_arg *afap;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
			return (ENOPROTOOPT);
	switch (sopt->sopt_name) {
	case SO_ACCEPTFILTER:
		if ((so->so_options & SO_ACCEPTCONN) == 0)
		MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
		bzero(afap, sizeof(*afap));
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
			if (so->so_accf->so_accept_filter_str != NULL)
				strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
		error = sooptcopyout(sopt, afap, sizeof(*afap));
		l.l_onoff = so->so_options & SO_LINGER;
		l.l_linger = so->so_linger;
		error = sooptcopyout(sopt, &l, sizeof l);
	case SO_USELOOPBACK:
		optval = so->so_options & sopt->sopt_name;
		error = sooptcopyout(sopt, &optval, sizeof optval);
		optval = so->so_type;
		optval = so->so_error;
		optval = so->so_snd.sb_hiwat;
		optval = so->so_rcv.sb_hiwat;
		optval = so->so_snd.sb_lowat;
		optval = so->so_rcv.sb_lowat;
		optval = (sopt->sopt_name == SO_SNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * tick;
		error = sooptcopyout(sopt, &tv, sizeof tv);
		error = ENOPROTOOPT;
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;
	MGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
		m->m_len = min(MCLBYTES, sopt_size);
		m->m_len = min(MLEN, sopt_size);
	sopt_size -= m->m_len;
		MGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA);
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
			m->m_len = min(MCLBYTES, sopt_size);
			m->m_len = min(MLEN, sopt_size);
		sopt_size -= m->m_len;
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
	struct mbuf *m0 = m;
	if (sopt->sopt_val == NULL)
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			error = copyin(sopt->sopt_val, mtod(m, char *),
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
	if (m != NULL)	/* enough mbufs should have been allocated in soopt_getm() */
		panic("ip6_sooptmcopyin");
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
	struct mbuf *m0 = m;
	if (sopt->sopt_val == NULL)
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			error = copyout(mtod(m, char *), sopt->sopt_val,
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		valsize += m->m_len;
		/* the user-land soopt buffer should have been large enough */
	sopt->sopt_valsize = valsize;
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
	if (events & (POLLIN | POLLRDNORM))
		revents |= events & (POLLIN | POLLRDNORM);
	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;
	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);
	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
sokqfilter(struct file *fp, struct knote *kn)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
			kn->kn_fop = &soread_filtops;
		kn->kn_fop = &sowrite_filtops;
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
filt_sordetach(struct knote *kn)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
filt_soread(struct knote *kn, long hint)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
	if (so->so_error)	/* temporary udp error */
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
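/*
 * Illustrative sketch: the NOTE_LOWAT case above corresponds to a user
 * process registering an EVFILT_READ event with a per-knote low-water mark:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The filter then triggers only once at least 128 bytes are buffered, or on
 * EOF/error, which is reported through EV_EOF and kn_fflags.
 */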
filt_sowdetach(struct knote *kn)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
filt_sowrite(struct knote *kn, long hint)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
	if (so->so_error)	/* temporary udp error */
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
filt_solisten(struct knote *kn, long hint)
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));