/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.22 2002/12/15 09:24:23 maxim Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.8 2003/07/26 19:42:11 rob Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <machine/limits.h>
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

struct	vm_zone *socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
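
/*
 * Illustrative userland sketch (not part of this kernel file): the
 * somaxconn variable above is exported as the kern.ipc.somaxconn sysctl,
 * so it can be read and raised with sysctlbyname(3).  Setting a new
 * value requires superuser privilege; the value 256 here is arbitrary.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int oldmax, newmax = 256;
	size_t oldlen = sizeof(oldmax);

	/* Fetch the old value and install the new one in a single call. */
	if (sysctlbyname("kern.ipc.somaxconn", &oldmax, &oldlen,
	    &newmax, sizeof(newmax)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("kern.ipc.somaxconn was %d, now %d\n", oldmax, newmax);
	return (0);
}
#endif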
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok)
{
	struct socket *so;

	so = zalloc(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		TAILQ_INIT(&so->so_aiojobq);
	}
	return so;
}
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	so->so_proto = prp;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}
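
/*
 * Illustrative userland sketch (not part of this kernel file): the
 * socket(2) system call is what ultimately reaches socreate().  Passing
 * proto == 0 selects the domain's default protocol for the type via the
 * pffindtype() path above.
 */
#if 0
#include <sys/socket.h>
#include <stdio.h>

int
main(void)
{
	int s;

	/* PF_INET + SOCK_STREAM with proto 0 resolves to TCP. */
	s = socket(PF_INET, SOCK_STREAM, 0);
	if (s == -1)
		perror("socket");
	return (s == -1);
}
#endif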
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	splx(s);
	return (error);
}

void
sodealloc(struct socket *so)
{
	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_accf != NULL) {
		if (so->so_accf->so_accept_filter != NULL &&
		    so->so_accf->so_accept_filter->accf_destroy != NULL) {
			so->so_accf->so_accept_filter->accf_destroy(so);
		}
		if (so->so_accf->so_accept_filter_str != NULL)
			FREE(so->so_accf->so_accept_filter_str, M_ACCF);
		FREE(so->so_accf, M_ACCF);
	}
	crfree(so->so_cred);
	zfree(socket_zone, so);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}
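
/*
 * Illustrative userland sketch (not part of this kernel file): listen(2)
 * reaches solisten() above, and an oversized backlog argument is silently
 * clamped to kern.ipc.somaxconn before landing in so_qlimit.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
make_listener(unsigned short port)
{
	struct sockaddr_in sin;
	int s;

	s = socket(PF_INET, SOCK_STREAM, 0);
	if (s == -1)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1 ||
	    listen(s, 1024) == -1)	/* backlog clamped to somaxconn */
		return (-1);
	return (s);
}
#endif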
void
sofree(struct socket *so)
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
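
/*
 * Illustrative userland sketch (not part of this kernel file): the
 * SO_LINGER handling in soclose() above is what makes this close(2)
 * block for up to l_linger seconds in tsleep() while the disconnect
 * drains, instead of returning immediately.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static int
lingering_close(int s)
{
	struct linger l;

	l.l_onoff = 1;		/* enable SO_LINGER */
	l.l_linger = 5;		/* soclose() waits at most 5 seconds */
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		return (-1);
	return (close(s));
}
#endif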
/*
 * Must be called at splnet...
 */
int
soabort(struct socket *so)
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	splx(s);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	splx(s);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_proc && td->td_proc->p_stats)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the splnet() protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			(flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag and nothing left to
			 * send then use PRU_SEND_EOF instead of PRU_SEND.
			 */
			((flags & MSG_EOF) &&
			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			 (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME */
			(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			top, addr, control, td);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
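
/*
 * Illustrative userland sketch (not part of this kernel file): two
 * visible consequences of the sosend() contract above.  An atomic-protocol
 * datagram larger than the send buffer fails outright with EMSGSIZE, and
 * a non-blocking send that finds no room fails with EWOULDBLOCK rather
 * than waiting in sbwait().
 */
#if 0
#include <sys/socket.h>
#include <errno.h>

static ssize_t
try_send(int s, const void *buf, size_t len)
{
	ssize_t n;

	n = send(s, buf, len, MSG_DONTWAIT);
	if (n == -1 && (errno == EWOULDBLOCK || errno == EMSGSIZE)) {
		/*
		 * EWOULDBLOCK: no buffer space right now (SS_NBIO path).
		 * EMSGSIZE: datagram can never fit (resid > sb_hiwat).
		 */
	}
	return (n);
}
#endif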
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					so->so_rcv.sb_mb = m = m_free(m);
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
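
/*
 * Illustrative userland sketch (not part of this kernel file): MSG_PEEK
 * makes soreceive() leave the data queued (none of the sbfree()/m_free()
 * paths above run), while MSG_WAITALL keeps it looping in sbwait() until
 * the full count arrives or an error/EOF cuts it short.
 */
#if 0
#include <sys/socket.h>

static ssize_t
peek_then_read(int s, void *buf, size_t len)
{
	ssize_t n;

	n = recv(s, buf, len, MSG_PEEK);	/* data stays in the sockbuf */
	if (n <= 0)
		return (n);
	return (recv(s, buf, (size_t)n, MSG_WAITALL));	/* now consume it */
}
#endif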
int
soshutdown(struct socket *so, int how)
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}
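
/*
 * Illustrative userland sketch (not part of this kernel file):
 * shutdown(2) with one of the three how values validated above.  SHUT_WR
 * sends a FIN on TCP while leaving the receive side readable, which is
 * the usual way to signal end-of-request and still collect the reply.
 */
#if 0
#include <sys/socket.h>

static int
finish_request(int s)
{
	return (shutdown(s, SHUT_WR));	/* SHUT_RD and SHUT_RDWR also valid */
}
#endif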
void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	splx(s);

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}

	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;

	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}

	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
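
/*
 * Illustrative userland sketch (not part of this kernel file): installing
 * an accept filter through the code above.  The socket must already be
 * listening, and the named filter module must be registered (on FreeBSD
 * of this era, accf_data(9) provides "dataready"), or accept_filt_get()
 * fails with ENOENT.
 */
#if 0
#include <sys/socket.h>
#include <string.h>

static int
set_dataready(int s)
{
	struct accept_filter_arg afa;

	memset(&afa, 0, sizeof(afa));
	strcpy(afa.af_name, "dataready");	/* af_arg left empty */
	return (setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER,
	    &afa, sizeof(afa)));
}
#endif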
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
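
/*
 * Hypothetical kernel-side sketch (not part of this file): how a
 * protocol's pr_ctloutput() typically uses sooptcopyin().  The option
 * name MYPROTO_OPT_FOO and the handler itself are invented here purely
 * for illustration.
 */
#if 0
static int
myproto_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	if (sopt->sopt_dir != SOPT_SET || sopt->sopt_name != MYPROTO_OPT_FOO)
		return (ENOPROTOOPT);
	/* Insist on exactly an int; shorter user input yields EINVAL. */
	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
	if (error)
		return (error);
	/* ... apply optval to the protocol control block ... */
	return (0);
}
#endif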
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {

		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curproc) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		}
	}
bad:
	return (error);
}
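
/*
 * Illustrative userland sketch (not part of this kernel file): the
 * SO_SNDTIMEO/SO_RCVTIMEO conversion above turns this timeval into
 * clock ticks, so the timeout is rounded to the tick rate and limited
 * to SHRT_MAX ticks (EDOM beyond that).
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int
set_recv_timeout(int s, long sec, long usec)
{
	struct timeval tv;

	tv.tv_sec = sec;
	tv.tv_usec = usec;	/* must be in [0, 1000000) */
	return (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)));
}
#endif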
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, void *buf, size_t len)
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {

		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK);
			bzero(afap, sizeof(*afap));
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
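
/*
 * Illustrative userland sketch (not part of this kernel file): SO_ERROR
 * above reads and clears so_error, which is the standard way to learn
 * the outcome of a non-blocking connect(2) once the socket selects
 * writable.
 */
#if 0
#include <sys/socket.h>

static int
connect_result(int s)
{
	int err;
	socklen_t len = sizeof(err);

	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
		return (-1);
	return (err);	/* 0 on success, else the errno of the connect */
}
#endif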
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
				       m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
					m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}
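
/*
 * Illustrative userland sketch (not part of this kernel file): the event
 * mask handling in sopoll() above is what poll(2) reports.  POLLPRI and
 * POLLRDBAND fire at the out-of-band mark (so_oobmark / SS_RCVATMARK).
 */
#if 0
#include <poll.h>

static int
wait_readable_or_oob(int s, int timeout_ms)
{
	struct pollfd pfd;

	pfd.fd = s;
	pfd.events = POLLIN | POLLPRI;
	pfd.revents = 0;
	return (poll(&pfd, 1, timeout_ms));	/* >0: inspect pfd.revents */
}
#endif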
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	s = splnet();
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	splx(s);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}
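
/*
 * Illustrative userland sketch (not part of this kernel file): an
 * EVFILT_READ knote on a listening socket is routed to filt_solisten()
 * above, so kevent(2) reports the completed-connection queue length
 * (so_qlen) in the event's data field.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_for_connection(int kq, int listenfd)
{
	struct kevent kev;
	int n;

	EV_SET(&kev, listenfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	n = kevent(kq, &kev, 1, &kev, 1, NULL);	/* block until ready */
	if (n > 0)
		return ((int)kev.data);	/* connections ready to accept */
	return (-1);
}
#endif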