 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/filedesc.h>
#include <sys/nlookup.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>
#include <sys/socketvar2.h>
#include <sys/msgport2.h>
typedef struct unp_defdiscard {
	struct unp_defdiscard *next;
static MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
static unp_gen_t unp_gencnt;
static u_int unp_count;
static struct unp_head unp_shead, unp_dhead;
static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
static int unp_defdiscard_nest;
static unp_defdiscard_t unp_defdiscard_base;
 * Unix communications domain.
 * rethink name space problems
 * need a proper out-of-band
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t unp_ino = 1;		/* prototype for fake inode numbers */
static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER(&unp_ino_spin);
static int unp_attach (struct socket *, struct pru_attach_info *);
static void unp_detach (struct unpcb *);
static int unp_bind (struct unpcb *, struct sockaddr *, struct thread *);
static int unp_connect (struct socket *, struct sockaddr *,
static void unp_disconnect (struct unpcb *);
static void unp_shutdown (struct unpcb *);
static void unp_drop (struct unpcb *, int);
static void unp_gc (void);
static int unp_gc_clearmarks(struct file *, void *);
static int unp_gc_checkmarks(struct file *, void *);
static int unp_gc_checkrefs(struct file *, void *);
static int unp_revoke_gc_check(struct file *, void *);
static void unp_scan (struct mbuf *, void (*)(struct file *, void *),
static void unp_mark (struct file *, void *data);
static void unp_discard (struct file *, void *);
static int unp_internalize (struct mbuf *, struct thread *);
static int unp_listen (struct unpcb *, struct thread *);
static void unp_fp_externalize(struct lwp *lp, struct file *fp, int fd);
 * Since unp_token will be automatically released upon execution of
 * blocking code, we need to reference unp_conn before any possible
 * blocking code to prevent it from being ripped out from under us.
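/*
 * Illustrative sketch (not from the original source): the intended usage
 * pattern around code that may block while unp_token is temporarily
 * released looks roughly like this, with blocking_operation() standing in
 * for any call that can sleep:
 *
 *	unp2 = unp->unp_conn;
 *	unp_reference(unp2);		pin the peer before blocking
 *	error = blocking_operation();	unp_token may be released here
 *	unp_free(unp2);			drop the temporary reference
 */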
/* NOTE: unp_token MUST be held */
unp_reference(struct unpcb *unp)
atomic_add_int(&unp->unp_refcnt, 1);
/* NOTE: unp_token MUST be held */
unp_free(struct unpcb *unp)
KKASSERT(unp->unp_refcnt > 0);
if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 * will sofree() it when we return.
uipc_abort(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
unp_drop(unp, ECONNABORTED);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_accept(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
struct unpcb *unp2 = unp->unp_conn;
 * Pass back name of connected socket,
 * if it was bound and we are still connected
 * (our peer may have closed already!).
if (unp2 && unp2->unp_addr) {
*msg->accept.nm_nam = dup_sockaddr(
(struct sockaddr *)unp2->unp_addr);
*msg->accept.nm_nam = dup_sockaddr(&sun_noname);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_attach(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_bind(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_connect(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
error = unp_connect(msg->base.nm_so,
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_connect2(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->connect2.nm_so1->so_pcb;
error = unp_connect2(msg->connect2.nm_so1,
msg->connect2.nm_so2);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
/* control is EOPNOTSUPP */
uipc_detach(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_disconnect(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_listen(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
if (unp == NULL || unp->unp_vnode == NULL)
error = unp_listen(unp, msg->listen.nm_td);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_peeraddr(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
} else if (unp->unp_conn && unp->unp_conn->unp_addr) {
struct unpcb *unp2 = unp->unp_conn;
*msg->peeraddr.nm_nam = dup_sockaddr(
(struct sockaddr *)unp2->unp_addr);
 * XXX: It seems that this test always fails even when the
 * connection is established, so this else clause was added
 * as a workaround to return a PF_LOCAL sockaddr.
*msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_rcvd(netmsg_t msg)
struct unpcb *unp, *unp2;
lwkt_gettoken(&unp_token);
so = msg->base.nm_so;
switch (so->so_type) {
panic("uipc_rcvd DGRAM?");
if (unp->unp_conn == NULL)
unp2 = unp->unp_conn;
 * Because we are transferring mbufs directly to the
 * peer socket we have to use SSB_STOP on the sender
 * to prevent it from queueing an unbounded number of mbufs.
so2 = unp2->unp_socket;
if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax
atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);
panic("uipc_rcvd unknown socktype");
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
/* pru_rcvoob is EOPNOTSUPP */
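/*
 * Flow control note: stream and seqpacket data is appended directly to
 * the peer's receive buffer.  uipc_send() sets SSB_STOP on the sending
 * socket once the peer's receive buffer exceeds the sender's send-buffer
 * limits, and uipc_rcvd() clears SSB_STOP again once the receiver has
 * drained back below those limits, allowing the sender to resume.
 */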
uipc_send(netmsg_t msg)
struct unpcb *unp, *unp2;
struct mbuf *control;
lwkt_gettoken(&unp_token);
so = msg->base.nm_so;
control = msg->send.nm_control;
if (msg->send.nm_flags & PRUS_OOB) {
if (control && (error = unp_internalize(control, msg->send.nm_td)))
switch (so->so_type) {
struct sockaddr *from;
if (msg->send.nm_addr) {
error = unp_connect(so,
if (unp->unp_conn == NULL) {
unp2 = unp->unp_conn;
so2 = unp2->unp_socket;
from = (struct sockaddr *)unp->unp_addr;
lwkt_gettoken(&so2->so_rcv.ssb_token);
if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
if (msg->send.nm_addr)
lwkt_reltoken(&so2->so_rcv.ssb_token);
/* Connect if not connected yet. */
 * Note: A better implementation would complain
 * if not equal to the peer's address.
if (!(so->so_state & SS_ISCONNECTED)) {
if (msg->send.nm_addr) {
error = unp_connect(so,
if (so->so_state & SS_CANTSENDMORE) {
if (unp->unp_conn == NULL)
panic("uipc_send connected but no connection?");
unp2 = unp->unp_conn;
so2 = unp2->unp_socket;
 * Send to paired receive port, and then reduce
 * send buffer hiwater marks to maintain backpressure.
lwkt_gettoken(&so2->so_rcv.ssb_token);
if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
} else if (so->so_type == SOCK_SEQPACKET) {
sbappendrecord(&so2->so_rcv.sb, m);
sbappend(&so2->so_rcv.sb, m);
 * Because we are transferring mbufs directly to the
 * peer socket we have to use SSB_STOP on the sender
 * to prevent it from queueing an unbounded number of mbufs.
if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax
atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
lwkt_reltoken(&so2->so_rcv.ssb_token);
panic("uipc_send unknown socktype");
 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
if (msg->send.nm_flags & PRUS_EOF) {
if (control && error != 0)
unp_dispose(control);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_sense(netmsg_t msg)
lwkt_gettoken(&unp_token);
so = msg->base.nm_so;
sb = msg->sense.nm_stat;
sb->st_blksize = so->so_snd.ssb_hiwat;
if (unp->unp_ino == 0) {	/* make up a non-zero inode number */
spin_lock(&unp_ino_spin);
unp->unp_ino = unp_ino++;
spin_unlock(&unp_ino_spin);
sb->st_ino = unp->unp_ino;
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_shutdown(netmsg_t msg)
lwkt_gettoken(&unp_token);
so = msg->base.nm_so;
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
uipc_sockaddr(netmsg_t msg)
lwkt_gettoken(&unp_token);
unp = msg->base.nm_so->so_pcb;
*msg->sockaddr.nm_nam =
dup_sockaddr((struct sockaddr *)unp->unp_addr);
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
struct pr_usrreqs uipc_usrreqs = {
.pru_abort = uipc_abort,
.pru_accept = uipc_accept,
.pru_attach = uipc_attach,
.pru_bind = uipc_bind,
.pru_connect = uipc_connect,
.pru_connect2 = uipc_connect2,
.pru_control = pr_generic_notsupp,
.pru_detach = uipc_detach,
.pru_disconnect = uipc_disconnect,
.pru_listen = uipc_listen,
.pru_peeraddr = uipc_peeraddr,
.pru_rcvd = uipc_rcvd,
.pru_rcvoob = pr_generic_notsupp,
.pru_send = uipc_send,
.pru_sense = uipc_sense,
.pru_shutdown = uipc_shutdown,
.pru_sockaddr = uipc_sockaddr,
.pru_sosend = sosend,
.pru_soreceive = soreceive
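/*
 * Socket-option note: the getsockopt() path below copies out the peer
 * credentials cached in unp_peercred (filled in by unp_connect() and
 * unp_listen()) when UNP_HAVEPC is set.  The option name handled here is
 * elided from this excerpt, but this is the mechanism a LOCAL_PEERCRED
 * style query relies on.
 */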
uipc_ctloutput(netmsg_t msg)
struct sockopt *sopt;
lwkt_gettoken(&unp_token);
so = msg->base.nm_so;
sopt = msg->ctloutput.nm_sopt;
switch (sopt->sopt_dir) {
switch (sopt->sopt_name) {
if (unp->unp_flags & UNP_HAVEPC)
soopt_from_kbuf(sopt, &unp->unp_peercred,
sizeof(unp->unp_peercred));
if (so->so_type == SOCK_STREAM)
else if (so->so_type == SOCK_SEQPACKET)
lwkt_reltoken(&unp_token);
lwkt_replymsg(&msg->lmsg, error);
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 * We want the local send/recv space to be significantly larger than lo0's
static u_long unpst_sendspace = PIPSIZ;
static u_long unpst_recvspace = PIPSIZ;
static u_long unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long unpdg_recvspace = 4*1024;
static int unp_rights;			/* file descriptors in flight */
static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin);
SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
&unpst_sendspace, 0, "Size of stream socket send buffer");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
&unpst_recvspace, 0, "Size of stream socket receive buffer");
SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
&unpdg_sendspace, 0, "Max datagram socket size");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
&unpdg_recvspace, 0, "Size of datagram socket receive buffer");
SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
"File descriptors in flight");
unp_attach(struct socket *so, struct pru_attach_info *ai)
lwkt_gettoken(&unp_token);
if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
switch (so->so_type) {
error = soreserve(so, unpst_sendspace, unpst_recvspace,
error = soreserve(so, unpdg_sendspace, unpdg_recvspace,
unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
unp->unp_gencnt = ++unp_gencnt;
LIST_INIT(&unp->unp_refs);
unp->unp_socket = so;
unp->unp_rvnode = ai->fd_rdir;		/* jail cruft XXX JH */
LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead
: &unp_shead, unp, unp_link);
so->so_pcb = (caddr_t)unp;
lwkt_reltoken(&unp_token);
unp_detach(struct unpcb *unp)
lwkt_gettoken(&unp_token);
LIST_REMOVE(unp, unp_link);
unp->unp_gencnt = ++unp_gencnt;
if (unp->unp_vnode) {
unp->unp_vnode->v_socket = NULL;
vrele(unp->unp_vnode);
unp->unp_vnode = NULL;
while (!LIST_EMPTY(&unp->unp_refs))
unp_drop(LIST_FIRST(&unp->unp_refs), ECONNRESET);
soisdisconnected(unp->unp_socket);
so = unp->unp_socket;
soreference(so);	/* for delayed sorflush */
unp->unp_socket = NULL;
sofree(so);		/* remove pcb ref */
 * Normally the receive buffer is flushed later,
 * in sofree, but if our receive buffer holds references
 * to descriptors that are now garbage, we will dispose
 * of those descriptor references after the garbage collector
 * gets them (resulting in a "panic: closef: count < 0").
lwkt_reltoken(&unp_token);
kfree(unp->unp_addr, M_SONAME);
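/*
 * unp_bind() binds a PF_LOCAL socket to a filesystem path: it copies and
 * NUL-terminates sun_path, resolves it with nlookup() (failing if an
 * entry already exists at that name), creates a VSOCK vnode with
 * VOP_NCREATE(), points the vnode at the socket, and caches the bound
 * address in unp_addr.
 */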
unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
struct proc *p = td->td_proc;
struct sockaddr_un *soun = (struct sockaddr_un *)nam;
struct nlookupdata nd;
char buf[SOCK_MAXADDRLEN];
lwkt_gettoken(&unp_token);
if (unp->unp_vnode != NULL) {
namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
strncpy(buf, soun->sun_path, namelen);
buf[namelen] = 0;	/* null-terminate the string */
error = nlookup_init(&nd, buf, UIO_SYSSPACE,
NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
error = nlookup(&nd);
if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
vattr.va_type = VSOCK;
vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
vp->v_socket = unp->unp_socket;
unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam);
lwkt_reltoken(&unp_token);
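/*
 * unp_connect() resolves the target path, verifies that it names a VSOCK
 * vnode the caller may write to, and locates the socket bound to it.  For
 * connection-oriented types (PR_CONNREQUIRED) a fresh socket is spawned
 * with sonewconn() and peer credentials are exchanged; the actual pcb
 * linkage is then performed by unp_connect2().
 */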
unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
struct proc *p = td->td_proc;
struct sockaddr_un *soun = (struct sockaddr_un *)nam;
struct socket *so2, *so3;
struct unpcb *unp, *unp2, *unp3;
struct nlookupdata nd;
char buf[SOCK_MAXADDRLEN];
lwkt_gettoken(&unp_token);
len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
strncpy(buf, soun->sun_path, len);
error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
error = nlookup(&nd);
error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
if (vp->v_type != VSOCK) {
error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
error = ECONNREFUSED;
if (so->so_type != so2->so_type) {
if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
if (!(so2->so_options & SO_ACCEPTCONN) ||
(so3 = sonewconn(so2, 0)) == NULL) {
error = ECONNREFUSED;
unp3->unp_addr = (struct sockaddr_un *)
dup_sockaddr((struct sockaddr *)unp2->unp_addr);
 * unp_peercred management:
 * The connecting client's credentials are copied
 * from its process structure at the time of connect()
cru2x(p->p_ucred, &unp3->unp_peercred);
unp3->unp_flags |= UNP_HAVEPC;
 * The receiver's (server's) credentials are copied
 * from the unp_peercred member of the socket on which
 * it called listen(); unp_listen() cached that
 * process's credentials at that time so we can use
KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
("unp_connect: listener without cached peercred"));
memcpy(&unp->unp_peercred, &unp2->unp_peercred,
sizeof(unp->unp_peercred));
unp->unp_flags |= UNP_HAVEPC;
error = unp_connect2(so, so2);
lwkt_reltoken(&unp_token);
unp_connect2(struct socket *so, struct socket *so2)
lwkt_gettoken(&unp_token);
if (so2->so_type != so->so_type) {
lwkt_reltoken(&unp_token);
unp->unp_conn = unp2;
switch (so->so_type) {
LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
case SOCK_SEQPACKET:
unp2->unp_conn = unp;
panic("unp_connect2");
lwkt_reltoken(&unp_token);
unp_disconnect(struct unpcb *unp)
lwkt_gettoken(&unp_token);
unp2 = unp->unp_conn;
lwkt_reltoken(&unp_token);
unp->unp_conn = NULL;
switch (unp->unp_socket->so_type) {
LIST_REMOVE(unp, unp_reflink);
soclrstate(unp->unp_socket, SS_ISCONNECTED);
case SOCK_SEQPACKET:
unp_reference(unp2);
unp2->unp_conn = NULL;
soisdisconnected(unp->unp_socket);
soisdisconnected(unp2->unp_socket);
lwkt_reltoken(&unp_token);
unp_abort(struct unpcb *unp)
lwkt_gettoken(&unp_token);
lwkt_reltoken(&unp_token);
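/*
 * prison_unpcb() returns zero when the pcb should be visible to the
 * calling thread (no process context, caller not jailed, or the pcb's
 * root vnode matches the caller's root directory) and nonzero otherwise;
 * unp_pcblist() uses it to hide sockets belonging to other jails.
 */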
prison_unpcb(struct thread *td, struct unpcb *unp)
if ((p = td->td_proc) == NULL)
if (!p->p_ucred->cr_prison)
if (p->p_fd->fd_rdir == unp->unp_rvnode)
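/*
 * unp_pcblist() is the sysctl handler behind the net.local.*.pcblist
 * nodes declared below: it sizes the request, snapshots the matching pcb
 * list under unp_token, and copies out one struct xunpcb per socket.
 * Userland tools such as netstat(1) typically consume this output.
 */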
unp_pcblist(SYSCTL_HANDLER_ARGS)
struct unpcb *unp, **unp_list;
struct unp_head *head;
head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);
KKASSERT(curproc != NULL);
 * The process of preparing the PCB list is too time-consuming and
 * resource-intensive to repeat twice on every request.
if (req->oldptr == NULL) {
req->oldidx = (n + n/8) * sizeof(struct xunpcb);
if (req->newptr != NULL)
lwkt_gettoken(&unp_token);
 * OK, now we're committed to doing something.
gencnt = unp_gencnt;
unp_list = kmalloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
for (unp = LIST_FIRST(head), i = 0; unp && i < n;
unp = LIST_NEXT(unp, unp_link)) {
if (unp->unp_gencnt <= gencnt && !prison_unpcb(req->td, unp))
unp_list[i++] = unp;
n = i;	/* in case we lost some during malloc */
for (i = 0; i < n; i++) {
if (unp->unp_gencnt <= gencnt) {
xu.xu_len = sizeof xu;
 * XXX - need more locking here to protect against
 * connect/disconnect races for SMP.
bcopy(unp->unp_addr, &xu.xu_addr,
unp->unp_addr->sun_len);
if (unp->unp_conn && unp->unp_conn->unp_addr)
bcopy(unp->unp_conn->unp_addr,
unp->unp_conn->unp_addr->sun_len);
bcopy(unp, &xu.xu_unp, sizeof *unp);
sotoxsocket(unp->unp_socket, &xu.xu_socket);
error = SYSCTL_OUT(req, &xu, sizeof xu);
lwkt_reltoken(&unp_token);
kfree(unp_list, M_TEMP);
SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
(caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
"List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
(caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
"List of active local stream sockets");
SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
(caddr_t)(long)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
"List of active local seqpacket stream sockets");
unp_shutdown(struct unpcb *unp)
if ((unp->unp_socket->so_type == SOCK_STREAM ||
unp->unp_socket->so_type == SOCK_SEQPACKET) &&
unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
unp_drop(struct unpcb *unp, int err)
struct socket *so = unp->unp_socket;
unp_disconnect(unp);
lwkt_gettoken(&unp_token);
lwkt_reltoken(&unp_token);
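/*
 * unp_externalize() runs on the receive side of SCM_RIGHTS descriptor
 * passing.  The control mbuf arrives holding struct file pointers
 * (installed by unp_internalize() on the send side); this routine
 * allocates descriptor slots in the receiving process with fdalloc(),
 * installs each file via unp_fp_externalize(), and rewrites the cmsg
 * payload in place as an array of ints, adjusting cmsg_len to match.
 */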
unp_externalize(struct mbuf *rights)
struct thread *td = curthread;
struct proc *p = td->td_proc;		/* XXX */
struct lwp *lp = td->td_lwp;
struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
/ sizeof (struct file *);
lwkt_gettoken(&unp_token);
 * If the new FDs will not fit, then we free them all.
if (!fdavail(p, newfds)) {
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < newfds; i++) {
 * zero the pointer before calling unp_discard,
 * since it may end up in unp_gc()..
unp_discard(fp, NULL);
lwkt_reltoken(&unp_token);
 * Now change each pointer to an fd in the global table to
 * an integer that is the index to the local fd table entry
 * that we set up to point to the global one we are transferring.
 * If sizeof (struct file *) is bigger than or equal to sizeof int,
 * then do it in forward order.  In that case, an integer will
 * always come in the same place or before its corresponding
 * struct file pointer.
 * If sizeof (struct file *) is smaller than sizeof int, then
 * do it in reverse order.
if (sizeof (struct file *) >= sizeof (int)) {
fdp = (int *)CMSG_DATA(cm);
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < newfds; i++) {
if (fdalloc(p, 0, &f))
panic("unp_externalize");
unp_fp_externalize(lp, fp, f);
fdp = (int *)CMSG_DATA(cm) + newfds - 1;
rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
for (i = 0; i < newfds; i++) {
if (fdalloc(p, 0, &f))
panic("unp_externalize");
unp_fp_externalize(lp, fp, f);
 * Adjust length, in case sizeof(struct file *) and sizeof(int)
cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
rights->m_len = cm->cmsg_len;
lwkt_reltoken(&unp_token);
unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
lwkt_gettoken(&unp_token);
if (fp->f_flag & FREVOKED) {
kprintf("Warning: revoked fp exiting unix socket\n");
error = falloc(lp, &fx, NULL);
fsetfd(lp->lwp_proc->p_fd, fx, fd);
fsetfd(lp->lwp_proc->p_fd, NULL, fd);
fsetfd(lp->lwp_proc->p_fd, fp, fd);
spin_lock(&unp_spin);
spin_unlock(&unp_spin);
lwkt_reltoken(&unp_token);
LIST_INIT(&unp_dhead);
LIST_INIT(&unp_shead);
spin_init(&unp_spin);
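/*
 * unp_internalize() runs on the send side of ancillary-data handling.
 * SCM_CREDS messages are filled in with the sender's credentials;
 * SCM_RIGHTS messages have their array of ints (descriptors) validated
 * and converted in place into held struct file pointers, growing the
 * mbuf into a cluster if the pointer form needs more room.
 *
 * A minimal userland sketch of what produces such a message (illustrative
 * only, not part of this file; error handling omitted, fd_to_pass and
 * unix_sock are placeholder names):
 *
 *	struct msghdr msg = { 0 };
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } u;
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = u.buf;
 *	msg.msg_controllen = sizeof(u.buf);
 *	u.hdr.cmsg_level = SOL_SOCKET;
 *	u.hdr.cmsg_type = SCM_RIGHTS;
 *	u.hdr.cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(&u.hdr), &fd_to_pass, sizeof(int));
 *	sendmsg(unix_sock, &msg, 0);
 */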
unp_internalize(struct mbuf *control, struct thread *td)
struct proc *p = td->td_proc;
struct filedesc *fdescp;
struct cmsghdr *cm = mtod(control, struct cmsghdr *);
struct cmsgcred *cmcred;
lwkt_gettoken(&unp_token);
if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
cm->cmsg_level != SOL_SOCKET ||
CMSG_ALIGN(cm->cmsg_len) != control->m_len) {
 * Fill in credential information.
if (cm->cmsg_type == SCM_CREDS) {
cmcred = (struct cmsgcred *)CMSG_DATA(cm);
cmcred->cmcred_pid = p->p_pid;
cmcred->cmcred_uid = p->p_ucred->cr_ruid;
cmcred->cmcred_gid = p->p_ucred->cr_rgid;
cmcred->cmcred_euid = p->p_ucred->cr_uid;
cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
for (i = 0; i < cmcred->cmcred_ngroups; i++)
cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
 * cmsghdr may not be aligned, do not allow calculation(s) to
if (cm->cmsg_len < CMSG_LEN(0)) {
oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof (int);
 * Check that all the FDs passed in refer to legal, open files.
 * If not, reject the entire operation.
fdp = (int *)CMSG_DATA(cm);
for (i = 0; i < oldfds; i++) {
if ((unsigned)fd >= fdescp->fd_nfiles ||
fdescp->fd_files[fd].fp == NULL) {
if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
 * Now replace the integer FDs with pointers to
 * the associated global file table entry.
 * Allocate a bigger buffer as necessary, but if a cluster is not
 * enough, return E2BIG.
newlen = CMSG_LEN(oldfds * sizeof(struct file *));
if (newlen > MCLBYTES) {
if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
if (control->m_flags & M_EXT) {
MCLGET(control, MB_WAIT);
if (!(control->m_flags & M_EXT)) {
/* copy the data to the cluster */
memcpy(mtod(control, char *), cm, cm->cmsg_len);
cm = mtod(control, struct cmsghdr *);
 * Adjust length, in case sizeof(struct file *) and sizeof(int)
cm->cmsg_len = newlen;
control->m_len = CMSG_ALIGN(newlen);
 * Transform the file descriptors into struct file pointers.
 * If sizeof (struct file *) is bigger than or equal to sizeof int,
 * then do it in reverse order so that the int won't get clobbered until
 * If sizeof (struct file *) is smaller than sizeof int, then
 * do it in forward order.
if (sizeof (struct file *) >= sizeof (int)) {
fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
for (i = 0; i < oldfds; i++) {
fp = fdescp->fd_files[*fdp--].fp;
spin_lock(&unp_spin);
spin_unlock(&unp_spin);
fdp = (int *)CMSG_DATA(cm);
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < oldfds; i++) {
fp = fdescp->fd_files[*fdp++].fp;
spin_lock(&unp_spin);
spin_unlock(&unp_spin);
lwkt_reltoken(&unp_token);
 * Garbage collect in-transit file descriptors that get lost due to
 * loops (i.e. when a socket is sent to another process over itself,
 * and more complex situations).
 * NOT MPSAFE - TODO socket flush code and maybe closef.  Rest is MPSAFE.
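/*
 * Overview of the collector below: it is a mark-and-sweep pass over the
 * global file table.  unp_gc_clearmarks() clears FMARK/FDEFER on every
 * file, unp_gc_checkmarks() marks files that are externally accessible
 * and follows SCM_RIGHTS references queued on local-domain sockets
 * (looping while FDEFER work remains), and unp_gc_checkrefs() finally
 * collects files whose only remaining references are in-flight messages
 * so their queued rights can be flushed and closed.
 */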
struct unp_gc_info {
struct file **extra_ref;
struct file *locked_fp;
struct unp_gc_info info;
static boolean_t unp_gcing;
 * Only one gc can be in-progress at any given moment
spin_lock(&unp_spin);
spin_unlock(&unp_spin);
spin_unlock(&unp_spin);
lwkt_gettoken(&unp_token);
 * Before going through all this, set all FDs to be NOT deferred
 * and NOT externally accessible (not marked).  During the scan
 * a fd can be marked externally accessible but we may or may not
 * be able to immediately process it (controlled by FDEFER).
 * If we loop, sleep a bit.  The complexity of the topology can cause
 * multiple loops.  Also failure to acquire the socket's so_rcv
 * token can cause us to loop.
allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
allfiles_scan_exclusive(unp_gc_checkmarks, &info);
tsleep(&info, 0, "gcagain", 1);
} while (info.defer);
 * We grab an extra reference to each of the file table entries
 * that are not otherwise accessible and then free the rights
 * that are stored in messages on them.
 * The bug in the original code is a little tricky, so I'll describe
 * what's wrong with it here.
 * It is incorrect to simply unp_discard each entry for f_msgcount
 * times -- consider the case of sockets A and B that contain
 * references to each other.  On a last close of some other socket,
 * we trigger a gc since the number of outstanding rights (unp_rights)
 * is non-zero.  If during the sweep phase the gc code unp_discard()s,
 * we end up doing a (full) closef on the descriptor.  A closef on A
 * results in the following chain.  Closef calls soo_close, which
 * calls soclose.  Soclose calls first (through the switch
 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
 * returns because the previous instance had set unp_gcing, and
 * we return all the way back to soclose, which marks the socket
 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
 * to free up the rights that are queued in messages on the socket A,
 * i.e., the reference on B.  The sorflush calls via the dom_dispose
 * switch unp_dispose, which unp_scans with unp_discard.  This second
 * instance of unp_discard just calls closef on B.
 * Well, a similar chain occurs on B, resulting in a sorflush on B,
 * which results in another closef on A.  Unfortunately, A is already
 * being closed, and the descriptor has already been marked with
 * SS_NOFDREF, and soclose panics at this point.
 * Here, we first take an extra reference to each inaccessible
 * descriptor.  Then, we call sorflush ourselves, since we know
 * it is a Unix domain socket anyhow.  After we destroy all the
 * rights carried in messages, we do a last closef to get rid
 * of our extra reference.  This is the last close, and the
 * unp_detach etc will shut down the socket.
 * 91/09/19, bsy@cs.cmu.edu
info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE, M_WAITOK);
info.maxindex = 256;
allfiles_scan_exclusive(unp_gc_checkrefs, &info);
 * For each FD on our hit list, do the following two things
for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
struct file *tfp = *fpp;
if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
sorflush((struct socket *)(tfp->f_data));
for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
} while (info.index == info.maxindex);
lwkt_reltoken(&unp_token);
kfree((caddr_t)info.extra_ref, M_FILE);
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
unp_gc_checkrefs(struct file *fp, void *data)
struct unp_gc_info *info = data;
if (fp->f_count == 0)
if (info->index == info->maxindex)
 * If all refs are from msgs, and it's not marked accessible
 * then it must be referenced from some unreachable cycle
 * of (shut-down) FDs, so include it in our
 * list of FDs to remove
if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
info->extra_ref[info->index++] = fp;
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
unp_gc_clearmarks(struct file *fp, void *data __unused)
atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
unp_gc_checkmarks(struct file *fp, void *data)
struct unp_gc_info *info = data;
 * If the file is not open, skip it.  Make sure it isn't marked
 * deferred or we could loop forever, in case we somehow race
if (fp->f_count == 0) {
if (fp->f_flag & FDEFER)
atomic_clear_int(&fp->f_flag, FDEFER);
 * If we already marked it as 'defer' in a
 * previous pass, then try to process it this time
if (fp->f_flag & FDEFER) {
atomic_clear_int(&fp->f_flag, FDEFER);
 * If it's not deferred, then check if it's
 * already marked; if so, skip it.
if (fp->f_flag & FMARK)
 * If all references are from messages
 * in transit, then skip it; it's not
 * externally accessible.
if (fp->f_count == fp->f_msgcount)
 * If it got this far then it must be
 * externally accessible.
atomic_set_int(&fp->f_flag, FMARK);
 * Either it was deferred, or it is externally
 * accessible and not already marked so.
 * Now check whether it is possibly one of OUR sockets.
if (fp->f_type != DTYPE_SOCKET ||
(so = (struct socket *)fp->f_data) == NULL) {
if (so->so_proto->pr_domain != &localdomain ||
!(so->so_proto->pr_flags & PR_RIGHTS)) {
 * So, OK, it's one of our sockets and it IS externally accessible
 * (or was deferred).  Now we look to see if we hold any file
 * descriptors in its message buffers.  Follow those links and mark
 * them as accessible too.
 * We are holding multiple spinlocks here; if we cannot get the
 * token non-blocking, defer until the next loop.
info->locked_fp = fp;
if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
lwkt_reltoken(&so->so_rcv.ssb_token);
atomic_set_int(&fp->f_flag, FDEFER);
 * Scan all unix domain sockets and replace any revoked file pointers
 * found with the dummy file pointer fx.  We don't worry about races
 * against file pointers being read out as those are handled in the
#define REVOKE_GC_MAXFILES	32
struct unp_revoke_gc_info {
struct file *fary[REVOKE_GC_MAXFILES];
unp_revoke_gc(struct file *fx)
struct unp_revoke_gc_info info;
lwkt_gettoken(&unp_token);
allfiles_scan_exclusive(unp_revoke_gc_check, &info);
for (i = 0; i < info.fcount; ++i)
unp_fp_externalize(NULL, info.fary[i], -1);
} while (info.fcount == REVOKE_GC_MAXFILES);
lwkt_reltoken(&unp_token);
 * Check for and replace revoked descriptors.
 * WARNING: This routine is not allowed to block.
unp_revoke_gc_check(struct file *fps, void *vinfo)
struct unp_revoke_gc_info *info = vinfo;
 * Is this a unix domain socket with rights-passing abilities?
if (fps->f_type != DTYPE_SOCKET)
if ((so = (struct socket *)fps->f_data) == NULL)
if (so->so_proto->pr_domain != &localdomain)
if ((so->so_proto->pr_flags & PR_RIGHTS) == 0)
 * Scan the mbufs for control messages and replace any revoked
 * descriptors we find.
lwkt_gettoken(&so->so_rcv.ssb_token);
m0 = so->so_rcv.ssb_mb;
for (m = m0; m; m = m->m_next) {
if (m->m_type != MT_CONTROL)
if (m->m_len < sizeof(*cm))
cm = mtod(m, struct cmsghdr *);
if (cm->cmsg_level != SOL_SOCKET ||
cm->cmsg_type != SCM_RIGHTS) {
qfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(void *);
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < qfds; i++) {
if (fp->f_flag & FREVOKED) {
kprintf("Warning: Removing revoked fp from unix domain socket queue\n");
info->fx->f_msgcount++;
info->fary[info->fcount++] = fp;
if (info->fcount == REVOKE_GC_MAXFILES)
if (info->fcount == REVOKE_GC_MAXFILES)
if (info->fcount == REVOKE_GC_MAXFILES)
lwkt_reltoken(&so->so_rcv.ssb_token);
 * Stop the scan if we filled up our array.
if (info->fcount == REVOKE_GC_MAXFILES)
 * Dispose of the fps stored in an mbuf.
 * The dds loop can cause additional fps to be entered onto the
 * list while it is running, flattening out the operation and avoiding
 * a deep kernel stack recursion.
unp_dispose(struct mbuf *m)
unp_defdiscard_t dds;
lwkt_gettoken(&unp_token);
++unp_defdiscard_nest;
unp_scan(m, unp_discard, NULL);
if (unp_defdiscard_nest == 1) {
while ((dds = unp_defdiscard_base) != NULL) {
unp_defdiscard_base = dds->next;
closef(dds->fp, NULL);
kfree(dds, M_UNPCB);
--unp_defdiscard_nest;
lwkt_reltoken(&unp_token);
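/*
 * unp_listen() is called from uipc_listen() and snapshots the listening
 * process's credentials into unp_peercred, setting UNP_HAVEPCCACHED so
 * that unp_connect() can later hand those credentials to connecting
 * clients.
 */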
unp_listen(struct unpcb *unp, struct thread *td)
struct proc *p = td->td_proc;
lwkt_gettoken(&unp_token);
cru2x(p->p_ucred, &unp->unp_peercred);
unp->unp_flags |= UNP_HAVEPCCACHED;
lwkt_reltoken(&unp_token);
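/*
 * unp_scan() walks an mbuf chain, finds MT_CONTROL mbufs carrying
 * SOL_SOCKET/SCM_RIGHTS messages, and invokes the supplied callback
 * (unp_mark or unp_discard) on each struct file pointer embedded in
 * them.  It is the common traversal primitive used by both the GC mark
 * phase and unp_dispose().
 */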
unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
for (m = m0; m; m = m->m_next) {
if (m->m_type == MT_CONTROL &&
m->m_len >= sizeof(*cm)) {
cm = mtod(m, struct cmsghdr *);
if (cm->cmsg_level != SOL_SOCKET ||
cm->cmsg_type != SCM_RIGHTS)
qfds = (cm->cmsg_len - CMSG_LEN(0)) /
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < qfds; i++)
break;		/* XXX, but saves time */
 * Mark visibility.  info->defer is recalculated on every pass.
unp_mark(struct file *fp, void *data)
struct unp_gc_info *info = data;
if ((fp->f_flag & FMARK) == 0) {
atomic_set_int(&fp->f_flag, FMARK | FDEFER);
} else if (fp->f_flag & FDEFER) {
 * Discard an fp previously held in a unix domain socket mbuf.  To
 * avoid blowing out the kernel stack due to contrived chain-reactions
 * we may have to defer the operation to a higher procedural level.
 * Caller holds unp_token
unp_discard(struct file *fp, void *data __unused)
unp_defdiscard_t dds;
spin_lock(&unp_spin);
spin_unlock(&unp_spin);
if (unp_defdiscard_nest) {
dds = kmalloc(sizeof(*dds), M_UNPCB, M_WAITOK|M_ZERO);
dds->next = unp_defdiscard_base;
unp_defdiscard_base = dds;