2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
30 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/domain.h>
37 #include <sys/fcntl.h>
38 #include <sys/malloc.h> /* XXX must be before <sys/file.h> */
41 #include <sys/filedesc.h>
43 #include <sys/nlookup.h>
44 #include <sys/protosw.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/resourcevar.h>
49 #include <sys/mount.h>
50 #include <sys/sysctl.h>
52 #include <sys/unpcb.h>
53 #include <sys/vnode.h>
55 #include <sys/file2.h>
56 #include <sys/spinlock2.h>
57 #include <sys/socketvar2.h>
58 #include <sys/msgport2.h>
60 #define UNP_DETACHED UNP_PRIVATE1
62 #define UNP_ISATTACHED(unp) \
63 ((unp) != NULL && ((unp)->unp_flags & UNP_DETACHED) == 0)
65 typedef struct unp_defdiscard {
66 struct unp_defdiscard *next;
70 static MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
71 static unp_gen_t unp_gencnt;
72 static u_int unp_count;
74 static struct unp_head unp_shead, unp_dhead;
76 static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
77 static int unp_defdiscard_nest;
78 static unp_defdiscard_t unp_defdiscard_base;
81 * Unix communications domain.
85 * rethink name space problems
86 * need a proper out-of-band
89 static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
90 static ino_t unp_ino = 1; /* prototype for fake inode numbers */
91 static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER(&unp_ino_spin, "unp_ino_spin");
93 static int unp_attach (struct socket *, struct pru_attach_info *);
94 static void unp_detach (struct unpcb *);
95 static int unp_bind (struct unpcb *,struct sockaddr *, struct thread *);
96 static int unp_connect (struct socket *,struct sockaddr *,
98 static void unp_disconnect (struct unpcb *);
99 static void unp_shutdown (struct unpcb *);
100 static void unp_drop (struct unpcb *, int);
101 static void unp_gc (void);
102 static int unp_gc_clearmarks(struct file *, void *);
103 static int unp_gc_checkmarks(struct file *, void *);
104 static int unp_gc_checkrefs(struct file *, void *);
105 static int unp_revoke_gc_check(struct file *, void *);
106 static void unp_scan (struct mbuf *, void (*)(struct file *, void *),
108 static void unp_mark (struct file *, void *data);
109 static void unp_discard (struct file *, void *);
110 static int unp_internalize (struct mbuf *, struct thread *);
111 static int unp_listen (struct unpcb *, struct thread *);
112 static void unp_fp_externalize(struct lwp *lp, struct file *fp, int fd);
115 * SMP Considerations:
117 * Since unp_token will be automatically released upon execution of
118 * blocking code, we need to reference unp_conn before any possible
119 * blocking code to prevent it from being ripped out from under us (see the sketch after the flag helpers below).
121 * Any adjustment to unp->unp_conn requires both the global unp_token
122 * AND the per-unp token (lwkt_token_pool_lookup(unp)) to be held.
124 * Any access to so_pcb to obtain unp requires the pool token for
128 /* NOTE: unp_token MUST be held */
130 unp_reference(struct unpcb *unp)
132 atomic_add_int(&unp->unp_refcnt, 1);
135 /* NOTE: unp_token MUST be held */
137 unp_free(struct unpcb *unp)
139 KKASSERT(unp->unp_refcnt > 0);
140 if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
144 static __inline struct unpcb *
145 unp_getsocktoken(struct socket *so)
150 * The unp pointer is invalid until we verify that it is
151 * good by re-checking so_pcb AFTER obtaining the token.
153 while ((unp = so->so_pcb) != NULL) {
154 lwkt_getpooltoken(unp);
155 if (unp == so->so_pcb)
157 lwkt_relpooltoken(unp);
163 unp_reltoken(struct unpcb *unp)
166 lwkt_relpooltoken(unp);
170 unp_setflags(struct unpcb *unp, int flags)
172 atomic_set_int(&unp->unp_flags, flags);
176 unp_clrflags(struct unpcb *unp, int flags)
178 atomic_clear_int(&unp->unp_flags, flags);
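/*
 * Illustrative sketch (not compiled into this file): the reference-before-
 * blocking pattern described in the SMP Considerations comment above.  The
 * peer unpcb is pinned with unp_reference() before any call that can block
 * and temporarily release our tokens.  The helper name and the work done on
 * the peer are assumptions; only the token/reference calls come from this
 * module.
 */
#if 0
static void
unp_peer_example(struct socket *so)
{
        struct unpcb *unp, *unp2;

        lwkt_gettoken(&unp_token);
        unp = unp_getsocktoken(so);     /* pool token; re-validates so_pcb */
        if (UNP_ISATTACHED(unp) && (unp2 = unp->unp_conn) != NULL) {
                unp_reference(unp2);    /* pin the peer before we can block */
                lwkt_gettoken(&unp2->unp_socket->so_rcv.ssb_token);
                /* ... operate on the peer's receive buffer, may block ... */
                lwkt_reltoken(&unp2->unp_socket->so_rcv.ssb_token);
                unp_free(unp2);         /* drop the temporary reference */
        }
        unp_reltoken(unp);
        lwkt_reltoken(&unp_token);
}
#endif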
182 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
183 * will sofree() it when we return.
186 uipc_abort(netmsg_t msg)
191 lwkt_gettoken(&unp_token);
192 unp = unp_getsocktoken(msg->base.nm_so);
194 if (UNP_ISATTACHED(unp)) {
195 unp_setflags(unp, UNP_DETACHED);
196 unp_drop(unp, ECONNABORTED);
204 lwkt_reltoken(&unp_token);
206 lwkt_replymsg(&msg->lmsg, error);
210 uipc_accept(netmsg_t msg)
215 lwkt_gettoken(&unp_token);
216 unp = msg->base.nm_so->so_pcb;
217 if (!UNP_ISATTACHED(unp)) {
220 struct unpcb *unp2 = unp->unp_conn;
223 * Pass back name of connected socket,
224 * if it was bound and we are still connected
225 * (our peer may have closed already!).
227 if (unp2 && unp2->unp_addr) {
229 *msg->accept.nm_nam = dup_sockaddr(
230 (struct sockaddr *)unp2->unp_addr);
233 *msg->accept.nm_nam = dup_sockaddr(&sun_noname);
237 lwkt_reltoken(&unp_token);
238 lwkt_replymsg(&msg->lmsg, error);
242 uipc_attach(netmsg_t msg)
247 lwkt_gettoken(&unp_token);
248 unp = msg->base.nm_so->so_pcb;
249 KASSERT(unp == NULL, ("double unp attach"));
250 error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);
251 lwkt_reltoken(&unp_token);
252 lwkt_replymsg(&msg->lmsg, error);
256 uipc_bind(netmsg_t msg)
261 lwkt_gettoken(&unp_token);
262 unp = msg->base.nm_so->so_pcb;
263 if (UNP_ISATTACHED(unp))
264 error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
267 lwkt_reltoken(&unp_token);
268 lwkt_replymsg(&msg->lmsg, error);
272 uipc_connect(netmsg_t msg)
277 unp = msg->base.nm_so->so_pcb;
278 if (UNP_ISATTACHED(unp)) {
279 error = unp_connect(msg->base.nm_so,
285 lwkt_replymsg(&msg->lmsg, error);
289 uipc_connect2(netmsg_t msg)
294 unp = msg->connect2.nm_so1->so_pcb;
295 if (UNP_ISATTACHED(unp)) {
296 error = unp_connect2(msg->connect2.nm_so1,
297 msg->connect2.nm_so2);
301 lwkt_replymsg(&msg->lmsg, error);
304 /* control is EOPNOTSUPP */
307 uipc_detach(netmsg_t msg)
312 lwkt_gettoken(&unp_token);
313 unp = unp_getsocktoken(msg->base.nm_so);
315 if (UNP_ISATTACHED(unp)) {
316 unp_setflags(unp, UNP_DETACHED);
324 lwkt_reltoken(&unp_token);
326 lwkt_replymsg(&msg->lmsg, error);
330 uipc_disconnect(netmsg_t msg)
335 lwkt_gettoken(&unp_token);
336 unp = msg->base.nm_so->so_pcb;
337 if (UNP_ISATTACHED(unp)) {
343 lwkt_reltoken(&unp_token);
344 lwkt_replymsg(&msg->lmsg, error);
348 uipc_listen(netmsg_t msg)
353 lwkt_gettoken(&unp_token);
354 unp = msg->base.nm_so->so_pcb;
355 if (!UNP_ISATTACHED(unp) || unp->unp_vnode == NULL)
358 error = unp_listen(unp, msg->listen.nm_td);
359 lwkt_reltoken(&unp_token);
360 lwkt_replymsg(&msg->lmsg, error);
364 uipc_peeraddr(netmsg_t msg)
369 lwkt_gettoken(&unp_token);
370 unp = msg->base.nm_so->so_pcb;
371 if (!UNP_ISATTACHED(unp)) {
373 } else if (unp->unp_conn && unp->unp_conn->unp_addr) {
374 struct unpcb *unp2 = unp->unp_conn;
377 *msg->peeraddr.nm_nam = dup_sockaddr(
378 (struct sockaddr *)unp2->unp_addr);
383 * XXX: It seems that this test always fails even when
384 * connection is established. So, this else clause is
385 * added as a workaround to return a PF_LOCAL sockaddr.
387 *msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
390 lwkt_reltoken(&unp_token);
391 lwkt_replymsg(&msg->lmsg, error);
395 uipc_rcvd(netmsg_t msg)
397 struct unpcb *unp, *unp2;
403 * so_pcb is only modified with both the global and the unp
406 so = msg->base.nm_so;
407 unp = unp_getsocktoken(so);
409 if (!UNP_ISATTACHED(unp)) {
414 switch (so->so_type) {
416 panic("uipc_rcvd DGRAM?");
420 if (unp->unp_conn == NULL)
422 unp2 = unp->unp_conn; /* protected by pool token */
425 * Because we are transferring mbufs directly to the
426 * peer socket we have to use SSB_STOP on the sender
427 * to prevent it from building up infinite mbufs.
429 * As in several places in this module we have to ref unp2
430 * to ensure that it does not get ripped out from under us
431 * if we block on the so2 token or in sowwakeup().
433 so2 = unp2->unp_socket;
435 lwkt_gettoken(&so2->so_rcv.ssb_token);
436 if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
437 so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax
439 atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);
443 lwkt_reltoken(&so2->so_rcv.ssb_token);
447 panic("uipc_rcvd unknown socktype");
453 lwkt_replymsg(&msg->lmsg, error);
456 /* pru_rcvoob is EOPNOTSUPP */
459 uipc_send(netmsg_t msg)
461 struct unpcb *unp, *unp2;
464 struct mbuf *control;
468 so = msg->base.nm_so;
469 control = msg->send.nm_control;
473 * so_pcb is only modified with both the global and the unp
476 so = msg->base.nm_so;
477 unp = unp_getsocktoken(so);
479 if (!UNP_ISATTACHED(unp)) {
484 if (msg->send.nm_flags & PRUS_OOB) {
489 wakeup_start_delayed();
491 if (control && (error = unp_internalize(control, msg->send.nm_td)))
494 switch (so->so_type) {
497 struct sockaddr *from;
499 if (msg->send.nm_addr) {
504 error = unp_connect(so,
510 if (unp->unp_conn == NULL) {
515 unp2 = unp->unp_conn;
516 so2 = unp2->unp_socket;
518 from = (struct sockaddr *)unp->unp_addr;
524 lwkt_gettoken(&so2->so_rcv.ssb_token);
525 if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
532 if (msg->send.nm_addr)
534 lwkt_reltoken(&so2->so_rcv.ssb_token);
542 /* Connect if not connected yet. */
544 * Note: A better implementation would complain
545 * if not equal to the peer's address.
547 if (!(so->so_state & SS_ISCONNECTED)) {
548 if (msg->send.nm_addr) {
549 error = unp_connect(so,
560 if (so->so_state & SS_CANTSENDMORE) {
564 if (unp->unp_conn == NULL)
565 panic("uipc_send connected but no connection?");
566 unp2 = unp->unp_conn;
567 so2 = unp2->unp_socket;
572 * Send to paired receive port, and then reduce
573 * send buffer hiwater marks to maintain backpressure.
576 lwkt_gettoken(&so2->so_rcv.ssb_token);
578 if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
582 } else if (so->so_type == SOCK_SEQPACKET) {
583 sbappendrecord(&so2->so_rcv.sb, m);
586 sbappend(&so2->so_rcv.sb, m);
591 * Because we are transferring mbufs directly to the
592 * peer socket we have to use SSB_STOP on the sender
593 * to prevent it from building up infinite mbufs.
595 if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
596 so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax
598 atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
600 lwkt_reltoken(&so2->so_rcv.ssb_token);
607 panic("uipc_send unknown socktype");
611 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
613 if (msg->send.nm_flags & PRUS_EOF) {
618 if (control && error != 0)
619 unp_dispose(control);
622 wakeup_end_delayed();
628 lwkt_replymsg(&msg->lmsg, error);
635 uipc_sense(netmsg_t msg)
642 so = msg->base.nm_so;
643 sb = msg->sense.nm_stat;
646 * so_pcb is only modified with both the global and the unp
649 unp = unp_getsocktoken(so);
651 if (!UNP_ISATTACHED(unp)) {
656 sb->st_blksize = so->so_snd.ssb_hiwat;
658 if (unp->unp_ino == 0) { /* make up a non-zero inode number */
659 spin_lock(&unp_ino_spin);
660 unp->unp_ino = unp_ino++;
661 spin_unlock(&unp_ino_spin);
663 sb->st_ino = unp->unp_ino;
667 lwkt_replymsg(&msg->lmsg, error);
671 uipc_shutdown(netmsg_t msg)
678 * so_pcb is only modified with both the global and the unp
681 so = msg->base.nm_so;
682 unp = unp_getsocktoken(so);
684 if (UNP_ISATTACHED(unp)) {
693 lwkt_replymsg(&msg->lmsg, error);
697 uipc_sockaddr(netmsg_t msg)
704 * so_pcb is only modified with both the global and the unp
707 so = msg->base.nm_so;
708 unp = unp_getsocktoken(so);
710 if (UNP_ISATTACHED(unp)) {
712 *msg->sockaddr.nm_nam =
713 dup_sockaddr((struct sockaddr *)unp->unp_addr);
721 lwkt_replymsg(&msg->lmsg, error);
724 struct pr_usrreqs uipc_usrreqs = {
725 .pru_abort = uipc_abort,
726 .pru_accept = uipc_accept,
727 .pru_attach = uipc_attach,
728 .pru_bind = uipc_bind,
729 .pru_connect = uipc_connect,
730 .pru_connect2 = uipc_connect2,
731 .pru_control = pr_generic_notsupp,
732 .pru_detach = uipc_detach,
733 .pru_disconnect = uipc_disconnect,
734 .pru_listen = uipc_listen,
735 .pru_peeraddr = uipc_peeraddr,
736 .pru_rcvd = uipc_rcvd,
737 .pru_rcvoob = pr_generic_notsupp,
738 .pru_send = uipc_send,
739 .pru_sense = uipc_sense,
740 .pru_shutdown = uipc_shutdown,
741 .pru_sockaddr = uipc_sockaddr,
742 .pru_sosend = sosend,
743 .pru_soreceive = soreceive
747 uipc_ctloutput(netmsg_t msg)
750 struct sockopt *sopt;
754 lwkt_gettoken(&unp_token);
755 so = msg->base.nm_so;
756 sopt = msg->ctloutput.nm_sopt;
759 switch (sopt->sopt_dir) {
761 switch (sopt->sopt_name) {
763 if (unp->unp_flags & UNP_HAVEPC)
764 soopt_from_kbuf(sopt, &unp->unp_peercred,
765 sizeof(unp->unp_peercred));
767 if (so->so_type == SOCK_STREAM)
769 else if (so->so_type == SOCK_SEQPACKET)
785 lwkt_reltoken(&unp_token);
786 lwkt_replymsg(&msg->lmsg, error);
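/*
 * Illustrative userland sketch (not part of this file's build): consuming
 * the peer-credential option handled above.  Assumes the FreeBSD-derived
 * SOL_LOCAL/LOCAL_PEERCRED definitions and struct xucred layout; the
 * function name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ucred.h>
#include <stdio.h>

static int
print_peer_creds(int fd)
{
        struct xucred xuc;
        socklen_t len = sizeof(xuc);

        /* Credentials were cached at listen()/connect() time (unp_peercred). */
        if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERCRED, &xuc, &len) < 0)
                return (-1);
        if (xuc.cr_version != XUCRED_VERSION)
                return (-1);
        printf("peer euid %u, %d groups\n", (unsigned)xuc.cr_uid,
            (int)xuc.cr_ngroups);
        return (0);
}
#endif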
790 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
791 * for stream sockets, although the total for sender and receiver is
792 * actually only PIPSIZ.
794 * Datagram sockets really use the sendspace as the maximum datagram size,
795 * and don't really want to reserve the sendspace. Their recvspace should
796 * be large enough for at least one max-size datagram plus address.
798 * We want the local send/recv space to be significantly larger than lo0's
804 static u_long unpst_sendspace = PIPSIZ;
805 static u_long unpst_recvspace = PIPSIZ;
806 static u_long unpdg_sendspace = 2*1024; /* really max datagram size */
807 static u_long unpdg_recvspace = 4*1024;
809 static int unp_rights; /* file descriptors in flight */
810 static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin, "unp_spin");
812 SYSCTL_DECL(_net_local_seqpacket);
813 SYSCTL_DECL(_net_local_stream);
814 SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
815 &unpst_sendspace, 0, "Size of stream socket send buffer");
816 SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
817 &unpst_recvspace, 0, "Size of stream socket receive buffer");
819 SYSCTL_DECL(_net_local_dgram);
820 SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
821 &unpdg_sendspace, 0, "Max datagram socket size");
822 SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
823 &unpdg_recvspace, 0, "Size of datagram socket receive buffer");
825 SYSCTL_DECL(_net_local);
826 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
827 "File descriptors in flight");
830 unp_attach(struct socket *so, struct pru_attach_info *ai)
835 lwkt_gettoken(&unp_token);
837 if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
838 switch (so->so_type) {
841 error = soreserve(so, unpst_sendspace, unpst_recvspace,
846 error = soreserve(so, unpdg_sendspace, unpdg_recvspace,
858 * In order to support sendfile we have to set either SSB_STOPSUPP
859 * or SSB_PREALLOC. Unix domain sockets use the SSB_STOP flow
862 if (so->so_type == SOCK_STREAM) {
863 atomic_set_int(&so->so_rcv.ssb_flags, SSB_STOPSUPP);
864 atomic_set_int(&so->so_snd.ssb_flags, SSB_STOPSUPP);
867 unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
873 unp->unp_gencnt = ++unp_gencnt;
875 LIST_INIT(&unp->unp_refs);
876 unp->unp_socket = so;
877 unp->unp_rvnode = ai->fd_rdir; /* jail cruft XXX JH */
878 LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead
879 : &unp_shead, unp, unp_link);
880 so->so_pcb = (caddr_t)unp;
884 lwkt_reltoken(&unp_token);
889 unp_detach(struct unpcb *unp)
893 lwkt_gettoken(&unp_token);
894 lwkt_getpooltoken(unp);
896 LIST_REMOVE(unp, unp_link); /* both tokens required */
897 unp->unp_gencnt = ++unp_gencnt;
899 if (unp->unp_vnode) {
900 unp->unp_vnode->v_socket = NULL;
901 vrele(unp->unp_vnode);
902 unp->unp_vnode = NULL;
906 while (!LIST_EMPTY(&unp->unp_refs))
907 unp_drop(LIST_FIRST(&unp->unp_refs), ECONNRESET);
908 soisdisconnected(unp->unp_socket);
909 so = unp->unp_socket;
910 soreference(so); /* for delayed sorflush */
911 KKASSERT(so->so_pcb == unp);
912 so->so_pcb = NULL; /* both tokens required */
913 unp->unp_socket = NULL;
914 sofree(so); /* remove pcb ref */
918 * Normally the receive buffer is flushed later,
919 * in sofree, but if our receive buffer holds references
920 * to descriptors that are now garbage, we will dispose
921 * of those descriptor references after the garbage collector
922 * gets them (resulting in a "panic: closef: count < 0").
928 lwkt_relpooltoken(unp);
929 lwkt_reltoken(&unp_token);
932 kfree(unp->unp_addr, M_SONAME);
937 unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
939 struct proc *p = td->td_proc;
940 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
944 struct nlookupdata nd;
945 char buf[SOCK_MAXADDRLEN];
947 lwkt_gettoken(&unp_token);
948 if (unp->unp_vnode != NULL) {
952 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
957 strncpy(buf, soun->sun_path, namelen);
958 buf[namelen] = 0; /* null-terminate the string */
959 error = nlookup_init(&nd, buf, UIO_SYSSPACE,
960 NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
962 error = nlookup(&nd);
963 if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
969 vattr.va_type = VSOCK;
970 vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
971 error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
973 if (unp->unp_vnode == NULL) {
974 vp->v_socket = unp->unp_socket;
976 unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam);
979 vput(vp); /* late race */
986 lwkt_reltoken(&unp_token);
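/*
 * Illustrative userland sketch (not compiled here) of the path that drives
 * unp_bind() above: bind(2) on an AF_LOCAL socket creates the VSOCK vnode
 * via the nlookup/VOP_NCREATE sequence.  The helper name is hypothetical
 * and error reporting is trimmed.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>

static int
local_listen(const char *path)
{
        struct sockaddr_un sun;
        int s;

        if (strlen(path) >= sizeof(sun.sun_path))
                return (-1);                    /* would not fit in sun_path */
        if ((s = socket(AF_LOCAL, SOCK_STREAM, 0)) < 0)
                return (-1);
        memset(&sun, 0, sizeof(sun));
        sun.sun_family = AF_LOCAL;
        sun.sun_len = sizeof(sun);
        strcpy(sun.sun_path, path);
        if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) < 0 ||
            listen(s, 5) < 0) {
                close(s);
                return (-1);
        }
        return (s);
}
#endif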
991 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
993 struct proc *p = td->td_proc;
994 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
996 struct socket *so2, *so3;
997 struct unpcb *unp, *unp2, *unp3;
999 struct nlookupdata nd;
1000 char buf[SOCK_MAXADDRLEN];
1002 lwkt_gettoken(&unp_token);
1004 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1009 strncpy(buf, soun->sun_path, len);
1013 error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
1015 error = nlookup(&nd);
1017 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
1022 if (vp->v_type != VSOCK) {
1026 error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
1031 error = ECONNREFUSED;
1034 if (so->so_type != so2->so_type) {
1038 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
1039 if (!(so2->so_options & SO_ACCEPTCONN) ||
1040 (so3 = sonewconn(so2, 0)) == NULL) {
1041 error = ECONNREFUSED;
1045 if (unp->unp_conn) { /* race, already connected! */
1053 unp3->unp_addr = (struct sockaddr_un *)
1054 dup_sockaddr((struct sockaddr *)unp2->unp_addr);
1057 * unp_peercred management:
1059 * The connecter's (client's) credentials are copied
1060 * from its process structure at the time of connect()
1063 cru2x(p->p_ucred, &unp3->unp_peercred);
1064 unp_setflags(unp3, UNP_HAVEPC);
1066 * The receiver's (server's) credentials are copied
1067 * from the unp_peercred member of socket on which the
1068 * former called listen(); unp_listen() cached that
1069 * process's credentials at that time so we can use
1072 KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
1073 ("unp_connect: listener without cached peercred"));
1074 memcpy(&unp->unp_peercred, &unp2->unp_peercred,
1075 sizeof(unp->unp_peercred));
1076 unp_setflags(unp, UNP_HAVEPC);
1080 error = unp_connect2(so, so2);
1084 lwkt_reltoken(&unp_token);
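/*
 * Illustrative userland counterpart of unp_connect() above (not compiled
 * here): connect(2) resolves the path, verifies a VSOCK vnode and VWRITE
 * access, and ends in sonewconn()/unp_connect2().  The helper name is
 * hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>

static int
local_connect(const char *path)
{
        struct sockaddr_un sun;
        int s;

        if (strlen(path) >= sizeof(sun.sun_path))
                return (-1);
        if ((s = socket(AF_LOCAL, SOCK_STREAM, 0)) < 0)
                return (-1);
        memset(&sun, 0, sizeof(sun));
        sun.sun_family = AF_LOCAL;
        sun.sun_len = sizeof(sun);
        strcpy(sun.sun_path, path);
        if (connect(s, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
                close(s);
                return (-1);
        }
        return (s);
}
#endif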
1089 * Connect two unix domain sockets together.
1091 * NOTE: Semantics for any change to unp_conn requires that the per-unp
1092 * pool token also be held.
1095 unp_connect2(struct socket *so, struct socket *so2)
1100 lwkt_gettoken(&unp_token);
1101 if (so2->so_type != so->so_type) {
1102 lwkt_reltoken(&unp_token);
1103 return (EPROTOTYPE);
1105 unp = unp_getsocktoken(so);
1106 unp2 = unp_getsocktoken(so2);
1108 unp->unp_conn = unp2;
1110 switch (so->so_type) {
1112 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
1117 case SOCK_SEQPACKET:
1118 unp2->unp_conn = unp;
1124 panic("unp_connect2");
1129 lwkt_reltoken(&unp_token);
1134 * Disconnect a unix domain socket pair.
1136 * NOTE: Semantics for any change to unp_conn requires that the per-unp
1137 * pool token also be held.
1140 unp_disconnect(struct unpcb *unp)
1144 lwkt_gettoken(&unp_token);
1145 lwkt_getpooltoken(unp);
1147 while ((unp2 = unp->unp_conn) != NULL) {
1148 lwkt_getpooltoken(unp2);
1149 if (unp2 == unp->unp_conn)
1151 lwkt_relpooltoken(unp2);
1156 unp->unp_conn = NULL;
1158 switch (unp->unp_socket->so_type) {
1160 LIST_REMOVE(unp, unp_reflink);
1161 soclrstate(unp->unp_socket, SS_ISCONNECTED);
1165 case SOCK_SEQPACKET:
1166 unp_reference(unp2);
1167 unp2->unp_conn = NULL;
1169 soisdisconnected(unp->unp_socket);
1170 soisdisconnected(unp2->unp_socket);
1175 lwkt_relpooltoken(unp2);
1177 lwkt_relpooltoken(unp);
1178 lwkt_reltoken(&unp_token);
1183 unp_abort(struct unpcb *unp)
1185 lwkt_gettoken(&unp_token);
1187 lwkt_reltoken(&unp_token);
1192 prison_unpcb(struct thread *td, struct unpcb *unp)
1198 if ((p = td->td_proc) == NULL)
1200 if (!p->p_ucred->cr_prison)
1202 if (p->p_fd->fd_rdir == unp->unp_rvnode)
1208 unp_pcblist(SYSCTL_HANDLER_ARGS)
1211 struct unpcb *unp, **unp_list;
1213 struct unp_head *head;
1215 head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);
1217 KKASSERT(curproc != NULL);
1220 * The process of preparing the PCB list is too time-consuming and
1221 * resource-intensive to repeat twice on every request.
1223 if (req->oldptr == NULL) {
1225 req->oldidx = (n + n/8) * sizeof(struct xunpcb);
1229 if (req->newptr != NULL)
1232 lwkt_gettoken(&unp_token);
1235 * OK, now we're committed to doing something.
1237 gencnt = unp_gencnt;
1240 unp_list = kmalloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
1242 for (unp = LIST_FIRST(head), i = 0; unp && i < n;
1243 unp = LIST_NEXT(unp, unp_link)) {
1244 if (unp->unp_gencnt <= gencnt && !prison_unpcb(req->td, unp))
1245 unp_list[i++] = unp;
1247 n = i; /* in case we lost some during malloc */
1250 for (i = 0; i < n; i++) {
1252 if (unp->unp_gencnt <= gencnt) {
1254 xu.xu_len = sizeof xu;
1257 * XXX - need more locking here to protect against
1258 * connect/disconnect races for SMP.
1261 bcopy(unp->unp_addr, &xu.xu_addr,
1262 unp->unp_addr->sun_len);
1263 if (unp->unp_conn && unp->unp_conn->unp_addr)
1264 bcopy(unp->unp_conn->unp_addr,
1266 unp->unp_conn->unp_addr->sun_len);
1267 bcopy(unp, &xu.xu_unp, sizeof *unp);
1268 sotoxsocket(unp->unp_socket, &xu.xu_socket);
1269 error = SYSCTL_OUT(req, &xu, sizeof xu);
1272 lwkt_reltoken(&unp_token);
1273 kfree(unp_list, M_TEMP);
1278 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
1279 (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
1280 "List of active local datagram sockets");
1281 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
1282 (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
1283 "List of active local stream sockets");
1284 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
1285 (caddr_t)(long)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
1286 "List of active local seqpacket stream sockets");
1289 unp_shutdown(struct unpcb *unp)
1293 if ((unp->unp_socket->so_type == SOCK_STREAM ||
1294 unp->unp_socket->so_type == SOCK_SEQPACKET) &&
1295 unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
1301 unp_drop(struct unpcb *unp, int err)
1303 struct socket *so = unp->unp_socket;
1306 unp_disconnect(unp);
1313 lwkt_gettoken(&unp_token);
1314 lwkt_reltoken(&unp_token);
1319 unp_externalize(struct mbuf *rights)
1321 struct thread *td = curthread;
1322 struct proc *p = td->td_proc; /* XXX */
1323 struct lwp *lp = td->td_lwp;
1324 struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
1329 int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
1330 / sizeof (struct file *);
1333 lwkt_gettoken(&unp_token);
1336 * if the new FD's will not fit, then we free them all
1338 if (!fdavail(p, newfds)) {
1339 rp = (struct file **)CMSG_DATA(cm);
1340 for (i = 0; i < newfds; i++) {
1343 * zero the pointer before calling unp_discard,
1344 * since it may end up in unp_gc()..
1347 unp_discard(fp, NULL);
1349 lwkt_reltoken(&unp_token);
1354 * now change each pointer to an fd in the global table to
1355 * an integer that is the index to the local fd table entry
1356 * that we set up to point to the global one we are transferring.
1357 * If sizeof (struct file *) is bigger than or equal to sizeof int,
1358 * then do it in forward order. In that case, an integer will
1359 * always come in the same place or before its corresponding
1360 * struct file pointer.
1361 * If sizeof (struct file *) is smaller than sizeof int, then
1362 * do it in reverse order.
1364 if (sizeof (struct file *) >= sizeof (int)) {
1365 fdp = (int *)CMSG_DATA(cm);
1366 rp = (struct file **)CMSG_DATA(cm);
1367 for (i = 0; i < newfds; i++) {
1368 if (fdalloc(p, 0, &f))
1369 panic("unp_externalize");
1371 unp_fp_externalize(lp, fp, f);
1375 fdp = (int *)CMSG_DATA(cm) + newfds - 1;
1376 rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
1377 for (i = 0; i < newfds; i++) {
1378 if (fdalloc(p, 0, &f))
1379 panic("unp_externalize");
1381 unp_fp_externalize(lp, fp, f);
1387 * Adjust length, in case sizeof(struct file *) and sizeof(int)
1390 cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
1391 rights->m_len = cm->cmsg_len;
1393 lwkt_reltoken(&unp_token);
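/*
 * Illustrative userland sketch (not compiled here): receiving a descriptor
 * whose struct file pointer unp_externalize() just converted back into an
 * fd.  Standard CMSG macros; the helper name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
recv_fd(int sock)
{
        union {
                struct cmsghdr hdr;
                char buf[CMSG_SPACE(sizeof(int))];
        } cmsgbuf;
        struct msghdr msg;
        struct cmsghdr *cm;
        struct iovec iov;
        char c;
        int fd = -1;

        memset(&msg, 0, sizeof(msg));
        iov.iov_base = &c;
        iov.iov_len = sizeof(c);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cmsgbuf.buf;
        msg.msg_controllen = sizeof(cmsgbuf.buf);

        if (recvmsg(sock, &msg, 0) < 0)
                return (-1);
        for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_SOCKET &&
                    cm->cmsg_type == SCM_RIGHTS &&
                    cm->cmsg_len >= CMSG_LEN(sizeof(int)))
                        memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
        }
        return (fd);
}
#endif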
1398 unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
1403 lwkt_gettoken(&unp_token);
1407 if (fp->f_flag & FREVOKED) {
1408 kprintf("Warning: revoked fp exiting unix socket\n");
1410 error = falloc(lp, &fx, NULL);
1412 fsetfd(lp->lwp_proc->p_fd, fx, fd);
1414 fsetfd(lp->lwp_proc->p_fd, NULL, fd);
1417 fsetfd(lp->lwp_proc->p_fd, fp, fd);
1420 spin_lock(&unp_spin);
1423 spin_unlock(&unp_spin);
1426 lwkt_reltoken(&unp_token);
1433 LIST_INIT(&unp_dhead);
1434 LIST_INIT(&unp_shead);
1435 spin_init(&unp_spin, "unpinit");
1439 unp_internalize(struct mbuf *control, struct thread *td)
1441 struct proc *p = td->td_proc;
1442 struct filedesc *fdescp;
1443 struct cmsghdr *cm = mtod(control, struct cmsghdr *);
1447 struct cmsgcred *cmcred;
1453 lwkt_gettoken(&unp_token);
1456 if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
1457 cm->cmsg_level != SOL_SOCKET ||
1458 CMSG_ALIGN(cm->cmsg_len) != control->m_len) {
1464 * Fill in credential information.
1466 if (cm->cmsg_type == SCM_CREDS) {
1467 cmcred = (struct cmsgcred *)CMSG_DATA(cm);
1468 cmcred->cmcred_pid = p->p_pid;
1469 cmcred->cmcred_uid = p->p_ucred->cr_ruid;
1470 cmcred->cmcred_gid = p->p_ucred->cr_rgid;
1471 cmcred->cmcred_euid = p->p_ucred->cr_uid;
1472 cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
1474 for (i = 0; i < cmcred->cmcred_ngroups; i++)
1475 cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
1481 * cmsghdr may not be aligned, do not allow calculation(s) to
1484 if (cm->cmsg_len < CMSG_LEN(0)) {
1489 oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof (int);
1492 * Check that all the FDs passed in refer to legal OPEN files.
1493 * If not, reject the entire operation.
1495 fdp = (int *)CMSG_DATA(cm);
1496 for (i = 0; i < oldfds; i++) {
1498 if ((unsigned)fd >= fdescp->fd_nfiles ||
1499 fdescp->fd_files[fd].fp == NULL) {
1503 if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
1509 * Now replace the integer FDs with pointers to
1510 * the associated global file table entry.
1511 * Allocate a bigger buffer as necessary. But if a cluster is not
1512 * enough, return E2BIG.
1514 newlen = CMSG_LEN(oldfds * sizeof(struct file *));
1515 if (newlen > MCLBYTES) {
1519 if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
1520 if (control->m_flags & M_EXT) {
1524 MCLGET(control, M_WAITOK);
1525 if (!(control->m_flags & M_EXT)) {
1530 /* copy the data to the cluster */
1531 memcpy(mtod(control, char *), cm, cm->cmsg_len);
1532 cm = mtod(control, struct cmsghdr *);
1536 * Adjust length, in case sizeof(struct file *) and sizeof(int)
1539 cm->cmsg_len = newlen;
1540 control->m_len = CMSG_ALIGN(newlen);
1543 * Transform the file descriptors into struct file pointers.
1544 * If sizeof (struct file *) is bigger than or equal to sizeof int,
1545 * then do it in reverse order so that the int won't get overwritten until we are done with it.
1547 * If sizeof (struct file *) is smaller than sizeof int, then
1548 * do it in forward order.
1550 if (sizeof (struct file *) >= sizeof (int)) {
1551 fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
1552 rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
1553 for (i = 0; i < oldfds; i++) {
1554 fp = fdescp->fd_files[*fdp--].fp;
1557 spin_lock(&unp_spin);
1560 spin_unlock(&unp_spin);
1563 fdp = (int *)CMSG_DATA(cm);
1564 rp = (struct file **)CMSG_DATA(cm);
1565 for (i = 0; i < oldfds; i++) {
1566 fp = fdescp->fd_files[*fdp++].fp;
1569 spin_lock(&unp_spin);
1572 spin_unlock(&unp_spin);
1577 lwkt_reltoken(&unp_token);
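/*
 * Illustrative userland sketch (not compiled here): the sendmsg(2) call
 * whose SCM_RIGHTS control message unp_internalize() above converts into
 * struct file pointers.  The helper name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd(int sock, int fd)
{
        union {
                struct cmsghdr hdr;
                char buf[CMSG_SPACE(sizeof(int))];
        } cmsgbuf;
        struct msghdr msg;
        struct cmsghdr *cm;
        struct iovec iov;
        char c = 0;

        memset(&msg, 0, sizeof(msg));
        memset(&cmsgbuf, 0, sizeof(cmsgbuf));
        iov.iov_base = &c;              /* carry one byte of ordinary data */
        iov.iov_len = sizeof(c);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cmsgbuf.buf;
        msg.msg_controllen = sizeof(cmsgbuf.buf);

        cm = CMSG_FIRSTHDR(&msg);
        cm->cmsg_level = SOL_SOCKET;
        cm->cmsg_type = SCM_RIGHTS;
        cm->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cm), &fd, sizeof(fd));

        return (sendmsg(sock, &msg, 0) < 0 ? -1 : 0);
}
#endif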
1582 * Garbage collect in-transit file descriptors that get lost due to
1583 * loops (i.e. when a socket is sent to another process over itself,
1584 * and more complex situations).
1586 * NOT MPSAFE - TODO socket flush code and maybe closef. Rest is MPSAFE.
1589 struct unp_gc_info {
1590 struct file **extra_ref;
1591 struct file *locked_fp;
1600 struct unp_gc_info info;
1601 static boolean_t unp_gcing;
1606 * Only one gc can be in-progress at any given moment
1608 spin_lock(&unp_spin);
1610 spin_unlock(&unp_spin);
1614 spin_unlock(&unp_spin);
1616 lwkt_gettoken(&unp_token);
1619 * Before going through all this, set all FDs to be NOT deferred
1620 * and NOT externally accessible (not marked). During the scan
1621 * a fd can be marked externally accessible but we may or may not
1622 * be able to immediately process it (controlled by FDEFER).
1624 * If we loop, sleep a bit. The complexity of the topology can cause
1625 * multiple loops. Also failure to acquire the socket's so_rcv
1626 * token can cause us to loop.
1628 allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
1631 allfiles_scan_exclusive(unp_gc_checkmarks, &info);
1633 tsleep(&info, 0, "gcagain", 1);
1634 } while (info.defer);
1637 * We grab an extra reference to each of the file table entries
1638 * that are not otherwise accessible and then free the rights
1639 * that are stored in messages on them.
1641 * The bug in the original code is a little tricky, so I'll describe
1642 * what's wrong with it here.
1644 * It is incorrect to simply unp_discard each entry for f_msgcount
1645 * times -- consider the case of sockets A and B that contain
1646 * references to each other. On a last close of some other socket,
1647 * we trigger a gc since the number of outstanding rights (unp_rights)
1648 * is non-zero. If during the sweep phase the gc code unp_discards,
1649 * we end up doing a (full) closef on the descriptor. A closef on A
1650 * results in the following chain. Closef calls soo_close, which
1651 * calls soclose. Soclose calls first (through the switch
1652 * uipc_usrreq) unp_detach, which re-invokes unp_gc. Unp_gc simply
1653 * returns because the previous instance had set unp_gcing, and
1654 * we return all the way back to soclose, which marks the socket
1655 * with SS_NOFDREF, and then calls sofree. Sofree calls sorflush
1656 * to free up the rights that are queued in messages on the socket A,
1657 * i.e., the reference on B. The sorflush calls via the dom_dispose
1658 * switch unp_dispose, which unp_scans with unp_discard. This second
1659 * instance of unp_discard just calls closef on B.
1661 * Well, a similar chain occurs on B, resulting in a sorflush on B,
1662 * which results in another closef on A. Unfortunately, A is already
1663 * being closed, and the descriptor has already been marked with
1664 * SS_NOFDREF, and soclose panics at this point.
1666 * Here, we first take an extra reference to each inaccessible
1667 * descriptor. Then, we call sorflush ourself, since we know
1668 * it is a Unix domain socket anyhow. After we destroy all the
1669 * rights carried in messages, we do a last closef to get rid
1670 * of our extra reference. This is the last close, and the
1671 * unp_detach etc will shut down the socket.
1673 * 91/09/19, bsy@cs.cmu.edu
1675 info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE, M_WAITOK);
1676 info.maxindex = 256;
1683 allfiles_scan_exclusive(unp_gc_checkrefs, &info);
1686 * For each FD on our hit list, do the following two things
1688 for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
1689 struct file *tfp = *fpp;
1690 if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
1691 sorflush((struct socket *)(tfp->f_data));
1693 for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
1695 } while (info.index == info.maxindex);
1697 lwkt_reltoken(&unp_token);
1699 kfree((caddr_t)info.extra_ref, M_FILE);
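/*
 * Illustrative userland sketch (not compiled here) of the reference cycle
 * the collector above exists to break: each end of a socketpair is queued
 * on the other via SCM_RIGHTS, then both user references are closed,
 * leaving f_count == f_msgcount on both files.  send_fd() refers to the
 * hypothetical sketch shown after unp_internalize().
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static void
make_fd_cycle(void)
{
        int sv[2];

        if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
                return;
        send_fd(sv[0], sv[1]); /* sv[1] now also lives in sv[0]'s rcv buffer */
        send_fd(sv[1], sv[0]); /* and vice versa */
        close(sv[0]);
        close(sv[1]);          /* only unp_gc() can reclaim the pair now */
}
#endif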
1704 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
1707 unp_gc_checkrefs(struct file *fp, void *data)
1709 struct unp_gc_info *info = data;
1711 if (fp->f_count == 0)
1713 if (info->index == info->maxindex)
1717 * If all refs are from msgs, and it's not marked accessible
1718 * then it must be referenced from some unreachable cycle
1719 * of (shut-down) FDs, so include it in our
1720 * list of FDs to remove
1722 if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
1723 info->extra_ref[info->index++] = fp;
1730 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
1733 unp_gc_clearmarks(struct file *fp, void *data __unused)
1735 atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
1740 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
1743 unp_gc_checkmarks(struct file *fp, void *data)
1745 struct unp_gc_info *info = data;
1749 * If the file is not open, skip it. Make sure it isn't marked
1750 * deferred or we could loop forever, in case we somehow race
1753 if (fp->f_count == 0) {
1754 if (fp->f_flag & FDEFER)
1755 atomic_clear_int(&fp->f_flag, FDEFER);
1759 * If we already marked it as 'defer' in a
1760 * previous pass, then try to process it this time
1763 if (fp->f_flag & FDEFER) {
1764 atomic_clear_int(&fp->f_flag, FDEFER);
1767 * If it's not deferred, then check if it's
1768 * already marked.. if so skip it
1770 if (fp->f_flag & FMARK)
1773 * If all references are from messages
1774 * in transit, then skip it. It's not
1775 * externally accessible.
1777 if (fp->f_count == fp->f_msgcount)
1780 * If it got this far then it must be
1781 * externally accessible.
1783 atomic_set_int(&fp->f_flag, FMARK);
1787 * Either it was deferred, or it is externally
1788 * accessible and not already marked so.
1789 * Now check if it is possibly one of OUR sockets.
1791 if (fp->f_type != DTYPE_SOCKET ||
1792 (so = (struct socket *)fp->f_data) == NULL) {
1795 if (so->so_proto->pr_domain != &localdomain ||
1796 !(so->so_proto->pr_flags & PR_RIGHTS)) {
1801 * So, OK, it's one of our sockets and it IS externally accessible
1802 * (or was deferred). Now we look to see if we hold any file
1803 * descriptors in its message buffers. Follow those links and mark
1804 * them as accessible too.
1806 * We are holding multiple spinlocks here; if we cannot get the
1807 * token non-blocking, defer until the next loop.
1809 info->locked_fp = fp;
1810 if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
1811 unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
1812 lwkt_reltoken(&so->so_rcv.ssb_token);
1814 atomic_set_int(&fp->f_flag, FDEFER);
1821 * Scan all unix domain sockets and replace any revoked file pointers
1822 * found with the dummy file pointer fx. We don't worry about races
1823 * against file pointers being read out as those are handled in the
1827 #define REVOKE_GC_MAXFILES 32
1829 struct unp_revoke_gc_info {
1831 struct file *fary[REVOKE_GC_MAXFILES];
1836 unp_revoke_gc(struct file *fx)
1838 struct unp_revoke_gc_info info;
1841 lwkt_gettoken(&unp_token);
1845 allfiles_scan_exclusive(unp_revoke_gc_check, &info);
1846 for (i = 0; i < info.fcount; ++i)
1847 unp_fp_externalize(NULL, info.fary[i], -1);
1848 } while (info.fcount == REVOKE_GC_MAXFILES);
1849 lwkt_reltoken(&unp_token);
1853 * Check for and replace revoked descriptors.
1855 * WARNING: This routine is not allowed to block.
1858 unp_revoke_gc_check(struct file *fps, void *vinfo)
1860 struct unp_revoke_gc_info *info = vinfo;
1871 * Is this a unix domain socket with rights-passing abilities?
1873 if (fps->f_type != DTYPE_SOCKET)
1875 if ((so = (struct socket *)fps->f_data) == NULL)
1877 if (so->so_proto->pr_domain != &localdomain)
1879 if ((so->so_proto->pr_flags & PR_RIGHTS) == 0)
1883 * Scan the mbufs for control messages and replace any revoked
1884 * descriptors we find.
1886 lwkt_gettoken(&so->so_rcv.ssb_token);
1887 m0 = so->so_rcv.ssb_mb;
1889 for (m = m0; m; m = m->m_next) {
1890 if (m->m_type != MT_CONTROL)
1892 if (m->m_len < sizeof(*cm))
1894 cm = mtod(m, struct cmsghdr *);
1895 if (cm->cmsg_level != SOL_SOCKET ||
1896 cm->cmsg_type != SCM_RIGHTS) {
1899 qfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(void *);
1900 rp = (struct file **)CMSG_DATA(cm);
1901 for (i = 0; i < qfds; i++) {
1903 if (fp->f_flag & FREVOKED) {
1904 kprintf("Warning: Removing revoked fp from unix domain socket queue\n");
1906 info->fx->f_msgcount++;
1909 info->fary[info->fcount++] = fp;
1911 if (info->fcount == REVOKE_GC_MAXFILES)
1914 if (info->fcount == REVOKE_GC_MAXFILES)
1918 if (info->fcount == REVOKE_GC_MAXFILES)
1921 lwkt_reltoken(&so->so_rcv.ssb_token);
1924 * Stop the scan if we filled up our array.
1926 if (info->fcount == REVOKE_GC_MAXFILES)
1932 * Dispose of the fp's stored in an mbuf.
1934 * The dds loop can cause additional fps to be entered onto the
1935 * list while it is running, flattening out the operation and avoiding
1936 * a deep kernel stack recursion.
1939 unp_dispose(struct mbuf *m)
1941 unp_defdiscard_t dds;
1943 lwkt_gettoken(&unp_token);
1944 ++unp_defdiscard_nest;
1946 unp_scan(m, unp_discard, NULL);
1948 if (unp_defdiscard_nest == 1) {
1949 while ((dds = unp_defdiscard_base) != NULL) {
1950 unp_defdiscard_base = dds->next;
1951 closef(dds->fp, NULL);
1952 kfree(dds, M_UNPCB);
1955 --unp_defdiscard_nest;
1956 lwkt_reltoken(&unp_token);
1960 unp_listen(struct unpcb *unp, struct thread *td)
1962 struct proc *p = td->td_proc;
1965 lwkt_gettoken(&unp_token);
1966 cru2x(p->p_ucred, &unp->unp_peercred);
1967 unp_setflags(unp, UNP_HAVEPCCACHED);
1968 lwkt_reltoken(&unp_token);
1973 unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
1982 for (m = m0; m; m = m->m_next) {
1983 if (m->m_type == MT_CONTROL &&
1984 m->m_len >= sizeof(*cm)) {
1985 cm = mtod(m, struct cmsghdr *);
1986 if (cm->cmsg_level != SOL_SOCKET ||
1987 cm->cmsg_type != SCM_RIGHTS)
1989 qfds = (cm->cmsg_len - CMSG_LEN(0)) /
1991 rp = (struct file **)CMSG_DATA(cm);
1992 for (i = 0; i < qfds; i++)
1994 break; /* XXX, but saves time */
2002 * Mark visibility. info->defer is recalculated on every pass.
2005 unp_mark(struct file *fp, void *data)
2007 struct unp_gc_info *info = data;
2009 if ((fp->f_flag & FMARK) == 0) {
2011 atomic_set_int(&fp->f_flag, FMARK | FDEFER);
2012 } else if (fp->f_flag & FDEFER) {
2018 * Discard a fp previously held in a unix domain socket mbuf. To
2019 * avoid blowing out the kernel stack due to contrived chain-reactions
2020 * we may have to defer the operation to a higher procedural level.
2022 * Caller holds unp_token
2025 unp_discard(struct file *fp, void *data __unused)
2027 unp_defdiscard_t dds;
2029 spin_lock(&unp_spin);
2032 spin_unlock(&unp_spin);
2034 if (unp_defdiscard_nest) {
2035 dds = kmalloc(sizeof(*dds), M_UNPCB, M_WAITOK|M_ZERO);
2037 dds->next = unp_defdiscard_base;
2038 unp_defdiscard_base = dds;