2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/kern/lwkt_caps.c,v 1.13 2007/02/26 21:41:08 corecode Exp $
38 * This module implements the DragonFly LWKT IPC rendezvous and message
39 * passing API which operates between userland processes, between userland
40 * threads, and between userland processes and kernel threads. This API
41 * is known as the CAPS interface.
43 * Generally speaking this module abstracts the LWKT message port interface
44 * into userland Clients and Servers rendezvous through ports named
45 * by or wildcarded by (name,uid,gid). The kernel provides system calls
46 * which may be assigned to the mp_* fields in a userland-supplied
47 * kernel-managed port, and a registration interface which associates an
48 * upcall with a userland port. The kernel tracks authentication information
49 * and deals with connection failures by automatically replying to unreplied
52 * From the userland perspective a client/server connection involves two
53 * message ports on the client and two message ports on the server.
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/kernel.h>
59 #include <sys/sysproto.h>
60 #include <sys/malloc.h>
62 #include <sys/ucred.h>
64 #include <sys/sysctl.h>
66 #include <sys/mplock2.h>
69 #include <vm/vm_extern.h>
71 static int caps_process_msg(caps_kinfo_t caps, caps_kmsg_t msg, struct caps_sys_get_args *uap);
72 static void caps_free(caps_kinfo_t caps);
73 static void caps_free_msg(caps_kmsg_t msg);
74 static int caps_name_check(const char *name, size_t len);
75 static caps_kinfo_t caps_free_msg_mcaps(caps_kmsg_t msg);
76 static caps_kinfo_t kern_caps_sys_service(const char *name, uid_t uid,
77 gid_t gid, struct ucred *cred,
78 int flags, int *error);
79 static caps_kinfo_t kern_caps_sys_client(const char *name, uid_t uid,
80 gid_t gid, struct ucred *cred, int flags, int *error);
/*
 * Module-wide state.  NOTE(review): this listing is incomplete -- the
 * CAPS_HSIZE definition is not visible here; verify against full source.
 */
83 #define CAPS_HMASK (CAPS_HSIZE - 1)
/* Hash table of registered service end-points, indexed via caps_hash(). */
85 static caps_kinfo_t caps_hash_ary[CAPS_HSIZE];
/* Dummy object used only as a tsleep()/wakeup() channel for CAPF_WAITSVC waiters. */
86 static int caps_waitsvc;
88 MALLOC_DEFINE(M_CAPS, "caps", "caps IPC messaging");
/* Master enable switch for the CAPS subsystem, exported as kern.caps_enabled. */
90 static int caps_enabled;
91 SYSCTL_INT(_kern, OID_AUTO, caps_enabled,
92 CTLFLAG_RW, &caps_enabled, 0, "Enable CAPS");
94 /************************************************************************
95 * INLINE SUPPORT FUNCTIONS *
96 ************************************************************************/
/*
 * caps_hash() - hash a service name to its bucket in caps_hash_ary[].
 * Returns a pointer to the bucket head so callers can both search and
 * insert.  NOTE(review): listing incomplete -- the return-type line, the
 * hv initialization and the loop wrapping the mixing step are missing;
 * confirm against the full source.
 */
100 caps_hash(const char *name, int len)
105 hv = (hv << 5) ^ name[len] ^ (hv >> 23);
106 return(&caps_hash_ary[(hv ^ (hv >> 16)) & CAPS_HMASK]);
/*
 * caps_hold() - acquire an additional reference on a caps structure.
 * NOTE(review): body not visible in this listing; presumably increments
 * ci_refs -- confirm against full source.
 */
111 caps_hold(caps_kinfo_t caps)
/*
 * caps_drop() - release one reference; the structure is destroyed when the
 * last reference goes away (the call made on refs==0 is not visible in this
 * listing -- presumably caps_free(); confirm against full source).
 */
118 caps_drop(caps_kinfo_t caps)
120 if (--caps->ci_refs == 0)
124 /************************************************************************
125 * STATIC SUPPORT FUNCTIONS *
126 ************************************************************************/
/*
 * caps_find() - look up a registered service by (name, uid, gid).
 * uid == (uid_t)-1 or gid == (gid_t)-1 act as wildcards for that field.
 * NOTE(review): listing incomplete -- the match action, loop close and
 * return statement are missing; confirm reference semantics (whether the
 * returned caps is held) against the full source.
 */
130 caps_find(const char *name, int len, uid_t uid, gid_t gid)
133 struct caps_kinfo **chash;
135 chash = caps_hash(name, len);
136 for (caps = *chash; caps; caps = caps->ci_hnext) {
137 if ((uid == (uid_t)-1 || uid == caps->ci_uid) &&
138 (gid == (gid_t)-1 || gid == caps->ci_gid) &&
139 len == caps->ci_namelen &&
140 bcmp(name, caps->ci_name, len) == 0
/*
 * caps_find_id() - locate a caps end-point on a thread's td_caps list by
 * its per-thread port id.  NOTE(review): the match action and return are
 * not visible in this listing; presumably the found caps is returned
 * (possibly held) -- confirm against full source.
 */
151 caps_find_id(thread_t td, int id)
155 for (caps = td->td_caps; caps; caps = caps->ci_tdnext) {
156 if (caps->ci_id == id) {
/*
 * caps_alloc() - allocate and initialize a caps end-point of the given
 * type, link services into the name hash, assign a unique per-thread id,
 * and link the structure onto td->td_caps.  The allocation uses a
 * flexible trailing name buffer (offsetof ci_name[len+1]) and copies the
 * NUL terminator along with the name.
 *
 * NOTE(review): listing incomplete -- several lines (hash-list insert,
 * id-collision handling, list-link completion, return) are missing;
 * confirm against full source.
 */
166 caps_alloc(thread_t td, const char *name, int len, uid_t uid, gid_t gid,
167 int flags, caps_type_t type)
169 struct caps_kinfo **chash;
173 caps = kmalloc(offsetof(struct caps_kinfo, ci_name[len+1]),
174 M_CAPS, M_WAITOK|M_ZERO);
175 TAILQ_INIT(&caps->ci_msgpendq);
176 TAILQ_INIT(&caps->ci_msguserq);
177 caps->ci_uid = uid; /* -1 == not registered for uid search */
178 caps->ci_gid = gid; /* -1 == not registered for gid search */
179 caps->ci_type = type;
180 caps->ci_refs = 1; /* CAPKF_TDLIST reference */
181 caps->ci_namelen = len;
182 caps->ci_flags = flags;
183 bcopy(name, caps->ci_name, len + 1);
/* Only services are reachable through the (name,uid,gid) hash table. */
184 if (type == CAPT_SERVICE) {
185 chash = caps_hash(caps->ci_name, len);
186 caps->ci_hnext = *chash;
188 caps->ci_flags |= CAPKF_HLIST;
/* New id = most recent id on this thread + 1; handle signed wrap below. */
191 caps->ci_id = td->td_caps->ci_id + 1;
192 if (caps->ci_id < 0) {
194 * It is virtually impossible for this case to occur.
/* Wrap case: linear-probe for a free id on the thread's list. */
197 while ((ctmp = caps_find_id(td, caps->ci_id)) != NULL) {
205 caps->ci_flags |= CAPKF_TDLIST;
206 caps->ci_tdnext = td->td_caps;
/*
 * caps_alloc_msg() - allocate a zeroed kernel-side placeholder message.
 * The message id is derived from the message's own kernel address, which
 * makes it unique for the message's lifetime.  NOTE(review): remaining
 * initialization and return are not visible in this listing.
 */
214 caps_alloc_msg(caps_kinfo_t caps)
218 msg = kmalloc(sizeof(struct caps_kmsg), M_CAPS, M_WAITOK|M_ZERO);
219 msg->km_msgid.c_id = (off_t)(uintptr_t)msg;
/*
 * caps_find_msg() - locate a message by id on either of the end-point's
 * queues; the user (in-progress) queue is searched before the pending
 * queue.  NOTE(review): the match/return lines are missing from this
 * listing; confirm against full source.
 */
225 caps_find_msg(caps_kinfo_t caps, off_t msgid)
229 TAILQ_FOREACH(msg, &caps->ci_msguserq, km_node) {
230 if (msg->km_msgid.c_id == msgid)
233 TAILQ_FOREACH(msg, &caps->ci_msgpendq, km_node) {
234 if (msg->km_msgid.c_id == msgid)
/*
 * caps_load_ccr() - (re)load a message with originator state: set km_mcaps
 * to the sending end-point, wire the user buffer into km_xio for later
 * copyout, and snapshot the sender's credentials (pid, real/effective uid,
 * real gid, groups) into km_ccr.  When lp is NULL, proc0's credentials and
 * pid -1 are used (kernel-originated message).
 *
 * Returns the PREVIOUS km_mcaps (may be NULL); the caller is expected to
 * caps_drop() it -- this function deliberately does not.
 * NOTE(review): listing incomplete (declarations and return line missing).
 */
242 caps_load_ccr(caps_kinfo_t caps, caps_kmsg_t msg, struct lwp *lp,
243 void *udata, int ubytes)
245 struct ucred *cr = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
250 * replace km_mcaps with new VM state, return the old km_mcaps. The
251 * caller is expected to drop the rcaps ref count on return so we do
252 * not do it ourselves.
254 rcaps = caps_free_msg_mcaps(msg); /* can be NULL */
256 msg->km_mcaps = caps;
257 xio_init_ubuf(&msg->km_xio, udata, ubytes, XIOF_READ);
259 msg->km_ccr.pid = lp ? lp->lwp_proc->p_pid : -1;
260 msg->km_ccr.uid = cr->cr_ruid;
261 msg->km_ccr.euid = cr->cr_uid;
262 msg->km_ccr.gid = cr->cr_rgid;
/* Clamp the group list to what the CAPS cred structure can carry. */
263 msg->km_ccr.ngroups = MIN(cr->cr_ngroups, CAPS_MAXGROUPS);
264 for (i = 0; i < msg->km_ccr.ngroups; ++i)
265 msg->km_ccr.groups[i] = cr->cr_groups[i];
/*
 * caps_dequeue_msg() - remove a message from whichever queue(s) of the
 * end-point it is on (user and/or pending) and clear both on-queue flags.
 */
270 caps_dequeue_msg(caps_kinfo_t caps, caps_kmsg_t msg)
272 if (msg->km_flags & CAPKMF_ONUSERQ)
273 TAILQ_REMOVE(&caps->ci_msguserq, msg, km_node);
274 if (msg->km_flags & CAPKMF_ONPENDQ)
275 TAILQ_REMOVE(&caps->ci_msgpendq, msg, km_node);
276 msg->km_flags &= ~(CAPKMF_ONPENDQ|CAPKMF_ONUSERQ);
/*
 * caps_put_msg() - queue a message on the target end-point's pending
 * queue in the given state.  The message must not already be on any
 * queue (asserted).  Wakes the target except for DISPOSE messages, which
 * are batched into the service's next wakeup to save context switches.
 * NOTE(review): the wakeup/drop lines after the state test are not
 * visible in this listing; presumably this also consumes a caps
 * reference ("drops rcaps" per callers) -- confirm against full source.
 */
280 caps_put_msg(caps_kinfo_t caps, caps_kmsg_t msg, caps_msg_state_t state)
282 KKASSERT((msg->km_flags & (CAPKMF_ONUSERQ|CAPKMF_ONPENDQ)) == 0);
284 msg->km_flags |= CAPKMF_ONPENDQ;
285 msg->km_flags &= ~CAPKMF_PEEKED;
286 msg->km_state = state;
287 TAILQ_INSERT_TAIL(&caps->ci_msgpendq, msg, km_node);
290 * Instead of waking up the service for both new messages and disposals,
291 * just wakeup the service for new messages and it will process the
292 * previous disposal in the same loop, reducing the number of context
293 * switches required to run an IPC.
295 if (state != CAPMS_DISPOSE)
/*
 * Detach and return the message's originator end-point (km_mcaps, may be
 * NULL) and release any wired user pages held in km_xio.  The caller owns
 * the returned reference.  NOTE(review): the return line is not visible
 * in this listing.
 */
301 * caps_free_msg_mcaps()
305 caps_free_msg_mcaps(caps_kmsg_t msg)
309 mcaps = msg->km_mcaps; /* may be NULL */
310 msg->km_mcaps = NULL;
311 if (msg->km_xio.xio_npages)
312 xio_release(&msg->km_xio);
/*
 * Release the message's mcaps reference (if any) and free the message
 * itself.  NOTE(review): the caps_drop()/kfree() lines are not visible in
 * this listing; confirm against full source.
 */
319 * Free a caps placeholder message. The message must not be on any queues.
322 caps_free_msg(caps_kmsg_t msg)
326 if ((rcaps = caps_free_msg_mcaps(msg)) != NULL)
/*
 * Accepts only [0-9a-zA-Z_.] characters in the service name, scanning
 * from the tail.  NOTE(review): the character load, the reject path and
 * the return statements are not visible in this listing; presumably
 * returns 0 on success and an errno on invalid input (callers test
 * == 0) -- confirm against full source.
 */
332 * Validate the service name
335 caps_name_check(const char *name, size_t len)
340 for (i = len - 1; i >= 0; --i) {
342 if (c >= '0' && c <= '9')
344 if (c >= 'a' && c <= 'z')
346 if (c >= 'A' && c <= 'Z')
348 if (c == '_' || c == '.')
/*
 * caps_term() - the teardown workhorse.  Depending on `flags` it marks the
 * end-point closed (CAPKF_TDLIST), flushes queued messages (CAPKF_FLUSH,
 * optionally only those tied to `cflush`), unhooks from the service name
 * hash (CAPKF_HLIST), unhooks from the owning thread's list (CAPKF_TDLIST),
 * and recursively flushes the remote side (CAPKF_RCAPS).
 *
 * The flush loop drains both queues; messages NOT matching cflush are
 * parked on temporary queues and requeued afterwards, preserving order.
 * Matching messages are disposed of according to their state: requests are
 * auto-replied (or turned into self-directed DISPOSE messages for
 * multi-threaded services), replies are returned to the originator for
 * disposal, and degenerate cases are freed outright.
 *
 * NOTE(review): this listing is missing many interior lines (switch arms,
 * braces, reference drops); the statement order here is critical to the
 * reference-counting protocol -- do not restructure without the full
 * source.
 */
358 * Terminate portions of a caps info structure. This is used to close
359 * an end-point or to flush particular messages on an end-point.
361 * This function should not be called with CAPKF_TDLIST unless the caller
362 * has an additional hold on the caps structure.
365 caps_term(caps_kinfo_t caps, int flags, caps_kinfo_t cflush)
367 struct thread *td = curthread;
368 struct caps_kinfo **scan;
371 if (flags & CAPKF_TDLIST)
372 caps->ci_flags |= CAPKF_CLOSED;
374 if (flags & CAPKF_FLUSH) {
376 struct caps_kmsg_queue tmpuserq;
377 struct caps_kmsg_queue tmppendq;
380 TAILQ_INIT(&tmpuserq);
381 TAILQ_INIT(&tmppendq);
/* Drain both queues; pending queue first. */
383 while ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) != NULL ||
384 (msg = TAILQ_FIRST(&caps->ci_msguserq)) != NULL
386 mflags = msg->km_flags & (CAPKMF_ONUSERQ|CAPKMF_ONPENDQ);
387 caps_dequeue_msg(caps, msg);
/* Selective flush: park non-matching messages for requeue below. */
389 if (cflush && msg->km_mcaps != cflush) {
390 if (mflags & CAPKMF_ONUSERQ)
391 TAILQ_INSERT_TAIL(&tmpuserq, msg, km_node);
393 TAILQ_INSERT_TAIL(&tmppendq, msg, km_node);
396 * Dispose of the message. If the received message is a
397 * request we must reply it. If the received message is
398 * a reply we must return it for disposal. If the
399 * received message is a disposal request we simply free it.
401 switch(msg->km_state) {
403 case CAPMS_REQUEST_RETRY:
404 rcaps = caps_load_ccr(caps, msg, td->td_lwp, NULL, 0);
405 if (rcaps->ci_flags & CAPKF_CLOSED) {
407 * can't reply, if we never read the message (its on
408 * the pending queue), or if we are closed ourselves,
409 * we can just free the message. Otherwise we have
410 * to send ourselves a disposal request (multi-threaded
411 * services have to deal with disposal requests for
412 * messages that might be in progress).
414 if ((caps->ci_flags & CAPKF_CLOSED) ||
415 (mflags & CAPKMF_ONPENDQ)
421 caps_hold(caps); /* for message */
422 caps_put_msg(caps, msg, CAPMS_DISPOSE);
426 * auto-reply to the originator. rcaps already
427 * has a dangling hold so we do not have to hold it
430 caps_put_msg(rcaps, msg, CAPMS_REPLY);
434 case CAPMS_REPLY_RETRY:
435 rcaps = caps_load_ccr(caps, msg, td->td_lwp, NULL, 0);
436 if (caps == rcaps || (rcaps->ci_flags & CAPKF_CLOSED)) {
437 caps_free_msg(msg); /* degenerate disposal case */
440 caps_put_msg(rcaps, msg, CAPMS_DISPOSE);
/* Requeue the messages parked during a selective flush. */
449 while ((msg = TAILQ_FIRST(&tmpuserq)) != NULL) {
450 TAILQ_REMOVE(&tmpuserq, msg, km_node);
451 TAILQ_INSERT_TAIL(&caps->ci_msguserq, msg, km_node);
452 msg->km_flags |= CAPKMF_ONUSERQ;
454 while ((msg = TAILQ_FIRST(&tmppendq)) != NULL) {
455 TAILQ_REMOVE(&tmppendq, msg, km_node);
456 TAILQ_INSERT_TAIL(&caps->ci_msgpendq, msg, km_node);
457 msg->km_flags |= CAPKMF_ONPENDQ;
/* Unlink from the service name hash; poison ci_hnext to catch reuse. */
460 if ((flags & CAPKF_HLIST) && (caps->ci_flags & CAPKF_HLIST)) {
461 for (scan = caps_hash(caps->ci_name, caps->ci_namelen);
463 scan = &(*scan)->ci_hnext
465 KKASSERT(*scan != NULL);
467 *scan = caps->ci_hnext;
468 caps->ci_hnext = (void *)-1;
469 caps->ci_flags &= ~CAPKF_HLIST;
/* Unlink from the owning thread's td_caps list; poison ci_tdnext. */
471 if ((flags & CAPKF_TDLIST) && (caps->ci_flags & CAPKF_TDLIST)) {
472 for (scan = &caps->ci_td->td_caps;
474 scan = &(*scan)->ci_tdnext
476 KKASSERT(*scan != NULL);
478 *scan = caps->ci_tdnext;
479 caps->ci_flags &= ~CAPKF_TDLIST;
480 caps->ci_tdnext = (void *)-1;
/* Detach from the remote side and flush its messages tied to us. */
484 if ((flags & CAPKF_RCAPS) && (caps->ci_flags & CAPKF_RCAPS)) {
487 caps->ci_flags &= ~CAPKF_RCAPS;
488 if ((ctmp = caps->ci_rcaps)) {
489 caps->ci_rcaps = NULL;
490 caps_term(ctmp, CAPKF_FLUSH, caps);
/*
 * caps_free() - final destruction of a caps structure once its refcount
 * hits zero; asserts both message queues are empty and the structure is
 * off the hash and thread lists.  NOTE(review): the kfree() line is not
 * visible in this listing.
 */
497 caps_free(caps_kinfo_t caps)
499 KKASSERT(TAILQ_EMPTY(&caps->ci_msgpendq));
500 KKASSERT(TAILQ_EMPTY(&caps->ci_msguserq));
501 KKASSERT((caps->ci_flags & (CAPKF_HLIST|CAPKF_TDLIST)) == 0);
505 /************************************************************************
506 * PROCESS SUPPORT FUNCTIONS *
507 ************************************************************************/
/*
 * caps_fork() - fork hook.  The child does not inherit live connections:
 * each of the parent's ports (except CAPF_NOFORK ones) is replaced by a
 * CAPT_FORKED dummy with the same id, which makes subsequent syscalls
 * return a "reconnect" error to the child.  The list is then reversed so
 * td_caps keeps its highest-id-first ordering.  NOTE(review): listing
 * incomplete -- the reversal loop's init/termination lines are missing.
 */
510 * Create dummy entries in p2 so we can return the appropriate
511 * error code. Robust userland code will check the error for a
512 * forked condition and reforge the connection.
515 caps_fork(struct thread *td1, struct thread *td2)
521 * Create dummy entries with the same id's as the originals. Note
522 * that service entries are not re-added to the hash table. The
523 * dummy entries return an ENOTCONN error allowing userland code to
524 * detect that a fork occured. Userland must reconnect to the service.
526 for (caps1 = td1->td_caps; caps1; caps1 = caps1->ci_tdnext) {
527 if (caps1->ci_flags & CAPF_NOFORK)
529 caps2 = caps_alloc(td2,
530 caps1->ci_name, caps1->ci_namelen,
531 caps1->ci_uid, caps1->ci_gid,
532 caps1->ci_flags & CAPF_UFLAGS, CAPT_FORKED);
/* Preserve the parent's port id so userland handles stay valid. */
533 caps2->ci_id = caps1->ci_id;
537 * Reverse the list order to maintain highest-id-first
539 caps2 = td2->td_caps;
542 caps1 = caps2->ci_tdnext;
543 caps2->ci_tdnext = td2->td_caps;
544 td2->td_caps = caps2;
/*
 * caps_exit() - thread/process exit hook: fully terminate every caps
 * end-point still on the thread's list (close, flush, unhash, unlink,
 * and detach the remote side).  NOTE(review): the hold/drop bracketing
 * around caps_term() is not visible in this listing.
 */
550 caps_exit(struct thread *td)
554 while ((caps = td->td_caps) != NULL) {
556 caps_term(caps, CAPKF_TDLIST|CAPKF_HLIST|CAPKF_FLUSH|CAPKF_RCAPS, NULL);
561 /************************************************************************
563 ************************************************************************/
/*
 * Syscall wrapper: copy in and validate the service name, then create the
 * service via kern_caps_sys_service() and return the new port id in
 * sysmsg_result.  Fails up front when kern.caps_enabled is 0, when the
 * name doesn't copy in, or when the (length-adjusted) name is empty.
 * NOTE(review): error-return lines and the get_mplock bracketing are not
 * visible in this listing.
 */
566 * caps_sys_service(name, uid, gid, upcid, flags);
568 * Create an IPC service using the specified name, uid, gid, and flags.
569 * Either uid or gid can be -1, but not both. The port identifier is
572 * upcid can either be an upcall or a kqueue identifier (XXX)
577 sys_caps_sys_service(struct caps_sys_service_args *uap)
579 struct ucred *cred = curthread->td_ucred;
580 char name[CAPS_MAXNAMELEN];
585 if (caps_enabled == 0)
587 if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
/* copyinstr length includes the NUL; --len yields the string length. */
589 if ((ssize_t)--len <= 0)
593 if ((error = caps_name_check(name, len)) == 0) {
594 caps = kern_caps_sys_service(name, uap->uid, uap->gid, cred,
595 uap->flags & CAPF_UFLAGS, &error);
597 uap->sysmsg_result = caps->ci_id;
/*
 * Syscall wrapper: mirror of sys_caps_sys_service() for the client side --
 * copy in/validate the name, connect via kern_caps_sys_client(), return
 * the new port id in sysmsg_result.  NOTE(review): error-return lines and
 * lock bracketing are not visible in this listing.
 */
604 * caps_sys_client(name, uid, gid, upcid, flags);
606 * Create an IPC client connected to the specified service. Either uid or gid
607 * may be -1, indicating a wildcard, but not both. The port identifier is
610 * upcid can either be an upcall or a kqueue identifier (XXX)
615 sys_caps_sys_client(struct caps_sys_client_args *uap)
617 struct ucred *cred = curthread->td_ucred;
618 char name[CAPS_MAXNAMELEN];
623 if (caps_enabled == 0)
625 if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
/* copyinstr length includes the NUL; --len yields the string length. */
627 if ((ssize_t)--len <= 0)
631 if ((error = caps_name_check(name, len)) == 0) {
632 caps = kern_caps_sys_client(name, uap->uid, uap->gid, cred,
633 uap->flags & CAPF_UFLAGS, &error);
635 uap->sysmsg_result = caps->ci_id;
/*
 * Syscall: close the caps port identified by uap->portid on the current
 * thread -- full teardown via caps_term() with all flags.  NOTE(review):
 * the hold/drop around caps_term(), the cflush argument continuation and
 * the return are not visible in this listing.
 */
645 sys_caps_sys_close(struct caps_sys_close_args *uap)
647 struct thread *td = curthread;
653 if ((caps = caps_find_id(td, uap->portid)) != NULL) {
654 caps_term(caps, CAPKF_TDLIST|CAPKF_HLIST|CAPKF_FLUSH|CAPKF_RCAPS,
/*
 * Syscall: set the generation number on a local caps port.  Forked dummy
 * ports are rejected (caller must reconnect).  NOTE(review): the error
 * assignments and return path are not visible in this listing.
 */
669 sys_caps_sys_setgen(struct caps_sys_setgen_args *uap)
671 struct thread *td = curthread;
677 if ((caps = caps_find_id(td, uap->portid)) != NULL) {
678 if (caps->ci_type == CAPT_FORKED) {
681 caps->ci_gen = uap->gen;
/*
 * Syscall: fetch the REMOTE end's generation number (ci_rcaps->ci_gen)
 * into sysmsg_result64.  Fails for forked dummies and for ports with no
 * remote connection.  NOTE(review): the error assignments and return path
 * are not visible in this listing.
 */
696 sys_caps_sys_getgen(struct caps_sys_getgen_args *uap)
698 struct thread *td = curthread;
704 if ((caps = caps_find_id(td, uap->portid)) != NULL) {
705 if (caps->ci_type == CAPT_FORKED) {
707 } else if (caps->ci_rcaps == NULL) {
710 uap->sysmsg_result64 = caps->ci_rcaps->ci_gen;
/*
 * Syscall: send a request from a client port to its connected service.
 * Rejects negative sizes, forked dummies, unconnected ports, and clients
 * with more than CAPS_MAXINPROG messages in flight (ENOBUFS throttling).
 * On success the message id is returned in sysmsg_offset and the message
 * is queued on ci_rcaps as CAPMS_REQUEST; if the remote side is already
 * closed the message is freed and ENOTCONN is presumably returned.
 * NOTE(review): several error/cleanup lines are missing from this listing.
 */
722 * caps_sys_put(portid, msg, msgsize)
724 * Send an opaque message of the specified size to the specified port. This
725 * function may only be used with a client port. The message id is returned.
730 sys_caps_sys_put(struct caps_sys_put_args *uap)
732 struct thread *td = curthread;
737 if (uap->msgsize < 0)
741 if ((caps = caps_find_id(td, uap->portid)) == NULL) {
745 if (caps->ci_type == CAPT_FORKED) {
747 } else if (caps->ci_rcaps == NULL) {
749 } else if (caps->ci_cmsgcount > CAPS_MAXINPROG) {
751 * If this client has queued a large number of messages return
752 * ENOBUFS. The client must process some replies before it can
753 * send new messages. The server can also throttle a client by
754 * holding its replies. XXX allow a server to refuse messages from
759 msg = caps_alloc_msg(caps);
/* Hand the kernel-assigned message id back to userland immediately. */
760 uap->sysmsg_offset = msg->km_msgid.c_id;
763 * If the remote end is closed return ENOTCONN immediately, otherwise
764 * send it to the remote end.
766 * Note: since this is a new message, caps_load_ccr() returns a remote
769 if (caps->ci_rcaps->ci_flags & CAPKF_CLOSED) {
774 * new message, load_ccr returns NULL. hold rcaps for put_msg
777 caps_load_ccr(caps, msg, td->td_lwp, uap->msg, uap->msgsize);
778 caps_hold(caps->ci_rcaps);
779 ++caps->ci_cmsgcount;
780 caps_put_msg(caps->ci_rcaps, msg, CAPMS_REQUEST); /* drops rcaps */
/*
 * Syscall: reply to a previously received request identified by msgcid.
 * The message must exist and be on the user (in-progress) queue.  If the
 * originator has since closed, the reply is converted into a self-directed
 * DISPOSE; otherwise it is loaded with the reply data and queued on the
 * originator as CAPMS_REPLY.  NOTE(review): error assignments, the drop of
 * the caps_load_ccr() return in the reply path, and the return are not
 * visible in this listing.
 */
790 * caps_sys_reply(portid, msg, msgsize, msgid)
792 * Reply to the message referenced by the specified msgid, supplying opaque
793 * data back to the originator.
798 sys_caps_sys_reply(struct caps_sys_reply_args *uap)
800 struct thread *td = curthread;
806 if (uap->msgsize < 0)
810 if ((caps = caps_find_id(td, uap->portid)) == NULL) {
814 if (caps->ci_type == CAPT_FORKED) {
816 * The caps structure is just a fork placeholder, tell the caller
817 * that he has to reconnect.
820 } else if ((msg = caps_find_msg(caps, uap->msgcid)) == NULL) {
822 * Could not find message being replied to (other side might have
826 } else if ((msg->km_flags & CAPKMF_ONUSERQ) == 0) {
828 * Trying to reply to a non-replyable message
833 * If the remote end is closed requeue to ourselves for disposal.
834 * Otherwise send the reply to the other end (the other end will
835 * return a passive DISPOSE to us when it has eaten the data)
838 caps_dequeue_msg(caps, msg);
839 if (msg->km_mcaps->ci_flags & CAPKF_CLOSED) {
840 caps_drop(caps_load_ccr(caps, msg, td->td_lwp, NULL, 0));
841 caps_hold(caps); /* ref for message */
842 caps_put_msg(caps, msg, CAPMS_DISPOSE);
844 rcaps = caps_load_ccr(caps, msg, td->td_lwp, uap->msg, uap->msgsize);
845 caps_put_msg(rcaps, msg, CAPMS_REPLY);
/*
 * Syscall: non-blocking message retrieval -- take the head of the pending
 * queue and hand it to caps_process_msg(); EWOULDBLOCK (per the comment)
 * when the queue is empty.  NOTE(review): the error assignments, lock
 * bracketing and return are not visible in this listing.
 */
855 * caps_sys_get(portid, msg, maxsize, msgid, ccr)
857 * Retrieve the next ready message on the port, store its message id in
858 * uap->msgid and return the length of the message. If the message is too
859 * large to fit the message id, length, and creds are still returned, but
860 * the message is not dequeued (the caller is expected to call again with
861 * a larger buffer or to reply the messageid if it does not want to handle
864 * EWOULDBLOCK is returned if no messages are pending. Note that 0-length
865 * messages are perfectly acceptable so 0 can be legitimately returned.
870 sys_caps_sys_get(struct caps_sys_get_args *uap)
872 struct thread *td = curthread;
877 if (uap->maxsize < 0)
881 if ((caps = caps_find_id(td, uap->portid)) != NULL) {
882 if (caps->ci_type == CAPT_FORKED) {
884 } else if ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) == NULL) {
887 error = caps_process_msg(caps, msg, uap);
/*
 * Syscall: blocking variant of caps_sys_get() -- tsleep()s on the caps
 * structure (PCATCH, so signals interrupt the wait) until a message is
 * pending, then processes it via caps_process_msg().  The uap cast works
 * because the wait args presumably share the get args layout -- confirm
 * against sysproto.h.  NOTE(review): error assignments and the return path
 * are not visible in this listing.
 */
898 * caps_sys_wait(portid, msg, maxsize, msgid, ccr)
900 * Retrieve the next ready message on the port, store its message id in
901 * uap->msgid and return the length of the message. If the message is too
902 * large to fit the message id, length, and creds are still returned, but
903 * the message is not dequeued (the caller is expected to call again with
904 * a larger buffer or to reply the messageid if it does not want to handle
907 * This function blocks until interrupted or a message is received.
908 * Note that 0-length messages are perfectly acceptable so 0 can be
909 * legitimately returned.
914 sys_caps_sys_wait(struct caps_sys_wait_args *uap)
916 struct thread *td = curthread;
921 if (uap->maxsize < 0)
925 if ((caps = caps_find_id(td, uap->portid)) != NULL) {
926 if (caps->ci_type == CAPT_FORKED) {
930 while ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) == NULL) {
931 if ((error = tsleep(caps, PCATCH, "caps", 0)) != 0)
935 error = caps_process_msg(caps, msg,
936 (struct caps_sys_get_args *)uap);
/*
 * caps_process_msg() - common back-end for get/wait.  Copies a pending
 * message out to userland: if it fits uap->maxsize it is dequeued; if not,
 * only the id/length/creds are returned and the message stays pending
 * (marked CAPKMF_PEEKED) so the caller can retry with a bigger buffer.
 * Copyout failures from the originator's wired pages are logged with the
 * originating pid.  A dequeued REQUEST moves to the user queue awaiting a
 * reply; a dequeued REPLY decrements the client's in-flight count and a
 * passive DISPOSE is sent back to the server (or the message is freed in
 * the degenerate self/closed case).
 *
 * NOTE(review): this listing is missing many interior lines (error
 * re-queue on copyout failure, case labels, frees, return); the exact
 * dequeue/requeue ordering is critical -- consult the full source before
 * changing anything here.
 */
948 caps_process_msg(caps_kinfo_t caps, caps_kmsg_t msg,
949 struct caps_sys_get_args *uap)
951 struct thread *td = curthread;
956 msg->km_flags |= CAPKMF_PEEKED;
957 msgsize = msg->km_xio.xio_bytes;
958 if (msgsize <= uap->maxsize)
959 caps_dequeue_msg(caps, msg);
961 if (msg->km_xio.xio_bytes != 0) {
962 error = xio_copy_xtou(&msg->km_xio, 0, uap->msg,
963 min(msg->km_xio.xio_bytes, uap->maxsize));
965 if (msg->km_mcaps->ci_td && msg->km_mcaps->ci_td->td_proc) {
966 kprintf("xio_copy_xtou: error %d from proc %d\n",
967 error, msg->km_mcaps->ci_td->td_proc->p_pid);
969 if (msgsize > uap->maxsize)
970 caps_dequeue_msg(caps, msg);
/* Always report id, creds and length, even for a too-small buffer. */
977 error = copyout(&msg->km_msgid, uap->msgid, sizeof(msg->km_msgid));
979 error = copyout(&msg->km_ccr, uap->ccr, sizeof(msg->km_ccr));
981 uap->sysmsg_result = msgsize;
984 * If the message was dequeued we must deal with it.
986 if (msgsize <= uap->maxsize) {
987 switch(msg->km_state) {
989 case CAPMS_REQUEST_RETRY:
990 TAILQ_INSERT_TAIL(&caps->ci_msguserq, msg, km_node);
991 msg->km_flags |= CAPKMF_ONUSERQ;
994 case CAPMS_REPLY_RETRY:
995 --caps->ci_cmsgcount;
996 rcaps = caps_load_ccr(caps, msg, td->td_lwp, NULL, 0);
997 if (caps == rcaps || (rcaps->ci_flags & CAPKF_CLOSED)) {
998 /* degenerate disposal case */
1002 caps_put_msg(rcaps, msg, CAPMS_DISPOSE);
/*
 * Syscall stub: message abort is not implemented; always reports
 * CAPS_ABORT_NOTIMPL to the caller.
 */
1014 * caps_sys_abort(portid, msgcid, flags)
1016 * Abort a previously sent message. You must still wait for the message
1017 * to be returned after sending the abort request. This function will
1018 * return the appropriate CAPS_ABORT_* code depending on what it had
1024 sys_caps_sys_abort(struct caps_sys_abort_args *uap)
1026 uap->sysmsg_result = CAPS_ABORT_NOTIMPL;
/*
 * kern_caps_sys_service() - kernel-side service creation.  Non-root
 * callers may only register under their own uid and a gid they are a
 * member of.  With CAPF_EXCL, creation fails when a matching service
 * already exists.  On success a new CAPT_SERVICE end-point is allocated
 * and any CAPF_WAITSVC sleepers are woken.  Errors are reported through
 * *error; NOTE(review): the error assignments, `len` setup and return are
 * not visible in this listing.
 */
1031 * KERNEL SYSCALL SEPARATION SUPPORT FUNCTIONS
1036 kern_caps_sys_service(const char *name, uid_t uid, gid_t gid,
1037 struct ucred *cred, int flags, int *error)
1039 struct thread *td = curthread;
1046 * Make sure we can use the uid and gid
1049 if (cred->cr_uid != 0 && uid != (uid_t)-1 && cred->cr_uid != uid) {
1053 if (cred->cr_uid != 0 && gid != (gid_t)-1 && !groupmember(gid, cred)) {
1062 if (flags & CAPF_EXCL) {
1063 if ((caps = caps_find(name, strlen(name), uid, gid)) != NULL) {
1071 * Create the service
1073 caps = caps_alloc(td, name, len,
1074 uid, gid, flags & CAPF_UFLAGS, CAPT_SERVICE);
/* Wake any clients blocked in kern_caps_sys_client() with CAPF_WAITSVC. */
1075 wakeup(&caps_waitsvc);
1081 kern_caps_sys_client(const char *name, uid_t uid, gid_t gid,
1082 struct ucred *cred, int flags, int *error)
1084 struct thread *td = curthread;
1085 caps_kinfo_t caps, rcaps;
1091 * Locate the CAPS service (rcaps ref is for caps->ci_rcaps)
1094 if ((rcaps = caps_find(name, len, uid, gid)) == NULL) {
1095 if (flags & CAPF_WAITSVC) {
1097 ksnprintf(cbuf, sizeof(cbuf), "C%s", name);
1098 *error = tsleep(&caps_waitsvc, PCATCH, cbuf, 0);
1112 if ((flags & CAPF_USER) && (rcaps->ci_flags & CAPF_USER)) {
1113 if (rcaps->ci_uid != (uid_t)-1 && rcaps->ci_uid == cred->cr_uid)
1116 if ((flags & CAPF_GROUP) && (rcaps->ci_flags & CAPF_GROUP)) {
1117 if (rcaps->ci_gid != (gid_t)-1 && groupmember(rcaps->ci_gid, cred))
1120 if ((flags & CAPF_WORLD) && (rcaps->ci_flags & CAPF_WORLD)) {
1132 * Allocate the client side and connect to the server
1134 caps = caps_alloc(td, name, len,
1135 uid, gid, flags & CAPF_UFLAGS, CAPT_CLIENT);
1136 caps->ci_rcaps = rcaps;
1137 caps->ci_flags |= CAPKF_RCAPS;