2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $DragonFly: src/sys/kern/lwkt_caps.c,v 1.4 2004/04/26 17:06:18 dillon Exp $
30 * This module implements the DragonFly LWKT IPC rendezvous and message
31 * passing API which operates between userland processes, between userland
32 * threads, and between userland processes and kernel threads. This API
33 * is known as the CAPS interface.
35 * Generally speaking this module abstracts the LWKT message port interface
36 * into userland. Clients and servers rendezvous through ports named
37 * by, or wildcarded by, (name,uid,gid). The kernel provides system calls
38 * which may be assigned to the mp_* fields in a userland-supplied
39 * kernel-managed port, and a registration interface which associates an
40 * upcall with a userland port. The kernel tracks authentication information
41 * and deals with connection failures by automatically replying to unreplied
44 * From the userland perspective a client/server connection involves two
45 * message ports on the client and two message ports on the server.
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/sysproto.h>
52 #include <sys/malloc.h>
54 #include <sys/ucred.h>
56 #include <sys/sysctl.h>
58 #include <vm/vm_extern.h>
/*
 * Forward declarations for the static support functions defined below,
 * followed by the module's global state.
 */
60 static int caps_process_msg(caps_kinfo_t caps, caps_kmsg_t msg, struct caps_sys_get_args *uap);
61 static void caps_free(caps_kinfo_t caps);
62 static void caps_free_msg(caps_kmsg_t msg);
63 static int caps_name_check(const char *name, int len);
64 static caps_kinfo_t caps_free_msg_mcaps(caps_kmsg_t msg);
65 static caps_kinfo_t kern_caps_sys_service(const char *name, uid_t uid,
66 gid_t gid, struct ucred *cred,
67 int flags, int *error);
68 static caps_kinfo_t kern_caps_sys_client(const char *name, uid_t uid,
69 gid_t gid, struct ucred *cred, int flags, int *error);
/* Hash table of registered services; CAPS_HSIZE is assumed to be a power
 * of two so the mask below selects a bucket. */
72 #define CAPS_HMASK (CAPS_HSIZE - 1)
74 static caps_kinfo_t caps_hash_ary[CAPS_HSIZE];
/* tsleep/wakeup rendezvous address for clients waiting for a service
 * to register (CAPF_WAITSVC). */
75 static int caps_waitsvc;
77 MALLOC_DEFINE(M_CAPS, "caps", "caps IPC messaging");
/* Master enable for the CAPS facility; syscalls fail while this is 0. */
79 static int caps_enabled;
80 SYSCTL_INT(_kern, OID_AUTO, caps_enabled,
81 CTLFLAG_RW, &caps_enabled, 0, "Enable CAPS");
83 /************************************************************************
84 * INLINE SUPPORT FUNCTIONS *
85 ************************************************************************/
/*
 * caps_hash() - hash a service name to a chain head in caps_hash_ary[].
 *
 * NOTE(review): this is a sampled view; the accumulator initialization
 * and the loop header around the mixing line are not visible here.
 */
89 caps_hash(const char *name, int len)
94 hv = (hv << 5) ^ name[len] ^ (hv >> 23);
95 return(&caps_hash_ary[(hv ^ (hv >> 16)) & CAPS_HMASK]);
/*
 * caps_hold() - add a reference to a caps structure. The body is not
 * visible in this sampled view; presumably it increments ci_refs,
 * pairing with caps_drop() below — TODO confirm.
 */
100 caps_hold(caps_kinfo_t caps)
/*
 * caps_drop() - release one reference on a caps structure. When the
 * count falls to zero the structure is destroyed (the caps_free() call
 * on the zero path is not visible in this sampled view).
 */
107 caps_drop(caps_kinfo_t caps)
109 if (--caps->ci_refs == 0)
113 /************************************************************************
114 * STATIC SUPPORT FUNCTIONS *
115 ************************************************************************/
/*
 * caps_find() - locate a registered service by (name, uid, gid).
 *
 * Walks the hash chain selected by caps_hash(). A uid of (uid_t)-1 or
 * gid of (gid_t)-1 acts as a wildcard for that field; name and length
 * must match exactly. NOTE(review): the loop body's match action and
 * the return path are not visible in this sampled view.
 */
119 caps_find(const char *name, int len, uid_t uid, gid_t gid)
122 struct caps_kinfo **chash;
124 chash = caps_hash(name, len);
125 for (caps = *chash; caps; caps = caps->ci_hnext) {
126 if ((uid == (uid_t)-1 || uid == caps->ci_uid) &&
127 (gid == (gid_t)-1 || gid == caps->ci_gid) &&
128 len == caps->ci_namelen &&
129 bcmp(name, caps->ci_name, len) == 0
/*
 * caps_find_id() - look up a caps structure by its per-thread port id,
 * scanning the thread's td_caps list via ci_tdnext. The action taken
 * on a match (presumably hold + return) is not visible in this view.
 */
140 caps_find_id(thread_t td, int id)
144 for (caps = td->td_caps; caps; caps = caps->ci_tdnext) {
145 if (caps->ci_id == id) {
/*
 * caps_alloc() - allocate and initialize a caps end-point for thread td.
 *
 * The structure is allocated with the name stored inline (flexible
 * trailing ci_name[], hence the offsetof() sizing). Services are also
 * inserted into the name hash table; every end-point is assigned a
 * unique per-thread id and linked onto td->td_caps.
 */
155 caps_alloc(thread_t td, const char *name, int len, uid_t uid, gid_t gid,
156 int flags, caps_type_t type)
158 struct caps_kinfo **chash;
162 caps = malloc(offsetof(struct caps_kinfo, ci_name[len+1]),
163 M_CAPS, M_WAITOK|M_ZERO);
164 TAILQ_INIT(&caps->ci_msgpendq);
165 TAILQ_INIT(&caps->ci_msguserq);
166 caps->ci_uid = uid; /* -1 == not registered for uid search */
167 caps->ci_gid = gid; /* -1 == not registered for gid search */
168 caps->ci_type = type;
169 caps->ci_refs = 1; /* CAPKF_TDLIST reference */
170 caps->ci_namelen = len;
171 caps->ci_flags = flags;
172 bcopy(name, caps->ci_name, len + 1);
/* Only services are findable by name; clients are reached via ci_rcaps. */
173 if (type == CAPT_SERVICE) {
174 chash = caps_hash(caps->ci_name, len);
175 caps->ci_hnext = *chash;
177 caps->ci_flags |= CAPKF_HLIST;
/* Assign the next id after the current list head; on signed wraparound
 * (ci_id < 0) fall back to probing with caps_find_id() for a free id. */
180 caps->ci_id = td->td_caps->ci_id + 1;
181 if (caps->ci_id < 0) {
183 * It is virtually impossible for this case to occur.
186 while ((ctmp = caps_find_id(td, caps->ci_id)) != NULL) {
194 caps->ci_flags |= CAPKF_TDLIST;
195 caps->ci_tdnext = td->td_caps;
/*
 * caps_alloc_msg() - allocate a zeroed kernel placeholder message.
 * The message id handed back to userland is derived from the kernel
 * address of the message itself, making it unique while the message
 * is live.
 */
203 caps_alloc_msg(caps_kinfo_t caps)
207 msg = malloc(sizeof(struct caps_kmsg), M_CAPS, M_WAITOK|M_ZERO);
208 msg->km_msgid.c_id = (off_t)(uintptr_t)msg;
/*
 * caps_find_msg() - locate a message by id on either the user queue
 * (messages handed to userland) or the pending queue (messages not yet
 * retrieved). Returns the match; the return statements are outside
 * this sampled view.
 */
214 caps_find_msg(caps_kinfo_t caps, off_t msgid)
218 TAILQ_FOREACH(msg, &caps->ci_msguserq, km_node) {
219 if (msg->km_msgid.c_id == msgid)
222 TAILQ_FOREACH(msg, &caps->ci_msgpendq, km_node) {
223 if (msg->km_msgid.c_id == msgid)
/*
 * caps_load_ccr() - point msg at a new originator and user buffer.
 *
 * Replaces km_mcaps with 'caps' and re-initializes the message XIO from
 * the user buffer (udata/ubytes), then records the sending process's
 * credentials (pid, real/effective uid, gid, bounded group list) in
 * km_ccr for delivery to the other side. Returns the previous km_mcaps
 * (may be NULL); the CALLER must drop that reference.
 */
231 caps_load_ccr(caps_kinfo_t caps, caps_kmsg_t msg, struct proc *p,
232 void *udata, int ubytes)
235 struct ucred *cr = p->p_ucred;
239 * replace km_mcaps with new VM state, return the old km_mcaps. The
240 * caller is expected to drop the rcaps ref count on return so we do
241 * not do it ourselves.
243 rcaps = caps_free_msg_mcaps(msg); /* can be NULL */
245 msg->km_mcaps = caps;
246 xio_init_ubuf(&msg->km_xio, udata, ubytes, XIOF_READ);
/* NOTE(review): p is dereferenced for p_ucred above, so the p == NULL
 * arm of this conditional looks unreachable as written — confirm. */
248 msg->km_ccr.pid = p ? p->p_pid : -1;
249 msg->km_ccr.uid = cr->cr_ruid;
250 msg->km_ccr.euid = cr->cr_uid;
251 msg->km_ccr.gid = cr->cr_rgid;
252 msg->km_ccr.ngroups = MIN(cr->cr_ngroups, CAPS_MAXGROUPS);
253 for (i = 0; i < msg->km_ccr.ngroups; ++i)
254 msg->km_ccr.groups[i] = cr->cr_groups[i];
/*
 * caps_dequeue_msg() - remove a message from whichever queue it is on
 * (user queue and/or pending queue, per its flags) and clear the
 * on-queue flag bits.
 */
259 caps_dequeue_msg(caps_kinfo_t caps, caps_kmsg_t msg)
261 if (msg->km_flags & CAPKMF_ONUSERQ)
262 TAILQ_REMOVE(&caps->ci_msguserq, msg, km_node)
263 if (msg->km_flags & CAPKMF_ONPENDQ)
264 TAILQ_REMOVE(&caps->ci_msgpendq, msg, km_node);
265 msg->km_flags &= ~(CAPKMF_ONPENDQ|CAPKMF_ONUSERQ);
/*
 * caps_put_msg() - queue a message on the target port's pending queue
 * in the given state. The message must not already be on a queue
 * (asserted). The wakeup call gated by the DISPOSE test is outside
 * this sampled view.
 */
269 caps_put_msg(caps_kinfo_t caps, caps_kmsg_t msg, caps_msg_state_t state)
271 KKASSERT((msg->km_flags & (CAPKMF_ONUSERQ|CAPKMF_ONPENDQ)) == 0);
273 msg->km_flags |= CAPKMF_ONPENDQ;
274 msg->km_flags &= ~CAPKMF_PEEKED;
275 msg->km_state = state;
276 TAILQ_INSERT_TAIL(&caps->ci_msgpendq, msg, km_node);
279 * Instead of waking up the service for both new messages and disposals,
280 * just wakeup the service for new messages and it will process the
281 * previous disposal in the same loop, reducing the number of context
282 * switches required to run an IPC.
284 if (state != CAPMS_DISPOSE)
290 * caps_free_msg_mcaps()
/*
 * Detach and return the message's originator reference (km_mcaps, may
 * be NULL) and release the message's XIO pages if any were wired.
 * The caller owns the returned reference.
 */
294 caps_free_msg_mcaps(caps_kmsg_t msg)
298 mcaps = msg->km_mcaps; /* may be NULL */
299 msg->km_mcaps = NULL;
300 if (msg->km_xio.xio_npages)
301 xio_release(&msg->km_xio);
308 * Free a caps placeholder message. The message must not be on any queues.
/*
 * Any originator reference still attached to the message is recovered
 * via caps_free_msg_mcaps() and dropped (the caps_drop() call and the
 * free of the message itself are outside this sampled view).
 */
311 caps_free_msg(caps_kmsg_t msg)
315 if ((rcaps = caps_free_msg_mcaps(msg)) != NULL)
321 * Validate the service name
/*
 * Accepts only [0-9a-zA-Z_.] characters, scanning from the end of the
 * string. The success/failure return values are outside this sampled
 * view (presumably 0 on success, an errno on a bad character).
 */
324 caps_name_check(const char *name, int len)
329 for (i = len - 1; i >= 0; --i) {
331 if (c >= '0' && c <= '9')
333 if (c >= 'a' && c <= 'z')
335 if (c >= 'A' && c <= 'Z')
337 if (c == '_' || c == '.')
347 * Terminate portions of a caps info structure. This is used to close
348 * an end-point or to flush particular messages on an end-point.
350 * This function should not be called with CAPKF_TDLIST unless the caller
351 * has an additional hold on the caps structure.
/*
 * flags selects what to tear down: CAPKF_TDLIST marks the port closed
 * and unlinks it from its thread; CAPKF_HLIST removes a service from
 * the name hash; CAPKF_FLUSH disposes of queued messages; CAPKF_RCAPS
 * recursively flushes the remote end. When cflush is non-NULL, only
 * messages originated by cflush are flushed — others are parked on
 * temporary queues and requeued afterwards.
 */
354 caps_term(caps_kinfo_t caps, int flags, caps_kinfo_t cflush)
356 struct caps_kinfo **scan;
359 if (flags & CAPKF_TDLIST)
360 caps->ci_flags |= CAPKF_CLOSED;
362 if (flags & CAPKF_FLUSH) {
364 struct caps_kmsg_queue tmpuserq;
365 struct caps_kmsg_queue tmppendq;
368 TAILQ_INIT(&tmpuserq);
369 TAILQ_INIT(&tmppendq);
371 while ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) != NULL ||
372 (msg = TAILQ_FIRST(&caps->ci_msguserq)) != NULL
374 mflags = msg->km_flags & (CAPKMF_ONUSERQ|CAPKMF_ONPENDQ);
375 caps_dequeue_msg(caps, msg);
/* Selective flush: keep messages NOT from cflush, remembering which
 * queue each came from so it can be restored below. */
377 if (cflush && msg->km_mcaps != cflush) {
378 if (mflags & CAPKMF_ONUSERQ)
379 TAILQ_INSERT_TAIL(&tmpuserq, msg, km_node);
381 TAILQ_INSERT_TAIL(&tmppendq, msg, km_node);
384 * Dispose of the message. If the received message is a
385 * request we must reply it. If the received message is
386 * a reply we must return it for disposal. If the
387 * received message is a disposal request we simply free it.
389 switch(msg->km_state) {
391 case CAPMS_REQUEST_RETRY:
392 rcaps = caps_load_ccr(caps, msg, curproc, NULL, 0);
393 if (rcaps->ci_flags & CAPKF_CLOSED) {
395 * can't reply, if we never read the message (its on
396 * the pending queue), or if we are closed ourselves,
397 * we can just free the message. Otherwise we have
398 * to send ourselves a disposal request (multi-threaded
399 * services have to deal with disposal requests for
400 * messages that might be in progress).
402 if ((caps->ci_flags & CAPKF_CLOSED) ||
403 (mflags & CAPKMF_ONPENDQ)
409 caps_hold(caps); /* for message */
410 caps_put_msg(caps, msg, CAPMS_DISPOSE);
414 * auto-reply to the originator. rcaps already
415 * has a dangling hold so we do not have to hold it
418 caps_put_msg(rcaps, msg, CAPMS_REPLY);
422 case CAPMS_REPLY_RETRY:
423 rcaps = caps_load_ccr(caps, msg, curproc, NULL, 0);
424 if (caps == rcaps || (rcaps->ci_flags & CAPKF_CLOSED)) {
425 caps_free_msg(msg); /* degenerate disposal case */
428 caps_put_msg(rcaps, msg, CAPMS_DISPOSE);
/* Restore messages that survived a selective flush to their original
 * queues and re-set the on-queue flags cleared by caps_dequeue_msg(). */
437 while ((msg = TAILQ_FIRST(&tmpuserq)) != NULL) {
438 TAILQ_REMOVE(&tmpuserq, msg, km_node);
439 TAILQ_INSERT_TAIL(&caps->ci_msguserq, msg, km_node);
440 msg->km_flags |= CAPKMF_ONUSERQ;
442 while ((msg = TAILQ_FIRST(&tmppendq)) != NULL) {
443 TAILQ_REMOVE(&tmppendq, msg, km_node);
444 TAILQ_INSERT_TAIL(&caps->ci_msgpendq, msg, km_node);
445 msg->km_flags |= CAPKMF_ONPENDQ;
/* Unlink from the service name hash; ci_hnext is poisoned with -1 to
 * catch stale use. */
448 if ((flags & CAPKF_HLIST) && (caps->ci_flags & CAPKF_HLIST)) {
449 for (scan = caps_hash(caps->ci_name, caps->ci_namelen);
451 scan = &(*scan)->ci_hnext
453 KKASSERT(*scan != NULL);
455 *scan = caps->ci_hnext;
456 caps->ci_hnext = (void *)-1;
457 caps->ci_flags &= ~CAPKF_HLIST;
/* Unlink from the owning thread's td_caps list, same poison pattern. */
459 if ((flags & CAPKF_TDLIST) && (caps->ci_flags & CAPKF_TDLIST)) {
460 for (scan = &caps->ci_td->td_caps;
462 scan = &(*scan)->ci_tdnext
464 KKASSERT(*scan != NULL);
466 *scan = caps->ci_tdnext;
467 caps->ci_flags &= ~CAPKF_TDLIST;
468 caps->ci_tdnext = (void *)-1;
/* Detach from the remote end and flush only our messages from it. */
472 if ((flags & CAPKF_RCAPS) && (caps->ci_flags & CAPKF_RCAPS)) {
475 caps->ci_flags &= ~CAPKF_RCAPS;
476 if ((ctmp = caps->ci_rcaps)) {
477 caps->ci_rcaps = NULL;
478 caps_term(ctmp, CAPKF_FLUSH, caps);
/*
 * caps_free() - final destruction of a caps structure, called when the
 * last reference is dropped. Asserts both message queues are empty and
 * that the structure is already unlinked from the hash and thread
 * lists; the actual free() call is outside this sampled view.
 */
485 caps_free(caps_kinfo_t caps)
487 KKASSERT(TAILQ_EMPTY(&caps->ci_msgpendq));
488 KKASSERT(TAILQ_EMPTY(&caps->ci_msguserq));
489 KKASSERT((caps->ci_flags & (CAPKF_HLIST|CAPKF_TDLIST)) == 0);
493 /************************************************************************
494 * PROCESS SUPPORT FUNCTIONS *
495 ************************************************************************/
498 * Create dummy entries in p2 so we can return the appropriate
499 * error code. Robust userland code will check the error for a
500 * forked condition and reforge the connection.
503 caps_fork(struct proc *p1, struct proc *p2, int flags)
514 * Create dummy entries with the same id's as the originals. Note
515 * that service entries are not re-added to the hash table. The
516 * dummy entries return an ENOTCONN error allowing userland code to
517 * detect that a fork occurred. Userland must reconnect to the service.
/* Walk the parent thread's ports, cloning each (except CAPF_NOFORK
 * ones) as a CAPT_FORKED placeholder with the same id. */
519 for (caps1 = td1->td_caps; caps1; caps1 = caps1->ci_tdnext) {
520 if (caps1->ci_flags & CAPF_NOFORK)
522 caps2 = caps_alloc(td2,
523 caps1->ci_name, caps1->ci_namelen,
524 caps1->ci_uid, caps1->ci_gid,
525 caps1->ci_flags & CAPF_UFLAGS, CAPT_FORKED);
526 caps2->ci_id = caps1->ci_id;
530 * Reverse the list order to maintain highest-id-first
/* In-place singly-linked list reversal over ci_tdnext; the loop
 * header around these statements is outside this sampled view. */
532 caps2 = td2->td_caps;
535 caps1 = caps2->ci_tdnext;
536 caps2->ci_tdnext = td2->td_caps;
537 td2->td_caps = caps2;
/*
 * caps_exit() - tear down every caps port owned by an exiting thread,
 * fully terminating each (unlink from thread list and hash, flush all
 * messages, detach remote end) until td_caps is empty.
 */
543 caps_exit(struct thread *td)
547 while ((caps = td->td_caps) != NULL) {
549 caps_term(caps, CAPKF_TDLIST|CAPKF_HLIST|CAPKF_FLUSH|CAPKF_RCAPS, NULL);
554 /************************************************************************
556 ************************************************************************/
559 * caps_sys_service(name, uid, gid, upcid, flags);
561 * Create an IPC service using the specified name, uid, gid, and flags.
562 * Either uid or gid can be -1, but not both. The port identifier is
565 * upcid can either be an upcall or a kqueue identifier (XXX)
/*
 * Syscall wrapper: copies in and validates the service name, then
 * delegates to kern_caps_sys_service() and returns the new port id in
 * sysmsg_result. Fails early when kern.caps_enabled is 0.
 */
568 caps_sys_service(struct caps_sys_service_args *uap)
570 struct ucred *cred = curproc->p_ucred;
571 char name[CAPS_MAXNAMELEN];
576 if (caps_enabled == 0)
578 if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
582 if ((error = caps_name_check(name, len)) != 0)
585 caps = kern_caps_sys_service(name, uap->uid, uap->gid, cred,
586 uap->flags & CAPF_UFLAGS, &error);
588 uap->sysmsg_result = caps->ci_id;
593 * caps_sys_client(name, uid, gid, upcid, flags);
595 * Create an IPC client connected to the specified service. Either uid or gid
596 * may be -1, indicating a wildcard, but not both. The port identifier is
599 * upcid can either be an upcall or a kqueue identifier (XXX)
/*
 * Syscall wrapper mirroring caps_sys_service(): validates the name and
 * delegates to kern_caps_sys_client(), returning the client port id.
 */
602 caps_sys_client(struct caps_sys_client_args *uap)
604 struct ucred *cred = curproc->p_ucred;
605 char name[CAPS_MAXNAMELEN];
610 if (caps_enabled == 0)
612 if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
616 if ((error = caps_name_check(name, len)) != 0)
619 caps = kern_caps_sys_client(name, uap->uid, uap->gid, cred,
620 uap->flags & CAPF_UFLAGS, &error);
622 uap->sysmsg_result = caps->ci_id;
/*
 * caps_sys_close() - close the port identified by uap->portid on the
 * current thread: full teardown via caps_term() (unlink, flush,
 * detach remote end).
 */
627 caps_sys_close(struct caps_sys_close_args *uap)
631 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
633 caps_term(caps, CAPKF_TDLIST|CAPKF_HLIST|CAPKF_FLUSH|CAPKF_RCAPS, NULL);
/*
 * caps_sys_setgen() - set the generation number on one of the current
 * thread's ports. Forked placeholder ports are rejected (error path
 * not visible in this sampled view, presumably ENOTCONN).
 */
639 caps_sys_setgen(struct caps_sys_setgen_args *uap)
644 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
646 if (caps->ci_type == CAPT_FORKED) {
649 caps->ci_gen = uap->gen;
/*
 * caps_sys_getgen() - return the REMOTE end's generation number for one
 * of the current thread's ports in sysmsg_result64. Fails for forked
 * placeholders and for ports with no connected remote end.
 */
657 caps_sys_getgen(struct caps_sys_getgen_args *uap)
662 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
664 if (caps->ci_type == CAPT_FORKED) {
666 } else if (caps->ci_rcaps == NULL) {
669 uap->sysmsg_result64 = caps->ci_rcaps->ci_gen;
677 * caps_sys_put(portid, msg, msgsize)
679 * Send an opaque message of the specified size to the specified port. This
680 * function may only be used with a client port. The message id is returned.
/*
 * Validates the port (must not be forked, must have a remote end, must
 * not exceed the CAPS_MAXINPROG in-flight limit), allocates a kernel
 * placeholder message wrapping the user buffer, and queues it on the
 * remote (server) port as a CAPMS_REQUEST.
 */
683 caps_sys_put(struct caps_sys_put_args *uap)
687 struct proc *p = curproc;
690 if (uap->msgsize < 0)
692 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
694 if (caps->ci_type == CAPT_FORKED) {
696 } else if (caps->ci_rcaps == NULL) {
698 } else if (caps->ci_cmsgcount > CAPS_MAXINPROG) {
700 * If this client has queued a large number of messages return
701 * ENOBUFS. The client must process some replies before it can
702 * send new messages. The server can also throttle a client by
703 * holding its replies. XXX allow a server to refuse messages from
708 msg = caps_alloc_msg(caps);
709 uap->sysmsg_offset = msg->km_msgid.c_id;
712 * If the remote end is closed return ENOTCONN immediately, otherwise
713 * send it to the remote end.
715 * Note: since this is a new message, caps_load_ccr() returns a remote
718 if (caps->ci_rcaps->ci_flags & CAPKF_CLOSED) {
723 * new message, load_ccr returns NULL. hold rcaps for put_msg
726 caps_load_ccr(caps, msg, p, uap->msg, uap->msgsize);
727 caps_hold(caps->ci_rcaps);
728 ++caps->ci_cmsgcount;
729 caps_put_msg(caps->ci_rcaps, msg, CAPMS_REQUEST); /* drops rcaps */
737 * caps_sys_reply(portid, msg, msgsize, msgid)
739 * Reply to the message referenced by the specified msgid, supplying opaque
740 * data back to the originator.
/*
 * The message must exist on this port and be on the user queue (i.e.
 * previously retrieved via get/wait); otherwise the reply is rejected.
 * If the originator has since closed, the reply is requeued to
 * ourselves as a DISPOSE instead of being delivered.
 */
743 caps_sys_reply(struct caps_sys_reply_args *uap)
751 if (uap->msgsize < 0)
753 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
755 if (caps->ci_type == CAPT_FORKED) {
757 * The caps structure is just a fork placeholder, tell the caller
758 * that he has to reconnect.
761 } else if ((msg = caps_find_msg(caps, uap->msgcid)) == NULL) {
763 * Could not find message being replied to (other side might have
767 } else if ((msg->km_flags & CAPKMF_ONUSERQ) == 0) {
769 * Trying to reply to a non-replyable message
774 * If the remote end is closed requeue to ourselves for disposal.
775 * Otherwise send the reply to the other end (the other end will
776 * return a passive DISPOSE to us when it has eaten the data)
779 caps_dequeue_msg(caps, msg);
781 if (msg->km_mcaps->ci_flags & CAPKF_CLOSED) {
782 caps_drop(caps_load_ccr(caps, msg, p, NULL, 0));
783 caps_hold(caps); /* ref for message */
784 caps_put_msg(caps, msg, CAPMS_DISPOSE);
786 rcaps = caps_load_ccr(caps, msg, p, uap->msg, uap->msgsize);
787 caps_put_msg(rcaps, msg, CAPMS_REPLY);
795 * caps_sys_get(portid, msg, maxsize, msgid, ccr)
797 * Retrieve the next ready message on the port, store its message id in
798 * uap->msgid and return the length of the message. If the message is too
799 * large to fit the message id, length, and creds are still returned, but
800 * the message is not dequeued (the caller is expected to call again with
801 * a larger buffer or to reply the messageid if it does not want to handle
804 * EWOULDBLOCK is returned if no messages are pending. Note that 0-length
805 * messages are perfectly acceptable so 0 can be legitimately returned.
/*
 * Non-blocking retrieval: takes the head of the pending queue, if any,
 * and hands it to caps_process_msg() for copyout and state handling.
 */
808 caps_sys_get(struct caps_sys_get_args *uap)
814 if (uap->maxsize < 0)
816 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
818 if (caps->ci_type == CAPT_FORKED) {
820 } else if ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) == NULL) {
823 error = caps_process_msg(caps, msg, uap);
830 * caps_sys_wait(portid, msg, maxsize, msgid, ccr)
832 * Retrieve the next ready message on the port, store its message id in
833 * uap->msgid and return the length of the message. If the message is too
834 * large to fit the message id, length, and creds are still returned, but
835 * the message is not dequeued (the caller is expected to call again with
836 * a larger buffer or to reply the messageid if it does not want to handle
839 * This function blocks until interrupted or a message is received.
840 * Note that 0-length messages are perfectly acceptable so 0 can be
841 * legitimately returned.
/*
 * Blocking variant of caps_sys_get(): tsleep()s on the caps structure
 * (interruptible, PCATCH) until a message is queued, then processes it.
 * The uap cast is safe only if caps_sys_wait_args is layout-compatible
 * with caps_sys_get_args for the fields caps_process_msg() touches —
 * NOTE(review): not verifiable from this view.
 */
844 caps_sys_wait(struct caps_sys_wait_args *uap)
850 if (uap->maxsize < 0)
852 if ((caps = caps_find_id(curthread, uap->portid)) == NULL)
854 if (caps->ci_type == CAPT_FORKED) {
858 while ((msg = TAILQ_FIRST(&caps->ci_msgpendq)) == NULL) {
859 if ((error = tsleep(caps, PCATCH, "caps", 0)) != 0)
863 error = caps_process_msg(caps, msg,
864 (struct caps_sys_get_args *)uap);
/*
 * caps_process_msg() - deliver a pending message to userland.
 *
 * Copies out the message data (possibly truncated to uap->maxsize),
 * message id, and originator credentials. The message is dequeued only
 * when it fits the caller's buffer; a too-small buffer leaves it
 * pending (marked PEEKED) for retry. Dequeued REQUESTs move to the
 * user queue to await a reply; dequeued REPLYs complete the round trip
 * and are turned into DISPOSE messages back to the originator (or
 * freed in the degenerate self/closed case).
 */
872 caps_process_msg(caps_kinfo_t caps, caps_kmsg_t msg, struct caps_sys_get_args *uap)
878 msg->km_flags |= CAPKMF_PEEKED;
879 msgsize = msg->km_xio.xio_bytes;
880 if (msgsize <= uap->maxsize)
881 caps_dequeue_msg(caps, msg);
883 if (msg->km_xio.xio_bytes != 0) {
884 error = xio_copy_xtou(&msg->km_xio, uap->msg,
885 min(msg->km_xio.xio_bytes, uap->maxsize));
887 if (msg->km_mcaps->ci_td && msg->km_mcaps->ci_td->td_proc) {
888 printf("xio_copy_xtou: error %d from proc %d\n",
889 error, msg->km_mcaps->ci_td->td_proc->p_pid);
891 if (msgsize > uap->maxsize)
892 caps_dequeue_msg(caps, msg);
/* id and creds are copied out even when the data did not fit. */
899 error = copyout(&msg->km_msgid, uap->msgid, sizeof(msg->km_msgid));
901 error = copyout(&msg->km_ccr, uap->ccr, sizeof(msg->km_ccr));
903 uap->sysmsg_result = msgsize;
906 * If the message was dequeued we must deal with it.
908 if (msgsize <= uap->maxsize) {
909 switch(msg->km_state) {
911 case CAPMS_REQUEST_RETRY:
912 TAILQ_INSERT_TAIL(&caps->ci_msguserq, msg, km_node);
913 msg->km_flags |= CAPKMF_ONUSERQ;
916 case CAPMS_REPLY_RETRY:
917 --caps->ci_cmsgcount;
918 rcaps = caps_load_ccr(caps, msg, curproc, NULL, 0);
919 if (caps == rcaps || (rcaps->ci_flags & CAPKF_CLOSED)) {
920 /* degenerate disposal case */
924 caps_put_msg(rcaps, msg, CAPMS_DISPOSE);
936 * caps_sys_abort(portid, msgcid, flags)
938 * Abort a previously sent message. You must still wait for the message
939 * to be returned after sending the abort request. This function will
940 * return the appropriate CAPS_ABORT_* code depending on what it had
/* Not yet implemented: always reports CAPS_ABORT_NOTIMPL. */
944 caps_sys_abort(struct caps_sys_abort_args *uap)
946 uap->sysmsg_result = CAPS_ABORT_NOTIMPL;
951 * KERNEL SYSCALL SEPARATION SUPPORT FUNCTIONS
/*
 * kern_caps_sys_service() - create a service port.
 *
 * Non-root callers may only register under their own uid and a gid
 * they are a member of. CAPF_EXCL makes registration fail if a
 * matching (name,uid,gid) service already exists. On success the new
 * service is allocated and any clients sleeping in CAPF_WAITSVC are
 * woken. Errors are returned through *error; the error-path bodies
 * are outside this sampled view.
 */
956 kern_caps_sys_service(const char *name, uid_t uid, gid_t gid,
957 struct ucred *cred, int flags, int *error)
965 * Make sure we can use the uid and gid
968 if (cred->cr_uid != 0 && uid != (uid_t)-1 && cred->cr_uid != uid) {
972 if (cred->cr_uid != 0 && gid != (gid_t)-1 && !groupmember(gid, cred)) {
981 if (flags & CAPF_EXCL) {
982 if ((caps = caps_find(name, strlen(name), uid, gid)) != NULL) {
992 caps = caps_alloc(curthread, name, len,
993 uid, gid, flags & CAPF_UFLAGS, CAPT_SERVICE);
994 wakeup(&caps_waitsvc);
1000 kern_caps_sys_client(const char *name, uid_t uid, gid_t gid,
1001 struct ucred *cred, int flags, int *error)
1003 caps_kinfo_t caps, rcaps;
1009 * Locate the CAPS service (rcaps ref is for caps->ci_rcaps)
1012 if ((rcaps = caps_find(name, len, uid, gid)) == NULL) {
1013 if (flags & CAPF_WAITSVC) {
1015 snprintf(cbuf, sizeof(cbuf), "C%s", name);
1016 *error = tsleep(&caps_waitsvc, PCATCH, cbuf, 0);
1030 if ((flags & CAPF_USER) && (rcaps->ci_flags & CAPF_USER)) {
1031 if (rcaps->ci_uid != (uid_t)-1 && rcaps->ci_uid == cred->cr_uid)
1034 if ((flags & CAPF_GROUP) && (rcaps->ci_flags & CAPF_GROUP)) {
1035 if (rcaps->ci_gid != (gid_t)-1 && groupmember(rcaps->ci_gid, cred))
1038 if ((flags & CAPF_WORLD) && (rcaps->ci_flags & CAPF_WORLD)) {
1050 * Allocate the client side and connect to the server
1052 caps = caps_alloc(curthread, name, len,
1053 uid, gid, flags & CAPF_UFLAGS, CAPT_CLIENT);
1054 caps->ci_rcaps = rcaps;
1055 caps->ci_flags |= CAPKF_RCAPS;