1 /* $NetBSD: sys_mqueue.c,v 1.16 2009/04/11 23:05:26 christos Exp $ */
4 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Implementation of POSIX message queues.
31 * Defined in the Base Definitions volume of IEEE Std 1003.1-2001.
35 * Global list of message queues (mqueue_head) and proc_t::p_mqueue_cnt
36 * counter are protected by mqlist_mtx lock. The very message queue and
37 * its members are protected by mqueue::mq_mtx.
45 #include <sys/param.h>
46 #include <sys/types.h>
47 #include <sys/errno.h>
48 #include <sys/fcntl.h>
50 #include <sys/filedesc.h>
51 #include <sys/ucred.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
55 #include <sys/mqueue.h>
56 #include <sys/objcache.h>
59 #include <sys/queue.h>
60 #include <sys/select.h>
61 #include <sys/serialize.h>
62 #include <sys/signal.h>
63 #include <sys/signalvar.h>
64 #include <sys/spinlock.h>
65 #include <sys/spinlock2.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysproto.h>
69 #include <sys/systm.h>
71 #include <sys/unistd.h>
72 #include <sys/vnode.h>
74 /* System-wide limits (tunable via the kern.mqueue sysctls at end of file). */
75 static u_int mq_open_max = MQ_OPEN_MAX;
76 static u_int mq_prio_max = MQ_PRIO_MAX;
77 static u_int mq_max_msgsize = 16 * MQ_DEF_MSGSIZE;
78 static u_int mq_def_maxmsg = 32;
/* Protects mqueue_head and proc_t::p_mqueue_cnt (see header comment). */
80 struct lock mqlist_mtx;
/* Object cache used for messages that fit in MQ_DEF_MSGSIZE (see mqueue_freemsg/mq_send1). */
81 static struct objcache * mqmsg_cache;
/* Global list of all message queues in the system; guarded by mqlist_mtx. */
82 static LIST_HEAD(, mqueue) mqueue_head =
83 LIST_HEAD_INITIALIZER(mqueue_head);
85 typedef struct file file_t;	/* XXX: Should we put this in sys/types.h ? */
87 /* Function prototypes for the fileops vector below */
88 static int mq_poll_fop(file_t *, int, struct ucred *cred);
89 static int mq_stat_fop(file_t *, struct stat *, struct ucred *cred);
90 static int mq_close_fop(file_t *);
92 /* Some time-related utility functions (used by the timed send/receive paths) */
93 static int itimespecfix(struct timespec *ts);
94 static int tstohz(const struct timespec *ts);
96 /* File operations vector: mqueue descriptors support only poll/stat/close;
 * read/write/ioctl/kqfilter/shutdown are rejected via the badfo_* stubs. */
97 static struct fileops mqops = {
98 .fo_read = badfo_readwrite,
99 .fo_write = badfo_readwrite,
100 .fo_ioctl = badfo_ioctl,
101 .fo_poll = mq_poll_fop,
102 .fo_stat = mq_stat_fop,
103 .fo_close = mq_close_fop,
104 .fo_kqfilter = badfo_kqfilter,
105 .fo_shutdown = badfo_shutdown
108 /* Define a new malloc type for message queues */
109 MALLOC_DECLARE(M_MQBUF);
110 MALLOC_DEFINE(M_MQBUF, "mqueues", "Buffers to message queues");
112 /* Malloc arguments for object cache (backs mqmsg_cache allocations) */
113 struct objcache_malloc_args mqueue_malloc_args = {
114 sizeof(struct mqueue), M_MQBUF };
116 /* Spinlock around the process list; taken in mq_send1() before ksignal() */
117 extern struct spinlock allproc_spin;
120 * Initialize POSIX message queue subsystem.
125 mqmsg_cache = objcache_create("mqmsg_cache",
126 0,			/* infinite depot's capacity */
127 0,			/* default magazine's capacity */
128 NULL,			/* constructor */
129 NULL,			/* destructor */
131 objcache_malloc_alloc,
132 objcache_malloc_free,
133 &mqueue_malloc_args);
/* LK_CANRECURSE: mq_close_fop/sys_mq_open take mqlist_mtx then per-queue locks. */
135 lockinit(&mqlist_mtx, "mqlist_mtx", 0, LK_CANRECURSE);
/*
 * Free a message buffer.  Messages larger than MQ_DEF_MSGSIZE were
 * kmalloc'ed (see mq_send1()); smaller ones come from mqmsg_cache.
 */
142 mqueue_freemsg(struct mq_msg *msg, const size_t size)
145 if (size > MQ_DEF_MSGSIZE) {
148 objcache_put(mqmsg_cache, msg);
153 * Destroy the message queue: drain every per-priority queue, free each
 * message (header + payload), and tear down the queue's lock.
156 mqueue_destroy(struct mqueue *mq)
162 /* Note MQ_PQSIZE + 1: includes the reserved queue MQ_PQRESQ. */
163 for (i = 0; i < MQ_PQSIZE + 1; i++) {
164 while ((msg = TAILQ_FIRST(&mq->mq_head[i])) != NULL) {
165 TAILQ_REMOVE(&mq->mq_head[i], msg, msg_queue);
166 msz = sizeof(struct mq_msg) + msg->msg_len;
167 mqueue_freemsg(msg, msz);
170 lockuninit(&mq->mq_mtx);
175 * Lookup for file name in general list of message queues.
176 * => locks the message queue (mq_mtx held exclusive on a match)
 * Caller must already hold mqlist_mtx (asserted below).
179 mqueue_lookup(char *name)
183 KKASSERT(lockstatus(&mqlist_mtx, curthread));
185 LIST_FOREACH(mq, &mqueue_head, mq_list) {
186 if (strncmp(mq->mq_name, name, MQ_NAMELEN) == 0) {
187 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
196 * mqueue_get: get the mqueue from the descriptor.
197 * => locks the message queue, if found.
198 * => holds a reference on the file descriptor (caller must drop it).
201 mqueue_get(struct lwp *l, mqd_t mqd, file_t **fpr)
206 fp = holdfp(curproc->p_fd, (int)mqd, -1);	/* XXX: Why -1 ? */
207 if (__predict_false(fp == NULL))
/* Reject descriptors that are not message queues */
210 if (__predict_false(fp->f_type != DTYPE_MQUEUE)) {
215 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
222 * mqueue_linear_insert: perform linear insert according to the message
223 * priority into the reserved queue (MQ_PQRESQ).  Reserved queue is a
224 * sorted list used only when mq_prio_max is increased via sysctl.
227 mqueue_linear_insert(struct mqueue *mq, struct mq_msg *msg)
/* Walk the descending-priority list until a lower-priority entry is found */
231 TAILQ_FOREACH(mit, &mq->mq_head[MQ_PQRESQ], msg_queue) {
232 if (msg->msg_prio > mit->msg_prio)
/* Reached the end: lowest priority so far, append at the tail */
236 TAILQ_INSERT_TAIL(&mq->mq_head[MQ_PQRESQ], msg, msg_queue);
238 TAILQ_INSERT_BEFORE(mit, msg, msg_queue);
/*
 * Validate a timespec and round a sub-tick delay up to one tick (nstick).
 */
246 itimespecfix(struct timespec *ts)
248 if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
250 if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < nstick)
251 ts->tv_nsec = nstick;
256 * Compute number of ticks in the specified amount of time.
259 tstohz(const struct timespec *ts)
264 * usec has great enough resolution for hz, so convert to a
265 * timeval and use tvtohz() above.
267 TIMESPEC_TO_TIMEVAL(&tv, ts);
268 return tvtohz_high(&tv);	/* XXX Why _high() and not _low() ? */
272 * Converter from struct timespec to the ticks.
273 * Used by mq_timedreceive(), mq_timedsend().
 * Converts an ABSOLUTE timeout into a relative tick count in *timo;
 * fails if the deadline is invalid or already in the past.
276 abstimeout2timo(struct timespec *ts, int *timo)
281 error = itimespecfix(ts);
/* Subtract the current time to obtain the remaining interval */
286 timespecsub(ts, &tsd);
287 if (ts->tv_sec < 0 || (ts->tv_sec == 0 && ts->tv_nsec <= 0)) {
291 KKASSERT(*timo != 0);
/*
 * fo_stat handler: synthesize a struct stat from the mqueue's mode,
 * ownership and timestamps, under mq_mtx.
 */
297 mq_stat_fop(file_t *fp, struct stat *st, struct ucred *cred)
299 struct mqueue *mq = fp->f_data;
301 (void)memset(st, 0, sizeof(*st));
303 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
304 st->st_mode = mq->mq_mode;
305 st->st_uid = mq->mq_euid;
306 st->st_gid = mq->mq_egid;
307 st->st_atimespec = mq->mq_atime;
308 st->st_mtimespec = mq->mq_mtime;
309 /*st->st_ctimespec = st->st_birthtimespec = mq->mq_btime;*/
/* NOTE(review): st_uid/st_gid are assigned a second time here, overriding
 * mq_euid/mq_egid above with the opening credential — confirm which pair
 * is intended (NetBSD reports the queue's effective IDs). */
310 st->st_uid = fp->f_cred->cr_uid;
311 st->st_gid = fp->f_cred->cr_svgid;
312 lockmgr(&mq->mq_mtx, LK_RELEASE);
/*
 * fo_poll handler: POLLIN/POLLRDNORM when the queue has messages,
 * POLLOUT/POLLWRNORM when it is not full; otherwise record the caller
 * on the corresponding selinfo for a later selwakeup().
 */
318 mq_poll_fop(file_t *fp, int events, struct ucred *cred)
320 struct mqueue *mq = fp->f_data;
321 struct mq_attr *mqattr;
324 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
325 mqattr = &mq->mq_attrib;
326 if (events & (POLLIN | POLLRDNORM)) {
327 /* Ready for receiving, if there are messages in the queue */
328 if (mqattr->mq_curmsgs)
329 revents |= (POLLIN | POLLRDNORM);
331 selrecord(curthread, &mq->mq_rsel);
333 if (events & (POLLOUT | POLLWRNORM)) {
334 /* Ready for sending, if the message queue is not full */
335 if (mqattr->mq_curmsgs < mqattr->mq_maxmsg)
336 revents |= (POLLOUT | POLLWRNORM);
338 selrecord(curthread, &mq->mq_wsel);
340 lockmgr(&mq->mq_mtx, LK_RELEASE);
/*
 * fo_close handler: drop this descriptor's reference; if it is the last
 * one and the queue was mq_unlink()ed, remove it from the global list
 * for destruction.  Takes mqlist_mtx before mq_mtx (global lock order).
 */
346 mq_close_fop(file_t *fp)
348 struct proc *p = curproc;
349 struct mqueue *mq = fp->f_data;
352 lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
353 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
355 /* Decrease the counters */
359 /* Remove notification if registered for this process */
360 if (mq->mq_notify_proc == p)
361 mq->mq_notify_proc = NULL;
364 * If this is the last reference and mqueue is marked for unlink,
365 * remove and later destroy the message queue.
367 if (mq->mq_refcnt == 0 && (mq->mq_attrib.mq_flags & MQ_UNLINK)) {
368 LIST_REMOVE(mq, mq_list);
373 lockmgr(&mq->mq_mtx, LK_RELEASE);
374 lockmgr(&mqlist_mtx, LK_RELEASE);
383 * General mqueue system calls.
 *
 * mq_open(2): open or create a named message queue and return a
 * descriptor for it.  Validates flags/name/attributes, optionally
 * pre-allocates a new queue, then looks the name up under mqlist_mtx
 * and either reuses the existing queue or inserts the new one.
387 sys_mq_open(struct mq_open_args *uap)
390 syscallarg(const char *) name;
391 syscallarg(int) oflag;
392 syscallarg(mode_t) mode;
393 syscallarg(struct mq_attr) attr;
395 struct thread *td = curthread;
396 struct proc *p = td->td_proc;
397 struct filedesc *fdp = p->p_fd;
398 struct mqueue *mq, *mq_new = NULL;
401 int mqd, error, oflag;
403 /* Check access mode flags */
404 oflag = SCARG(uap, oflag);
405 if ((oflag & O_ACCMODE) == (O_WRONLY | O_RDWR)) {
409 /* Get the name from the user-space */
410 name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO);
411 error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
413 kfree(name, M_MQBUF);
417 if (oflag & O_CREAT) {
421 /* Check the limit */
422 if (p->p_mqueue_cnt == mq_open_max) {
423 kfree(name, M_MQBUF);
427 /* Empty name is invalid */
428 if (name[0] == '\0') {
429 kfree(name, M_MQBUF);
433 /* Check for mqueue attributes */
434 if (SCARG(uap, attr)) {
435 error = copyin(SCARG(uap, attr), &attr,
436 sizeof(struct mq_attr));
438 kfree(name, M_MQBUF);
441 if (attr.mq_maxmsg <= 0 || attr.mq_msgsize <= 0 ||
442 attr.mq_msgsize > mq_max_msgsize) {
443 kfree(name, M_MQBUF);
/* No attributes supplied: use system defaults */
448 memset(&attr, 0, sizeof(struct mq_attr));
449 attr.mq_maxmsg = mq_def_maxmsg;
451 MQ_DEF_MSGSIZE - sizeof(struct mq_msg);
455 * Allocate new mqueue, initialize data structures,
456 * copy the name, attributes and set the flag.
458 mq_new = kmalloc(sizeof(struct mqueue), M_MQBUF, M_WAITOK | M_ZERO);
460 lockinit(&mq_new->mq_mtx, "mq_new->mq_mtx", 0, LK_CANRECURSE);
461 for (i = 0; i < (MQ_PQSIZE + 1); i++) {
462 TAILQ_INIT(&mq_new->mq_head[i]);
465 strlcpy(mq_new->mq_name, name, MQ_NAMELEN);
466 memcpy(&mq_new->mq_attrib, &attr, sizeof(struct mq_attr));
468 /*CTASSERT((O_MASK & (MQ_UNLINK | MQ_RECEIVE)) == 0);*/
469 /* mq_new->mq_attrib.mq_flags = (O_MASK & oflag); */
470 mq_new->mq_attrib.mq_flags = oflag;
472 /* Store mode and effective UID with GID */
473 mq_new->mq_mode = ((SCARG(uap, mode) &
474 ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
475 mq_new->mq_euid = td->td_ucred->cr_uid;
476 mq_new->mq_egid = td->td_ucred->cr_svgid;
479 /* Allocate file structure and descriptor */
480 error = falloc(td->td_lwp, &fp, &mqd);
483 mqueue_destroy(mq_new);
484 kfree(name, M_MQBUF);
487 fp->f_type = DTYPE_MQUEUE;
488 fp->f_flag = FFLAGS(oflag) & (FREAD | FWRITE);
491 /* Look up for mqueue with such name */
492 lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
493 mq = mqueue_lookup(name);
/* mqueue_lookup() returns the queue locked */
497 KKASSERT(lockstatus(&mq->mq_mtx, curthread));
499 /* Check if mqueue is not marked as unlinking */
500 if (mq->mq_attrib.mq_flags & MQ_UNLINK) {
504 /* Fail if O_EXCL is set, and mqueue already exists */
505 if ((oflag & O_CREAT) && (oflag & O_EXCL)) {
511 * Check the permissions. Note the difference between
512 * VREAD/VWRITE and FREAD/FWRITE.
515 if (fp->f_flag & FREAD) {
518 if (fp->f_flag & FWRITE) {
521 if (vaccess(VNON, mq->mq_mode, mq->mq_euid, mq->mq_egid,
522 acc_mode, td->td_ucred)) {
528 /* Fail if mqueue neither exists, nor we create it */
529 if ((oflag & O_CREAT) == 0) {
530 lockmgr(&mqlist_mtx, LK_RELEASE);
531 KKASSERT(mq_new == NULL);
532 fsetfd(fdp, NULL, mqd);
533 fp->f_ops = &badfileops;
535 kfree(name, M_MQBUF);
539 /* Check the limit */
540 if (p->p_mqueue_cnt == mq_open_max) {
545 /* Insert the queue to the list */
547 lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
548 LIST_INSERT_HEAD(&mqueue_head, mq, mq_list);
/* Stamp creation time; access/modify times start equal to it */
550 getnanotime(&mq->mq_btime);
551 mq->mq_atime = mq->mq_mtime = mq->mq_btime;
554 /* Increase the counters, and make descriptor ready */
559 lockmgr(&mq->mq_mtx, LK_RELEASE);
560 lockmgr(&mqlist_mtx, LK_RELEASE);
/* Pre-allocated queue went unused (name already existed): discard it */
563 mqueue_destroy(mq_new);
565 fsetfd(fdp, NULL, mqd);
566 fp->f_ops = &badfileops;
568 fsetfd(fdp, fp, mqd);
569 uap->sysmsg_result = mqd;
572 kfree(name, M_MQBUF);
/*
 * mq_close(2): identical to close(2); mq_close_fop() does the real work
 * via the fileops vector.
 */
578 sys_mq_close(struct mq_close_args *uap)
580 return sys_close((void *)uap);
584 * Primary mq_receive1() function.
 * Shared backend for mq_receive(2) and mq_timedreceive(2): dequeues the
 * highest-priority message, copies it to user-space and frees it.
 * ts == NULL means block indefinitely; otherwise it is an absolute deadline.
587 mq_receive1(struct lwp *l, mqd_t mqdes, void *msg_ptr, size_t msg_len,
588 unsigned *msg_prio, struct timespec *ts, ssize_t *mlen)
592 struct mq_msg *msg = NULL;
593 struct mq_attr *mqattr;
597 /* Get the message queue (returns it locked, fp referenced) */
598 error = mqueue_get(l, mqdes, &fp);
/* Descriptor must have been opened for reading */
603 if ((fp->f_flag & FREAD) == 0) {
607 getnanotime(&mq->mq_atime);
608 mqattr = &mq->mq_attrib;
610 /* Check the message size limits */
611 if (msg_len < mqattr->mq_msgsize) {
616 /* Check if queue is empty */
617 while (mqattr->mq_curmsgs == 0) {
620 if (mqattr->mq_flags & O_NONBLOCK) {
/* Convert the absolute deadline to a relative tick count */
625 error = abstimeout2timo(ts, &t);
631 * Block until someone sends the message.
632 * While doing this, notification should not be sent.
634 mqattr->mq_flags |= MQ_RECEIVE;
635 error = lksleep(&mq->mq_send_cv, &mq->mq_mtx, PCATCH, "mqsend", t);
636 mqattr->mq_flags &= ~MQ_RECEIVE;
/* Queue unlinked while sleeping also aborts the receive (as EINTR) */
637 if (error || (mqattr->mq_flags & MQ_UNLINK)) {
638 error = (error == EWOULDBLOCK) ? ETIMEDOUT : EINTR;
645 * Find the highest priority message, and remove it from the queue.
646 * At first, reserved queue is checked, bitmap is next.
648 msg = TAILQ_FIRST(&mq->mq_head[MQ_PQRESQ]);
649 if (__predict_true(msg == NULL)) {
/* ffs() of the bitmap gives the lowest set index == highest priority */
650 idx = ffs(mq->mq_bitmap);
651 msg = TAILQ_FIRST(&mq->mq_head[idx]);
652 KKASSERT(msg != NULL);
656 TAILQ_REMOVE(&mq->mq_head[idx], msg, msg_queue);
658 /* Unmark the bit, if last message. */
659 if (__predict_true(idx) && TAILQ_EMPTY(&mq->mq_head[idx])) {
660 KKASSERT((MQ_PQSIZE - idx) == msg->msg_prio);
661 mq->mq_bitmap &= ~(1 << --idx);
664 /* Decrement the counter and signal waiter, if any */
665 mqattr->mq_curmsgs--;
666 wakeup_one(&mq->mq_recv_cv);
668 /* Ready for sending now */
669 selwakeup(&mq->mq_wsel);
671 lockmgr(&mq->mq_mtx, LK_RELEASE);
677 * Copy the data to the user-space.
678 * Note: According to POSIX, no message should be removed from the
679 * queue in case of fail - this would be violated.
681 *mlen = msg->msg_len;
682 error = copyout(msg->msg_ptr, msg_ptr, msg->msg_len);
683 if (error == 0 && msg_prio)
684 error = copyout(&msg->msg_prio, msg_prio, sizeof(unsigned));
685 mqueue_freemsg(msg, sizeof(struct mq_msg) + msg->msg_len);
/*
 * mq_receive(2): untimed receive; delegates to mq_receive1() with a
 * NULL timeout (block indefinitely unless O_NONBLOCK is set).
 */
691 sys_mq_receive(struct mq_receive_args *uap)
694 syscallarg(mqd_t) mqdes;
695 syscallarg(char *) msg_ptr;
696 syscallarg(size_t) msg_len;
697 syscallarg(unsigned *) msg_prio;
702 error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
703 SCARG(uap, msg_len), SCARG(uap, msg_prio), 0, &mlen);
705 uap->sysmsg_result = mlen;
/*
 * mq_timedreceive(2): receive with an optional absolute deadline copied
 * in from user-space; NULL abs_timeout degenerates to mq_receive(2).
 */
711 sys_mq_timedreceive(struct mq_timedreceive_args *uap)
714 syscallarg(mqd_t) mqdes;
715 syscallarg(char *) msg_ptr;
716 syscallarg(size_t) msg_len;
717 syscallarg(unsigned *) msg_prio;
718 syscallarg(const struct timespec *) abs_timeout;
722 struct timespec ts, *tsp;
724 /* Get and convert time value */
725 if (SCARG(uap, abs_timeout)) {
726 error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
734 error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
735 SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp, &mlen);
737 uap->sysmsg_result = mlen;
743 * Primary mq_send1() function.
 * Shared backend for mq_send(2) and mq_timedsend(2): allocates a message,
 * copies the payload in, enqueues by priority, and delivers a SIGEV_SIGNAL
 * notification when the queue transitions from empty with no reader waiting.
 * ts == NULL means block indefinitely; otherwise it is an absolute deadline.
746 mq_send1(struct lwp *l, mqd_t mqdes, const char *msg_ptr, size_t msg_len,
747 unsigned msg_prio, struct timespec *ts)
752 struct mq_attr *mqattr;
753 struct proc *notify = NULL;
758 /* Check the priority range */
759 if (msg_prio >= mq_prio_max)
762 /* Allocate a new message */
763 size = sizeof(struct mq_msg) + msg_len;
764 if (size > mq_max_msgsize)
/* Large messages are kmalloc'ed; small ones come from the object cache */
767 if (size > MQ_DEF_MSGSIZE) {
768 msg = kmalloc(size, M_MQBUF, M_WAITOK);
770 msg = objcache_get(mqmsg_cache, M_WAITOK);
773 /* Get the data from user-space */
774 error = copyin(msg_ptr, msg->msg_ptr, msg_len);
776 mqueue_freemsg(msg, size);
779 msg->msg_len = msg_len;
780 msg->msg_prio = msg_prio;
/* Get the message queue (returns it locked, fp referenced) */
783 error = mqueue_get(l, mqdes, &fp);
785 mqueue_freemsg(msg, size);
/* Descriptor must have been opened for writing */
789 if ((fp->f_flag & FWRITE) == 0) {
793 getnanotime(&mq->mq_mtime);
794 mqattr = &mq->mq_attrib;
796 /* Check the message size limit */
797 if (msg_len <= 0 || msg_len > mqattr->mq_msgsize) {
802 /* Check if queue is full */
803 while (mqattr->mq_curmsgs >= mqattr->mq_maxmsg) {
806 if (mqattr->mq_flags & O_NONBLOCK) {
/* Convert the absolute deadline to a relative tick count */
811 error = abstimeout2timo(ts, &t);
816 /* Block until queue becomes available */
817 error = lksleep(&mq->mq_recv_cv, &mq->mq_mtx, PCATCH, "mqrecv", t);
818 if (error || (mqattr->mq_flags & MQ_UNLINK)) {
819 error = (error == EWOULDBLOCK) ? ETIMEDOUT : error;
823 KKASSERT(mq->mq_attrib.mq_curmsgs < mq->mq_attrib.mq_maxmsg);
826 * Insert message into the queue, according to the priority.
827 * Note the difference between index and priority.
829 if (__predict_true(msg_prio < MQ_PQSIZE)) {
830 u_int idx = MQ_PQSIZE - msg_prio;
832 KKASSERT(idx != MQ_PQRESQ);
833 TAILQ_INSERT_TAIL(&mq->mq_head[idx], msg, msg_queue);
834 mq->mq_bitmap |= (1 << --idx);
/* Priorities >= MQ_PQSIZE go to the sorted reserved queue */
836 mqueue_linear_insert(mq, msg);
839 /* Check for the notify: only on empty->non-empty with no blocked reader */
840 if (mqattr->mq_curmsgs == 0 && mq->mq_notify_proc &&
841 (mqattr->mq_flags & MQ_RECEIVE) == 0 &&
842 mq->mq_sig_notify.sigev_notify == SIGEV_SIGNAL) {
843 /* Initialize the signal */
845 /*ksi.ksi_signo = mq->mq_sig_notify.sigev_signo;*/
846 /*ksi.ksi_code = SI_MESGQ;*/
847 /*ksi.ksi_value = mq->mq_sig_notify.sigev_value;*/
848 /* Unregister the process (notification is one-shot per POSIX) */
849 notify = mq->mq_notify_proc;
850 mq->mq_notify_proc = NULL;
853 /* Increment the counter and signal waiter, if any */
854 mqattr->mq_curmsgs++;
855 wakeup_one(&mq->mq_send_cv);
857 /* Ready for receiving now */
858 selwakeup(&mq->mq_rsel);
860 lockmgr(&mq->mq_mtx, LK_RELEASE);
864 mqueue_freemsg(msg, size);
866 /* Send the notify, if needed */
867 spin_lock_wr(&allproc_spin);
868 /*kpsignal(notify, &ksi, NULL);*/
869 ksignal(notify, mq->mq_sig_notify.sigev_signo);
870 spin_unlock_wr(&allproc_spin);
/*
 * mq_send(2): untimed send; delegates to mq_send1() with a NULL timeout
 * (block indefinitely unless O_NONBLOCK is set).
 */
877 sys_mq_send(struct mq_send_args *uap)
880 syscallarg(mqd_t) mqdes;
881 syscallarg(const char *) msg_ptr;
882 syscallarg(size_t) msg_len;
883 syscallarg(unsigned) msg_prio;
886 return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
887 SCARG(uap, msg_len), SCARG(uap, msg_prio), 0);
/*
 * mq_timedsend(2): send with an optional absolute deadline copied in
 * from user-space; NULL abs_timeout degenerates to mq_send(2).
 */
891 sys_mq_timedsend(struct mq_timedsend_args *uap)
894 syscallarg(mqd_t) mqdes;
895 syscallarg(const char *) msg_ptr;
896 syscallarg(size_t) msg_len;
897 syscallarg(unsigned) msg_prio;
898 syscallarg(const struct timespec *) abs_timeout;
900 struct timespec ts, *tsp;
903 /* Get and convert time value */
904 if (SCARG(uap, abs_timeout)) {
905 error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
913 return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
914 SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp);
/*
 * mq_notify(2): register (notification != NULL) or remove (NULL) the
 * per-queue asynchronous notification.  Only one process may be
 * registered per queue at a time.
 */
918 sys_mq_notify(struct mq_notify_args *uap)
921 syscallarg(mqd_t) mqdes;
922 syscallarg(const struct sigevent *) notification;
929 if (SCARG(uap, notification)) {
930 /* Get the signal from user-space */
931 error = copyin(SCARG(uap, notification), &sig,
932 sizeof(struct sigevent));
/* SIGEV_SIGNAL requires a valid signal number */
935 if (sig.sigev_notify == SIGEV_SIGNAL &&
936 (sig.sigev_signo <= 0 || sig.sigev_signo >= NSIG))
940 error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
945 if (SCARG(uap, notification)) {
946 /* Register notification: set the signal and target process */
947 if (mq->mq_notify_proc == NULL) {
948 memcpy(&mq->mq_sig_notify, &sig,
949 sizeof(struct sigevent));
950 mq->mq_notify_proc = curproc;
952 /* Fail if someone else already registered */
956 /* Unregister the notification */
957 mq->mq_notify_proc = NULL;
959 lockmgr(&mq->mq_mtx, LK_RELEASE);
/*
 * mq_getattr(2): snapshot the queue attributes under mq_mtx and copy
 * them out to user-space.
 */
966 sys_mq_getattr(struct mq_getattr_args *uap)
969 syscallarg(mqd_t) mqdes;
970 syscallarg(struct mq_attr *) mqstat;
977 /* Get the message queue */
978 error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
982 memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
983 lockmgr(&mq->mq_mtx, LK_RELEASE);
986 return copyout(&attr, SCARG(uap, mqstat), sizeof(struct mq_attr));
/*
 * mq_setattr(2): update the queue's O_NONBLOCK flag (all other fields of
 * the supplied mq_attr are ignored per POSIX) and optionally return the
 * previous attributes via omqstat.
 */
990 sys_mq_setattr(struct mq_setattr_args *uap)
993 syscallarg(mqd_t) mqdes;
994 syscallarg(const struct mq_attr *) mqstat;
995 syscallarg(struct mq_attr *) omqstat;
1000 int error, nonblock;
1002 error = copyin(SCARG(uap, mqstat), &attr, sizeof(struct mq_attr));
/* Extract the only honored bit before attr is reused for the old values */
1005 nonblock = (attr.mq_flags & O_NONBLOCK);
1007 /* Get the message queue */
1008 error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
1013 /* Copy the old attributes, if needed */
1014 if (SCARG(uap, omqstat)) {
1015 memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
1018 /* Ignore everything, except O_NONBLOCK */
1020 mq->mq_attrib.mq_flags |= O_NONBLOCK;
1022 mq->mq_attrib.mq_flags &= ~O_NONBLOCK;
1024 lockmgr(&mq->mq_mtx, LK_RELEASE);
1028 * Copy the data to the user-space.
1029 * Note: According to POSIX, the new attributes should not be set in
1030 * case of fail - this would be violated.
1032 if (SCARG(uap, omqstat))
1033 error = copyout(&attr, SCARG(uap, omqstat),
1034 sizeof(struct mq_attr));
/*
 * mq_unlink(2): remove a named queue from the global list.  The queue is
 * marked MQ_UNLINK and all waiters are woken; actual destruction happens
 * here only when no descriptors reference it, otherwise in the last
 * mq_close_fop().
 */
1040 sys_mq_unlink(struct mq_unlink_args *uap)
1043 syscallarg(const char *) name;
1045 struct thread *td = curthread;
1048 int error, refcnt = 0;
1050 /* Get the name from the user-space */
1051 name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO);
1052 error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
1054 kfree(name, M_MQBUF);
1058 /* Lookup for this file */
1059 lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
1060 mq = mqueue_lookup(name);
1066 /* Check the permissions: owner or root only */
1067 if (td->td_ucred->cr_uid != mq->mq_euid &&
1068 priv_check(td, PRIV_ROOT) != 0) {
1069 lockmgr(&mq->mq_mtx, LK_RELEASE);
1074 /* Mark message queue as unlinking, before leaving the window */
1075 mq->mq_attrib.mq_flags |= MQ_UNLINK;
1077 /* Wake up all waiters, if there are such */
1078 wakeup(&mq->mq_send_cv);
1079 wakeup(&mq->mq_recv_cv);
1081 selwakeup(&mq->mq_rsel);
1082 selwakeup(&mq->mq_wsel);
1084 refcnt = mq->mq_refcnt;
1086 LIST_REMOVE(mq, mq_list);
1088 lockmgr(&mq->mq_mtx, LK_RELEASE);
1090 lockmgr(&mqlist_mtx, LK_RELEASE);
1093 * If there are no references - destroy the message
1094 * queue, otherwise, the last mq_close() will do that.
1096 if (error == 0 && refcnt == 0)
1099 kfree(name, M_MQBUF);
/* sysctl interface: kern.mqueue.* exposes the tunables declared at the top */
1106 SYSCTL_NODE(_kern, OID_AUTO, mqueue,
1107 CTLFLAG_RW, 0, "Message queue options");
1109 SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_open_max,
1110 CTLFLAG_RW, &mq_open_max, 0,
1111 "Maximal number of message queue descriptors per process");
1113 SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_prio_max,
1114 CTLFLAG_RW, &mq_prio_max, 0,
1115 "Maximal priority of the message");
1117 SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_max_msgsize,
1118 CTLFLAG_RW, &mq_max_msgsize, 0,
1119 "Maximal allowed size of the message");
1121 SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_def_maxmsg,
1122 CTLFLAG_RW, &mq_def_maxmsg, 0,
1123 "Default maximal message count");
/* Register mqueue_sysinit() to run at boot, before drivers attach */
1125 SYSINIT(sys_mqueue_init, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, mqueue_sysinit, NULL);