/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
 * $DragonFly: src/sys/kern/sys_generic.c,v 1.49 2008/05/05 22:09:44 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/malloc.h>
#include <sys/mapped_ioctl.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/socketops.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/ktrace.h>
#include <vm/vm_page.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <machine/limits.h>
static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
typedef struct kfd_set {

enum select_copyin_states {
	COPYIN_READ, COPYIN_WRITE, COPYIN_EXCEPT, COPYIN_DONE };
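/*
 * select_copyin() below steps active_set through these states in order
 * (READ -> WRITE -> EXCEPT -> DONE), registering kevents for the
 * corresponding fd_set at each step.
 */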
struct select_kevent_copyin_args {
	int		active_set;	/* One of select_copyin_states */
	struct lwp	*lwp;		/* Pointer to our lwp */
	int		num_fds;	/* Number of file descriptors (syscall arg) */
	int		proc_fds;	/* Processed fd's (wraps) */
	int		error;		/* Returned to userland */

struct poll_kevent_copyin_args {
static int	doselect(int nd, fd_set *in, fd_set *ou, fd_set *ex,
			 struct timespec *ts, int *res);
static int	dopoll(int nfds, struct pollfd *fds, struct timespec *ts,
static int	dofileread(int, struct file *, struct uio *, int, size_t *);
static int	dofilewrite(int, struct file *, struct uio *, int, size_t *);
sys_read(struct read_args *uap)
	struct thread *td = curthread;

	if ((ssize_t)uap->nbyte < 0)

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
/*
 * Positioned (Pread) read system call
 */
sys_extpread(struct extpread_args *uap)
	struct thread *td = curthread;

	if ((ssize_t)uap->nbyte < 0)

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
/*
 * Scatter read system call.
 */
sys_readv(struct readv_args *uap)
	struct thread *td = curthread;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,

	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
/*
 * Scatter positioned read system call.
 */
sys_extpreadv(struct extpreadv_args *uap)
	struct thread *td = curthread;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,

	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
kern_preadv(int fd, struct uio *auio, int flags, size_t *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	fp = holdfp(p->p_fd, fd, FREAD);
	if (flags & O_FOFFSET && fp->f_type != DTYPE_VNODE) {
	error = dofileread(fd, fp, auio, flags, res);
/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
dofileread(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
	struct thread *td = curthread;
	struct iovec *ktriov = NULL;
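	/*
	 * Note that fo_read() consumes the uio (iov_base/iov_len and
	 * uio_resid are advanced as data is transferred), so a private copy
	 * of the iovec array must be saved up front if the transfer is to
	 * be logged by ktrace afterwards.
	 */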
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);

	len = auio->uio_resid;
	error = fo_read(fp, auio, fp->f_cred, flags);
	if (auio->uio_resid != len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))

	if (ktriov != NULL) {
		ktruio.uio_iov = ktriov;
		ktruio.uio_resid = len - auio->uio_resid;
		ktrgenio(td->td_lwp, fd, UIO_READ, &ktruio, error);
		FREE(ktriov, M_TEMP);

	*res = len - auio->uio_resid;
sys_write(struct write_args *uap)
	struct thread *td = curthread;

	if ((ssize_t)uap->nbyte < 0)

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);
sys_extpwrite(struct extpwrite_args *uap)
	struct thread *td = curthread;

	if ((ssize_t)uap->nbyte < 0)

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
sys_writev(struct writev_args *uap)
	struct thread *td = curthread;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,

	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
/*
 * Gather positioned write system call
 */
sys_extpwritev(struct extpwritev_args *uap)
	struct thread *td = curthread;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,

	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)

	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
kern_pwritev(int fd, struct uio *auio, int flags, size_t *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	fp = holdfp(p->p_fd, fd, FWRITE);
	else if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
	error = dofilewrite(fd, fp, auio, flags, res);
/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct iovec *ktriov = NULL;

	/*
	 * if tracing, save a copy of iovec and uio
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);

	len = auio->uio_resid;
	error = fo_write(fp, auio, fp->f_cred, flags);
	if (auio->uio_resid != len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))
	/* Socket layer is responsible for issuing SIGPIPE. */
	if (error == EPIPE) {
		lwpsignal(lp->lwp_proc, lp, SIGPIPE);

	if (ktriov != NULL) {
		ktruio.uio_iov = ktriov;
		ktruio.uio_resid = len - auio->uio_resid;
		ktrgenio(lp, fd, UIO_WRITE, &ktruio, error);
		FREE(ktriov, M_TEMP);

	*res = len - auio->uio_resid;
sys_ioctl(struct ioctl_args *uap)
	error = mapped_ioctl(uap->fd, uap->com, uap->data, NULL, &uap->sysmsg);

struct ioctl_map_entry {
	struct ioctl_map_range *cmd_ranges;
	LIST_ENTRY(ioctl_map_entry) entries;
/*
 * The true heart of all ioctl syscall handlers (native, emulation).
 * If map != NULL, it will be searched for a matching entry for com,
 * and appropriate conversions/conversion functions will be utilized.
 */
mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map,
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct ioctl_map_range *iomc = NULL;
#define STK_PARAMS	128
	char stkbuf[STK_PARAMS];

	fp = holdfp(p->p_fd, fd, FREAD|FWRITE);

	if (map != NULL) {	/* obey translation map */
		struct ioctl_map_entry *e;

		maskcmd = com & map->mask;

		LIST_FOREACH(e, &map->mapping, entries) {
			for (iomc = e->cmd_ranges; iomc->start != 0 ||
			     iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
			     iomc->mapfunc != NULL;
				if (maskcmd >= iomc->start &&
				    maskcmd <= iomc->end)

			/* Did we find a match? */
			if (iomc->start != 0 || iomc->maptocmd != 0 ||
			    iomc->wrapfunc != NULL || iomc->mapfunc != NULL)

		    (iomc->start == 0 && iomc->maptocmd == 0
		     && iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
			kprintf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
				map->sys, fd, maskcmd,
				(int)((maskcmd >> 8) & 0xff),
				(int)(maskcmd & 0xff));

		/*
		 * If it's a non-range one to one mapping, maptocmd should be
		 * correct. If it's a ranged one to one mapping, we pass the
		 * original value of com, and for a range mapped to a different
		 * range, we always need a mapping function to translate the
		 * ioctl to our native ioctl.  Ex. 6500-65ff <-> 9500-95ff
		 */
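		/*
		 * Purely illustrative (hypothetical values, designated
		 * initializers assumed) -- the three cases handled below:
		 *
		 *  { .start = 0x6501, .end = 0x6501,
		 *    .maptocmd = 0x9501, .maptoend = 0x9501 }
		 *	single cmd mapped 1:1: com is replaced by maptocmd.
		 *  { .start = 0x6500, .end = 0x65ff,
		 *    .maptocmd = 0x6500, .maptoend = 0x65ff }
		 *	range mapped onto itself: com passes through
		 *	(mapfunc, if present, may still adjust it).
		 *  { .start = 0x6500, .end = 0x65ff,
		 *    .maptocmd = 0x9500, .maptoend = 0x95ff,
		 *    .mapfunc = example_mapfunc }	(example_mapfunc is hypothetical)
		 *	range mapped to a different range: mapfunc required.
		 */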
		if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
			com = iomc->maptocmd;
		} else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
			if (iomc->mapfunc != NULL)
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->start, iomc->end,
			if (iomc->mapfunc != NULL) {
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->maptocmd, iomc->maptoend,
				kprintf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
					map->sys, fd, maskcmd,
					(int)((maskcmd >> 8) & 0xff),
					(int)(maskcmd & 0xff));

		error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
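	/*
	 * Illustrative example: FIONREAD is defined as _IOR('f', 127, int),
	 * so for that command IOCPARM_LEN() below yields sizeof(int)
	 * (typically 4 bytes).
	 */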
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {

	if (size > sizeof (ubuf.stkbuf)) {
		memp = kmalloc(size, M_IOCTLOPS, M_WAITOK);

	if ((com & IOC_IN) != 0) {
			error = copyin(uspc_data, data, (size_t)size);
				kfree(memp, M_IOCTLOPS);
			*(caddr_t *)data = uspc_data;
	} else if ((com & IOC_OUT) != 0 && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, (size_t)size);
	} else if ((com & IOC_VOID) != 0) {
		*(caddr_t *)data = uspc_data;

		if ((tmp = *(int *)data))
			fp->f_flag |= FNONBLOCK;
			fp->f_flag &= ~FNONBLOCK;

		if ((tmp = *(int *)data))
			fp->f_flag |= FASYNC;
			fp->f_flag &= ~FASYNC;
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred, msg);
	/*
	 * If there is an override function, call it instead of
	 * routing the call directly.
	 */
	if (map != NULL && iomc->wrapfunc != NULL)
		error = iomc->wrapfunc(fp, com, ocom, data, cred);
		error = fo_ioctl(fp, com, data, cred, msg);
	/*
	 * Copy any data out to the user; the size was already
	 * set and checked above.
	 */
	if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
		error = copyout(data, uspc_data, (size_t)size);

	kfree(memp, M_IOCTLOPS);
mapped_ioctl_register_handler(struct ioctl_map_handler *he)
	struct ioctl_map_entry *ne;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
		 he->subsys != NULL && *he->subsys != '\0');

	ne = kmalloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP, M_WAITOK);
	ne->subsys = he->subsys;
	ne->cmd_ranges = he->cmd_ranges;

	LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
	struct ioctl_map_entry *ne;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);

	LIST_FOREACH(ne, &he->map->mapping, entries) {
		if (ne->cmd_ranges != he->cmd_ranges)
		LIST_REMOVE(ne, entries);
		kfree(ne, M_IOCTLMAP);
static int	nselcoll;	/* Select collisions since boot */
SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
static int	nseldebug;
SYSCTL_INT(_kern, OID_AUTO, nseldebug, CTLFLAG_RW, &nseldebug, 0, "");
/*
 * Select system call.
 */
sys_select(struct select_args *uap)
	struct timespec *ktsp, kts;

	/*
	 * Get timeout if any.
	 */
	if (uap->tv != NULL) {
		error = copyin(uap->tv, &ktv, sizeof (ktv));
		TIMEVAL_TO_TIMESPEC(&ktv, &kts);

	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);
/*
 * Pselect system call.
 */
sys_pselect(struct pselect_args *uap)
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));

	/*
	 * Install temporary signal mask if one is provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;

	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	if (uap->sigmask != NULL) {
		/* doselect() is responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flag |= LWP_OLDMASK;
			/*
			 * No handler to run.  Restore previous mask immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
select_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
	struct select_kevent_copyin_args *skap = NULL;

	skap = (struct select_kevent_copyin_args *)arg;

	if (*events == maxevents)

	while (skap->active_set < COPYIN_DONE) {
		switch (skap->active_set) {
			/*
			 * Register descriptors for the read filter
			 */
			fdp = skap->read_set;
			filter = EVFILT_READ;

			/*
			 * Register descriptors for the write filter
			 */
			fdp = skap->write_set;
			filter = EVFILT_WRITE;

			/*
			 * Register descriptors for the exception filter
			 */
			fdp = skap->except_set;
			filter = EVFILT_EXCEPT;

			/*
			 * Nothing left to register
			 */

		while (skap->proc_fds < skap->num_fds) {
			if (FD_ISSET(fd, fdp)) {
				kev = &kevp[*events];
				EV_SET(kev, fd, filter,
				       (void *)skap->lwp->lwp_kqueue_serial);

				if (*events == maxevents)
select_copyout(void *arg, struct kevent *kevp, int count, int *res)
	struct select_kevent_copyin_args *skap;

	skap = (struct select_kevent_copyin_args *)arg;

	if (kevp[0].flags & EV_ERROR) {
		skap->error = kevp[0].data;

	for (i = 0; i < count; ++i) {
		if ((u_int)kevp[i].udata != skap->lwp->lwp_kqueue_serial) {
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&skap->lwp->lwp_kqueue, &kev);

		switch (kevp[i].filter) {
			FD_SET(kevp[i].ident, skap->read_set);
			FD_SET(kevp[i].ident, skap->write_set);
			FD_SET(kevp[i].ident, skap->except_set);
/*
 * Copy select bits in from userland.  Allocate kernel memory if the
 * set is too large to fit in the caller-supplied temporary buffer.
 */
getbits(int bytes, fd_set *in_set, kfd_set **out_set, kfd_set *tmp_set)
	if (bytes < sizeof(*tmp_set))
		*out_set = kmalloc(bytes, M_SELECT, M_WAITOK);
	error = copyin(in_set, *out_set, bytes);
/*
 * Copy returned select bits back out to userland.
 */
putbits(int bytes, kfd_set *in_set, fd_set *out_set)
	error = copyout(in_set, out_set, bytes);
/*
 * Common code for sys_select() and sys_pselect().
 *
 * in, out and ex are userland pointers.  ts must point to a validated
 * kernel-side timeout value or NULL for an infinite timeout.  res must
 * point to the syscall return value.
 */
doselect(int nd, fd_set *read, fd_set *write, fd_set *except,
	 struct timespec *ts, int *res)
	struct proc *p = curproc;
	struct select_kevent_copyin_args *kap, ka;

	if (nd > p->p_fd->fd_nfiles)		/* limit kmalloc */
		nd = p->p_fd->fd_nfiles;

	kap->lwp = curthread->td_lwp;
	kap->active_set = COPYIN_READ;

	/*
	 * Calculate bytes based on the number of __fd_mask[] array entries
	 * multiplied by the size of __fd_mask.
	 */
	bytes = howmany(nd, __NFDBITS) * sizeof(__fd_mask);
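	/*
	 * Worked example (sizes are platform-dependent): with a 64-bit
	 * __fd_mask, __NFDBITS is 64, so nd = 100 gives
	 * howmany(100, 64) = 2 mask words and bytes = 16.
	 */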
	error = getbits(bytes, read, &kap->read_set, &read_tmp);
	error = getbits(bytes, write, &kap->write_set, &write_tmp);
	error = getbits(bytes, except, &kap->except_set, &except_tmp);
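	/*
	 * kern_kevent() below drives the two callbacks: select_copyin()
	 * registers a kevent for every descriptor set in the three input
	 * sets, and select_copyout() converts returned events back into
	 * bits in those sets while bumping *res.
	 */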
	/*
	 * NOTE: Make sure the max events passed to kern_kevent() is
	 *	 effectively unlimited; passing 0x7FFFFFFF accomplishes this.
	 *
	 *	 (*res) continues to increment as returned events are
	 *	 processed.
	 */
	error = kern_kevent(&kap->lwp->lwp_kqueue, 0x7FFFFFFF, res, kap,
			    select_copyin, select_copyout, ts);
	error = putbits(bytes, kap->read_set, read);
	error = putbits(bytes, kap->write_set, write);
	error = putbits(bytes, kap->except_set, except);

	/*
	 * Cumulative error from individual events (EBADFD?)
	 */

	if (kap->read_set && kap->read_set != &read_tmp)
		kfree(kap->read_set, M_SELECT);
	if (kap->write_set && kap->write_set != &write_tmp)
		kfree(kap->write_set, M_SELECT);
	if (kap->except_set && kap->except_set != &except_tmp)
		kfree(kap->except_set, M_SELECT);

	kap->lwp->lwp_kqueue_serial++;
sys_poll(struct poll_args *uap)
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000 * 1000;
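		/*
		 * e.g. a timeout of 1500 ms becomes ts = { 1, 500000000 }.
		 */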
	error = dopoll(uap->nfds, uap->fds, tsp, &uap->sysmsg_result);
poll_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
	struct poll_kevent_copyin_args *pkap;

	pkap = (struct poll_kevent_copyin_args *)arg;

	while (pkap->pfds < pkap->nfds) {
		pfd = &pkap->fds[pkap->pfds];

		/* Clear return events */

		/* Do not check if fd is equal to -1 */
		if (pfd->fd == -1) {

		if (pfd->events & (POLLIN | POLLRDNORM))
		if (pfd->events & (POLLOUT | POLLWRNORM))
		if (pfd->events & (POLLPRI | POLLRDBAND))

		if (*events + kev_count > maxevents)

		/*
		 * NOTE: A combined serial number and poll array index is
		 *	 stored in kev->udata.
		 */
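		/*
		 * The encoding is simply (lwp_kqueue_serial + array index).
		 * poll_copyout() recovers the index by subtracting the
		 * serial number again and treats an out-of-range result
		 * as a stale event left over from an earlier poll.
		 */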
		kev = &kevp[*events];
		if (pfd->events & (POLLIN | POLLRDNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_READ, EV_ADD|EV_ENABLE,
			       0, 0, (void *)(pkap->lwp->lwp_kqueue_serial +
		if (pfd->events & (POLLOUT | POLLWRNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_WRITE, EV_ADD|EV_ENABLE,
			       0, 0, (void *)(pkap->lwp->lwp_kqueue_serial +
		if (pfd->events & (POLLPRI | POLLRDBAND)) {
			EV_SET(kev++, pfd->fd, EVFILT_EXCEPT, EV_ADD|EV_ENABLE,
			       (void *)(pkap->lwp->lwp_kqueue_serial +

			kprintf("poll index %d fd %d events %08x\n",
			       pkap->pfds, pfd->fd, pfd->events);

		(*events) += kev_count;
poll_copyout(void *arg, struct kevent *kevp, int count, int *res)
	struct poll_kevent_copyin_args *pkap;

	pkap = (struct poll_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Extract the poll array index and delete spurious events.
		 * We can easily tell if the serial number is incorrect
		 * by checking whether the extracted index is out of range.
		 */
		pi = (u_int)kevp[i].udata - (u_int)pkap->lwp->lwp_kqueue_serial;

		if (pi >= pkap->nfds) {
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&pkap->lwp->lwp_kqueue, &kev);
				kprintf("poll index %d out of range\n", pi);

		pfd = &pkap->fds[pi];
		if (kevp[i].ident == pfd->fd) {
			if (kevp[i].flags & EV_ERROR) {
				switch (kevp[i].data) {
					/*
					 * Operation not supported.  Poll
					 * does not return an error for
					 * POLLPRI (OOB/urgent data) when
					 * it is not supported by the device.
					 */
					if (kevp[i].filter != EVFILT_EXCEPT) {
						pfd->revents |= POLLERR;

					/* Bad file descriptor */
					pfd->revents |= POLLNVAL;

					pfd->revents |= POLLERR;

					kprintf("poll index %d fd %d filter %d error %d\n",
					       kevp[i].filter, kevp[i].data);

			if (kevp[i].flags & EV_EOF) {
				pfd->revents |= POLLHUP;

			switch (kevp[i].filter) {
				pfd->revents |= (POLLIN | POLLRDNORM);
				pfd->revents |= (POLLOUT | POLLWRNORM);
				pfd->revents |= (POLLPRI | POLLRDBAND);

				kprintf("poll index %d fd %d revents %08x\n",
				       pi, pfd->fd, pfd->revents);

			kprintf("poll index %d mismatch %d/%d\n",
			       pi, kevp[i].ident, pfd->fd);
dopoll(int nfds, struct pollfd *fds, struct timespec *ts, int *res)
	struct poll_kevent_copyin_args ka;
	struct pollfd sfds[64];

	/*
	 * This is a bit arbitrary but we need to limit internal kmallocs.
	 */
	if (nfds > maxfilesperproc * 2)
		nfds = maxfilesperproc * 2;
	bytes = sizeof(struct pollfd) * nfds;

	ka.lwp = curthread->td_lwp;

	ka.fds = kmalloc(bytes, M_SELECT, M_WAITOK);

	error = copyin(fds, ka.fds, bytes);
		error = kern_kevent(&ka.lwp->lwp_kqueue, ka.nfds, res, &ka,
				    poll_copyin, poll_copyout, ts);

		error = copyout(ka.fds, fds, bytes);

	kfree(ka.fds, M_SELECT);

	ka.lwp->lwp_kqueue_serial += nfds;
socket_wait_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)

socket_wait_copyout(void *arg, struct kevent *kevp, int count, int *res)

extern struct fileops socketops;
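/*
 * Wait for a socket to become readable (or for the timeout to expire) by
 * wrapping it in a temporary struct file and a private kqueue with a single
 * EVFILT_READ event registered on it.
 */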
socket_wait(struct socket *so, struct timespec *ts, int *res)
	if ((error = falloc(NULL, &fp, NULL)) != 0)

	fp->f_ops = &socketops;

	kqueue_init(&kq, &fd);

	EV_SET(&kev, 0, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, NULL);
	if ((error = kqueue_register(&kq, &kev)) != 0) {

	error = kern_kevent(&kq, 1, res, NULL, socket_wait_copyin,
			    socket_wait_copyout, ts);
/*
 * OpenBSD poll system call.
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 */
sys_openbsd_poll(struct openbsd_poll_args *uap)
	return (sys_poll((struct poll_args *)uap));

seltrue(cdev_t dev, int events)
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
/*
 * Record a select request.  A global wait must be used since a process/thread
 * might go away after recording its request.
 */
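/*
 * The selecting lwp is identified by the pid/tid recorded in the selinfo
 * (si_pid/si_tid).  If a different lwp is already recorded and is currently
 * sleeping on selwait, the selinfo is flagged SI_COLL so that selwakeup()
 * also issues a broadcast wakeup on the global selwait channel.
 */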
selrecord(struct thread *selector, struct selinfo *sip)
	struct lwp *lp = NULL;

	if (selector->td_lwp == NULL)
		panic("selrecord: thread needs a process");

	if (sip->si_pid == selector->td_proc->p_pid &&
	    sip->si_tid == selector->td_lwp->lwp_tid)
	if (sip->si_pid && (p = pfind(sip->si_pid)))
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, sip->si_tid);
	if (lp != NULL && lp->lwp_wchan == (caddr_t)&selwait) {
		sip->si_flags |= SI_COLL;
		sip->si_pid = selector->td_proc->p_pid;
		sip->si_tid = selector->td_lwp->lwp_tid;
/*
 * Do a wakeup when a selectable event occurs.
 */
selwakeup(struct selinfo *sip)
	struct lwp *lp = NULL;

	if (sip->si_pid == 0)

	if (sip->si_flags & SI_COLL) {
		sip->si_flags &= ~SI_COLL;
		wakeup((caddr_t)&selwait);	/* YYY fixable */

	p = pfind(sip->si_pid);

	lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, sip->si_tid);

	/*
	 * This is a temporary hack until the code can be rewritten.
	 * Check LWP_SELECT before assuming we can setrunnable().
	 * Otherwise we might catch the lwp before it actually goes to
	 * sleep.
	 */
	if (lp->lwp_flag & LWP_SELECT) {
		lp->lwp_flag &= ~LWP_SELECT;
	} else if (lp->lwp_wchan == (caddr_t)&selwait) {
		/*
		 * Flag the process to break the tsleep when
		 * setrunnable is called, but only call setrunnable
		 * here if the process is not in a stopped state.
		 */
		lp->lwp_flag |= LWP_BREAKTSLEEP;
		if (p->p_stat != SSTOP)

	kqueue_wakeup(&lp->lwp_kqueue);