2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
40 * $DragonFly: src/sys/kern/sys_generic.c,v 1.31 2006/05/27 20:17:16 dillon Exp $
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/filedesc.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
53 #include <sys/signalvar.h>
54 #include <sys/socketvar.h>
56 #include <sys/kernel.h>
57 #include <sys/kern_syscall.h>
58 #include <sys/malloc.h>
59 #include <sys/mapped_ioctl.h>
61 #include <sys/queue.h>
62 #include <sys/resourcevar.h>
63 #include <sys/sysctl.h>
64 #include <sys/sysent.h>
67 #include <sys/ktrace.h>
70 #include <vm/vm_page.h>
71 #include <sys/file2.h>
73 #include <machine/limits.h>
/* Malloc type tags for the allocations made in this file. */
75 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
76 static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
77 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
/* M_IOV is non-static: shared with iovec_copyin()/iovec_free() users elsewhere. */
78 MALLOC_DEFINE(M_IOV, "iov", "large iov's");
/* Forward declarations for the file-local scan/IO helpers defined below. */
80 static int pollscan (struct proc *, struct pollfd *, u_int, int *);
81 static int selscan (struct proc *, fd_mask **, fd_mask **,
83 static int dofileread(int, struct file *, struct uio *, int, int *);
84 static int dofilewrite(int, struct file *, struct uio *, int, int *);
/*
 * read() system call: read from a descriptor at its current offset.
 * Wraps the user buffer in a single-element iovec/uio and dispatches
 * to kern_preadv() without FOF_OFFSET (uio_offset = -1 means "use the
 * file's own offset").
 */
92 read(struct read_args *uap)
94 struct thread *td = curthread;
99 aiov.iov_base = uap->buf;
100 aiov.iov_len = uap->nbyte;
101 auio.uio_iov = &aiov;
103 auio.uio_offset = -1;
104 auio.uio_resid = uap->nbyte;
105 auio.uio_rw = UIO_READ;
106 auio.uio_segflg = UIO_USERSPACE;
/* A huge nbyte wraps resid negative; reject it before doing any I/O. */
109 if (auio.uio_resid < 0)
112 error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_result);
117 * Positioned (Pread) read system call
/*
 * pread() system call: positioned read.  Identical setup to read()
 * except uio_offset comes from the caller and FOF_OFFSET tells the
 * lower layers not to use (or update) the file's seek position.
 */
122 pread(struct pread_args *uap)
124 struct thread *td = curthread;
129 aiov.iov_base = uap->buf;
130 aiov.iov_len = uap->nbyte;
131 auio.uio_iov = &aiov;
133 auio.uio_offset = uap->offset;
134 auio.uio_resid = uap->nbyte;
135 auio.uio_rw = UIO_READ;
136 auio.uio_segflg = UIO_USERSPACE;
/* Reject nbyte values large enough to make the signed resid negative. */
139 if (auio.uio_resid < 0)
142 error = kern_preadv(uap->fd, &auio, FOF_OFFSET, &uap->sysmsg_result);
147 * Scatter read system call.
/*
 * readv() system call: scatter read into multiple user buffers.
 * iovec_copyin() fills the small on-stack aiov[] or allocates a larger
 * iov array (freed by iovec_free() below) before dispatching to
 * kern_preadv() at the file's current offset.
 */
152 readv(struct readv_args *uap)
154 struct thread *td = curthread;
156 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
159 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
164 auio.uio_iovcnt = uap->iovcnt;
165 auio.uio_offset = -1;
166 auio.uio_rw = UIO_READ;
167 auio.uio_segflg = UIO_USERSPACE;
170 error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_result);
/* Releases the heap iov array if iovec_copyin() had to allocate one. */
172 iovec_free(&iov, aiov);
178 * Scatter positioned read system call.
/*
 * preadv() system call: positioned scatter read.  Same shape as
 * readv() but with a caller-supplied offset and FOF_OFFSET.
 */
183 preadv(struct preadv_args *uap)
185 struct thread *td = curthread;
187 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
190 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
195 auio.uio_iovcnt = uap->iovcnt;
196 auio.uio_offset = uap->offset;
197 auio.uio_rw = UIO_READ;
198 auio.uio_segflg = UIO_USERSPACE;
201 error = kern_preadv(uap->fd, &auio, FOF_OFFSET, &uap->sysmsg_result);
/* Releases the heap iov array if iovec_copyin() had to allocate one. */
203 iovec_free(&iov, aiov);
/*
 * Common kernel entry for all the read-family syscalls above.
 * Validates the descriptor (holdfp takes a reference with FREAD
 * access), rejects positioned reads on non-vnode files and negative
 * residuals, then delegates to dofileread().  *res receives the byte
 * count for the syscall return.
 */
211 kern_preadv(int fd, struct uio *auio, int flags, int *res)
213 struct thread *td = curthread;
214 struct proc *p = td->td_proc;
220 fp = holdfp(p->p_fd, fd, FREAD);
/* pread/preadv only make sense on seekable vnode-backed files. */
223 if (flags & FOF_OFFSET && fp->f_type != DTYPE_VNODE) {
225 } else if (auio->uio_resid < 0) {
228 error = dofileread(fd, fp, auio, flags, res);
235 * Common code for readv and preadv that reads data in
236 * from a file using the passed in uio, offset, and flags.
238 * MPALMOSTSAFE - ktrace needs help
/*
 * Common read worker: performs the fo_read() and handles ktrace
 * bookkeeping.  The iovec array must be copied *before* the I/O
 * because fo_read() consumes/advances it.
 */
241 dofileread(int fd, struct file *fp, struct uio *auio, int flags, int *res)
243 struct thread *td = curthread;
244 struct proc *p = td->td_proc;
248 struct iovec *ktriov = NULL;
254 * if tracing, save a copy of iovec
256 if (KTRPOINT(td, KTR_GENIO)) {
257 int iovlen = auio->uio_iovcnt * sizeof(struct iovec);
259 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
260 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
264 len = auio->uio_resid;
265 error = fo_read(fp, auio, fp->f_cred, flags);
/*
 * If the transfer was cut short by a signal/restart/non-blocking
 * condition but some data moved, report the partial count as success.
 */
267 if (auio->uio_resid != len && (error == ERESTART ||
268 error == EINTR || error == EWOULDBLOCK))
/* Log the I/O to ktrace using the saved iovec and actual byte count. */
272 if (ktriov != NULL) {
274 ktruio.uio_iov = ktriov;
275 ktruio.uio_resid = len - auio->uio_resid;
277 ktrgenio(p, fd, UIO_READ, &ktruio, error);
280 FREE(ktriov, M_TEMP);
/* Bytes actually transferred = requested - remaining. */
284 *res = len - auio->uio_resid;
/*
 * write() system call: write to a descriptor at its current offset.
 * Mirrors read(); the uintptr_t cast strips const from the user buffer
 * pointer for iov_base.
 */
295 write(struct write_args *uap)
297 struct thread *td = curthread;
302 aiov.iov_base = (void *)(uintptr_t)uap->buf;
303 aiov.iov_len = uap->nbyte;
304 auio.uio_iov = &aiov;
306 auio.uio_offset = -1;
307 auio.uio_resid = uap->nbyte;
308 auio.uio_rw = UIO_WRITE;
309 auio.uio_segflg = UIO_USERSPACE;
/* Reject nbyte values large enough to make the signed resid negative. */
312 if (auio.uio_resid < 0)
315 error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_result);
/*
 * pwrite() system call: positioned write at a caller-supplied offset
 * (FOF_OFFSET); otherwise identical to write().
 */
326 pwrite(struct pwrite_args *uap)
328 struct thread *td = curthread;
333 aiov.iov_base = (void *)(uintptr_t)uap->buf;
334 aiov.iov_len = uap->nbyte;
335 auio.uio_iov = &aiov;
337 auio.uio_offset = uap->offset;
338 auio.uio_resid = uap->nbyte;
339 auio.uio_rw = UIO_WRITE;
340 auio.uio_segflg = UIO_USERSPACE;
/* Reject nbyte values large enough to make the signed resid negative. */
343 if (auio.uio_resid < 0)
346 error = kern_pwritev(uap->fd, &auio, FOF_OFFSET, &uap->sysmsg_result);
/*
 * writev() system call: gather write from multiple user buffers,
 * at the file's current offset.  iovec setup/teardown matches readv().
 */
355 writev(struct writev_args *uap)
357 struct thread *td = curthread;
359 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
362 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
367 auio.uio_iovcnt = uap->iovcnt;
368 auio.uio_offset = -1;
369 auio.uio_rw = UIO_WRITE;
370 auio.uio_segflg = UIO_USERSPACE;
373 error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_result);
/* Releases the heap iov array if iovec_copyin() had to allocate one. */
375 iovec_free(&iov, aiov);
381 * Gather positioned write system call
/*
 * pwritev() system call: positioned gather write (FOF_OFFSET with a
 * caller-supplied offset); otherwise identical to writev().
 */
386 pwritev(struct pwritev_args *uap)
388 struct thread *td = curthread;
390 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
393 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
398 auio.uio_iovcnt = uap->iovcnt;
399 auio.uio_offset = uap->offset;
400 auio.uio_rw = UIO_WRITE;
401 auio.uio_segflg = UIO_USERSPACE;
404 error = kern_pwritev(uap->fd, &auio, FOF_OFFSET, &uap->sysmsg_result);
/* Releases the heap iov array if iovec_copyin() had to allocate one. */
406 iovec_free(&iov, aiov);
/*
 * Common kernel entry for the write-family syscalls.  holdfp takes a
 * reference with FWRITE access; positioned writes are rejected on
 * non-vnode files, then the work is delegated to dofilewrite().
 */
414 kern_pwritev(int fd, struct uio *auio, int flags, int *res)
416 struct thread *td = curthread;
417 struct proc *p = td->td_proc;
423 fp = holdfp(p->p_fd, fd, FWRITE);
/* pwrite/pwritev only make sense on seekable vnode-backed files. */
426 else if ((flags & FOF_OFFSET) && fp->f_type != DTYPE_VNODE) {
429 error = dofilewrite(fd, fp, auio, flags, res);
437 * Common code for writev and pwritev that writes data to
438 * a file using the passed in uio, offset, and flags.
440 * MPALMOSTSAFE - ktrace needs help
/*
 * Common write worker: performs the fo_write() and handles ktrace
 * bookkeeping, partial-transfer success, and EPIPE signalling.
 */
443 dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, int *res)
445 struct thread *td = curthread;
446 struct proc *p = td->td_proc;
450 struct iovec *ktriov = NULL;
456 * if tracing, save a copy of iovec and uio
458 if (KTRPOINT(td, KTR_GENIO)) {
459 int iovlen = auio->uio_iovcnt * sizeof(struct iovec);
461 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
462 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
466 len = auio->uio_resid;
467 if (fp->f_type == DTYPE_VNODE)
469 error = fo_write(fp, auio, fp->f_cred, flags);
/*
 * A transfer interrupted by a signal/restart/non-blocking condition
 * that still moved data is reported as a short, successful write.
 */
471 if (auio->uio_resid != len && (error == ERESTART ||
472 error == EINTR || error == EWOULDBLOCK))
474 /* Socket layer is responsible for issuing SIGPIPE. */
475 if (error == EPIPE) {
/* Log the I/O to ktrace using the saved iovec and actual byte count. */
482 if (ktriov != NULL) {
484 ktruio.uio_iov = ktriov;
485 ktruio.uio_resid = len - auio->uio_resid;
487 ktrgenio(p, fd, UIO_WRITE, &ktruio, error);
490 FREE(ktriov, M_TEMP);
/* Bytes actually transferred = requested - remaining. */
494 *res = len - auio->uio_resid;
/*
 * ioctl() system call: thin wrapper around mapped_ioctl() with no
 * translation map (map == NULL means native, unmapped dispatch).
 */
504 ioctl(struct ioctl_args *uap)
506 return(mapped_ioctl(uap->fd, uap->com, uap->data, NULL));
/*
 * One registered ioctl translation handler: a cmd_ranges table plus
 * the list linkage used by the per-map LIST of entries.
 */
509 struct ioctl_map_entry {
511 struct ioctl_map_range *cmd_ranges;
512 LIST_ENTRY(ioctl_map_entry) entries;
516 * The true heart of all ioctl syscall handlers (native, emulation).
517 * If map != NULL, it will be searched for a matching entry for com,
518 * and appropriate conversions/conversion functions will be utilized.
/*
 * Core of every ioctl syscall path (native and emulation).  If map is
 * non-NULL the command is first translated through the map's registered
 * cmd_ranges (possibly via a mapfunc), then the argument data is
 * copied in/out around the fo_ioctl()/wrapfunc dispatch.  Arguments up
 * to STK_PARAMS bytes use the on-stack buffer; larger ones are
 * malloc'd from M_IOCTLOPS.
 */
521 mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map)
523 struct thread *td = curthread;
524 struct proc *p = td->td_proc;
527 struct ioctl_map_range *iomc = NULL;
533 #define STK_PARAMS 128
535 char stkbuf[STK_PARAMS];
542 fp = holdfp(p->p_fd, fd, FREAD|FWRITE);
546 if (map != NULL) { /* obey translation map */
548 struct ioctl_map_entry *e;
550 maskcmd = com & map->mask;
/*
 * Walk every registered entry's range table.  A table is terminated
 * by an all-zero sentinel range, hence the compound loop condition.
 */
552 LIST_FOREACH(e, &map->mapping, entries) {
553 for (iomc = e->cmd_ranges; iomc->start != 0 ||
554 iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
555 iomc->mapfunc != NULL;
557 if (maskcmd >= iomc->start &&
558 maskcmd <= iomc->end)
562 /* Did we find a match? */
563 if (iomc->start != 0 || iomc->maptocmd != 0 ||
564 iomc->wrapfunc != NULL || iomc->mapfunc != NULL)
569 (iomc->start == 0 && iomc->maptocmd == 0
570 && iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
571 printf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
572 map->sys, fd, maskcmd,
573 (int)((maskcmd >> 8) & 0xff),
574 (int)(maskcmd & 0xff));
580 * If it's a non-range one to one mapping, maptocmd should be
581 * correct. If it's a ranged one to one mapping, we pass the
582 * original value of com, and for a range mapped to a different
583 * range, we always need a mapping function to translate the
584 * ioctl to our native ioctl. Ex. 6500-65ff <-> 9500-95ff
586 if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
587 com = iomc->maptocmd;
588 } else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
589 if (iomc->mapfunc != NULL)
590 com = iomc->mapfunc(iomc->start, iomc->end,
591 iomc->start, iomc->end,
594 if (iomc->mapfunc != NULL) {
595 com = iomc->mapfunc(iomc->start, iomc->end,
596 iomc->maptocmd, iomc->maptoend,
/* Range-to-different-range mapping without a mapfunc is a bug. */
599 printf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
600 map->sys, fd, maskcmd,
601 (int)((maskcmd >> 8) & 0xff),
602 (int)(maskcmd & 0xff));
/* FIOCLEX/FIONCLEX: toggle close-on-exec on the descriptor itself. */
611 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
614 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
619 * Interpret high order word to find amount of data to be
620 * copied to/from the user's address space.
622 size = IOCPARM_LEN(com);
623 if (size > IOCPARM_MAX) {
/* Arguments larger than the stack buffer go on the heap. */
629 if (size > sizeof (ubuf.stkbuf)) {
630 memp = malloc(size, M_IOCTLOPS, M_WAITOK);
635 if ((com & IOC_IN) != 0) {
637 error = copyin(uspc_data, data, (u_int)size);
640 free(memp, M_IOCTLOPS);
/* IOC_IN with size 0: pass the user pointer itself as the argument. */
644 *(caddr_t *)data = uspc_data;
646 } else if ((com & IOC_OUT) != 0 && size) {
648 * Zero the buffer so the user always
649 * gets back something deterministic.
652 } else if ((com & IOC_VOID) != 0) {
653 *(caddr_t *)data = uspc_data;
/* FIONBIO: mirror the request into f_flag, then inform the backend. */
659 if ((tmp = *(int *)data))
660 fp->f_flag |= FNONBLOCK;
662 fp->f_flag &= ~FNONBLOCK;
663 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, cred);
/* FIOASYNC: same pattern for async-I/O notification. */
667 if ((tmp = *(int *)data))
668 fp->f_flag |= FASYNC;
670 fp->f_flag &= ~FASYNC;
671 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred);
676 * If there is a override function,
677 * call it instead of directly routing the call
679 if (map != NULL && iomc->wrapfunc != NULL)
680 error = iomc->wrapfunc(fp, com, ocom, data, cred);
682 error = fo_ioctl(fp, com, data, cred);
684 * Copy any data to user, size was
685 * already set and checked above.
687 if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
688 error = copyout(data, uspc_data, (u_int)size);
692 free(memp, M_IOCTLOPS);
/*
 * Register an ioctl translation handler: allocate an entry, record the
 * subsystem name and range table (both borrowed from the caller, which
 * must keep them alive), and link it onto the map's entry list.
 */
699 mapped_ioctl_register_handler(struct ioctl_map_handler *he)
701 struct ioctl_map_entry *ne;
703 KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
704 he->subsys != NULL && *he->subsys != '\0');
706 ne = malloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP, M_WAITOK);
708 ne->subsys = he->subsys;
709 ne->cmd_ranges = he->cmd_ranges;
711 LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
/*
 * Unregister a previously registered handler, matched by cmd_ranges
 * pointer identity; the entry is unlinked and freed.
 */
717 mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
719 struct ioctl_map_entry *ne;
721 KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);
723 LIST_FOREACH(ne, &he->map->mapping, entries) {
724 if (ne->cmd_ranges != he->cmd_ranges)
726 LIST_REMOVE(ne, entries);
727 free(ne, M_IOCTLMAP);
/* Select collision counter, exported read-only via kern.nselcoll. */
733 static int nselcoll; /* Select collisions since boot */
735 SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
738 * Select system call.
/*
 * select() system call.  Copies the three fd_sets into a single bit
 * buffer (stack-based when it fits, else M_SELECT), scans them via
 * selscan(), and sleeps on the global selwait channel with collision
 * detection (P_SELECT / nselcoll) until ready, timeout, or signal.
 */
741 select(struct select_args *uap)
743 struct proc *p = curproc;
746 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
747 * infds with the new FD_SETSIZE of 1024, and more than enough for
748 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
751 fd_mask s_selbits[howmany(2048, NFDBITS)];
752 fd_mask *ibits[3], *obits[3], *selbits, *sbp;
753 struct timeval atv, rtv, ttv;
754 int ncoll, error, timo;
755 u_int nbufbytes, ncpbytes, nfdbits;
759 if (uap->nd > p->p_fd->fd_nfiles)
760 uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
763 * Allocate just enough bits for the non-null fd_sets. Use the
764 * preallocated auto buffer if possible.
766 nfdbits = roundup(uap->nd, NFDBITS);
767 ncpbytes = nfdbits / NBBY;
/* Each present set needs input + output copies, hence 2 * ncpbytes. */
770 nbufbytes += 2 * ncpbytes;
772 nbufbytes += 2 * ncpbytes;
774 nbufbytes += 2 * ncpbytes;
775 if (nbufbytes <= sizeof s_selbits)
776 selbits = &s_selbits[0];
778 selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
781 * Assign pointers into the bit buffers and fetch the input bits.
782 * Put the output buffers together so that they can be bzeroed
786 #define getbits(name, x) \
788 if (uap->name == NULL) \
791 ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
793 sbp += ncpbytes / sizeof *sbp; \
794 error = copyin(uap->name, ibits[x], ncpbytes); \
/* Output halves are contiguous: one bzero clears all of them. */
804 bzero(selbits, nbufbytes / 2);
807 error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
811 if (itimerfix(&atv)) {
/* Convert the relative timeout into an absolute uptime deadline. */
815 getmicrouptime(&rtv);
816 timevaladd(&atv, &rtv);
/* Retry loop: scan, and if nothing ready, sleep until wakeup/timeout. */
824 p->p_flag |= P_SELECT;
825 error = selscan(p, ibits, obits, uap->nd, &uap->sysmsg_result);
826 if (error || uap->sysmsg_result)
828 if (atv.tv_sec || atv.tv_usec) {
829 getmicrouptime(&rtv);
830 if (timevalcmp(&rtv, &atv, >=))
833 timevalsub(&ttv, &rtv);
/* Clamp the tsleep timeout to 24 hours to avoid tick overflow. */
834 timo = ttv.tv_sec > 24 * 60 * 60 ?
835 24 * 60 * 60 * hz : tvtohz_high(&ttv);
/* A cleared P_SELECT or bumped nselcoll means we raced; rescan. */
838 if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
842 p->p_flag &= ~P_SELECT;
844 error = tsleep((caddr_t)&selwait, PCATCH, "select", timo);
850 p->p_flag &= ~P_SELECT;
851 /* select is not restarted after signals... */
852 if (error == ERESTART)
854 if (error == EWOULDBLOCK)
856 #define putbits(name, x) \
857 if (uap->name && (error2 = copyout(obits[x], uap->name, ncpbytes))) \
867 if (selbits != &s_selbits[0])
868 free(selbits, M_SELECT);
/*
 * Scan the three input fd_sets, polling each set descriptor with the
 * event class appropriate to its set (read/write/exception) and
 * setting the corresponding bit in the output sets when ready.
 */
873 selscan(struct proc *p, fd_mask **ibits, fd_mask **obits, int nfd, int *res)
879 /* Note: backend also returns POLLHUP/POLLERR if appropriate. */
880 static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND };
882 for (msk = 0; msk < 3; msk++) {
883 if (ibits[msk] == NULL)
885 for (i = 0; i < nfd; i += NFDBITS) {
886 bits = ibits[msk][i/NFDBITS];
887 /* ffs(int mask) not portable, fd_mask is long */
888 for (fd = i; bits && fd < nfd; fd++, bits >>= 1) {
891 fp = holdfp(p->p_fd, fd, -1);
894 if (fo_poll(fp, flag[msk], fp->f_cred)) {
895 obits[msk][(fd)/NFDBITS] |=
896 ((fd_mask)1 << ((fd) % NFDBITS));
/*
 * poll() system call.  Copies the user pollfd array in (on-stack
 * smallbits[] for <=32 entries, else M_TEMP), scans it with
 * pollscan(), and uses the same selwait sleep / P_SELECT collision
 * protocol as select() until ready, timeout, or signal.
 */
911 poll(struct poll_args *uap)
914 struct pollfd smallbits[32];
915 struct timeval atv, rtv, ttv;
916 int ncoll, error = 0, timo;
919 struct proc *p = curproc;
923 * This is kinda bogus. We have fd limits, but that is not
924 * really related to the size of the pollfd array. Make sure
925 * we let the process use at least FD_SETSIZE entries and at
926 * least enough for the current limits. We want to be reasonably
927 * safe, but not overly restrictive.
929 if (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && nfds > FD_SETSIZE)
931 ni = nfds * sizeof(struct pollfd);
932 if (ni > sizeof(smallbits))
933 bits = malloc(ni, M_TEMP, M_WAITOK);
936 error = copyin(uap->fds, bits, ni);
/* Millisecond timeout -> absolute uptime deadline (INFTIM = forever). */
939 if (uap->timeout != INFTIM) {
940 atv.tv_sec = uap->timeout / 1000;
941 atv.tv_usec = (uap->timeout % 1000) * 1000;
942 if (itimerfix(&atv)) {
946 getmicrouptime(&rtv);
947 timevaladd(&atv, &rtv);
/* Retry loop: scan, and if nothing ready, sleep until wakeup/timeout. */
955 p->p_flag |= P_SELECT;
956 error = pollscan(p, bits, nfds, &uap->sysmsg_result);
957 if (error || uap->sysmsg_result)
959 if (atv.tv_sec || atv.tv_usec) {
960 getmicrouptime(&rtv);
961 if (timevalcmp(&rtv, &atv, >=))
964 timevalsub(&ttv, &rtv);
/* Clamp the tsleep timeout to 24 hours to avoid tick overflow. */
965 timo = ttv.tv_sec > 24 * 60 * 60 ?
966 24 * 60 * 60 * hz : tvtohz_high(&ttv);
/* A cleared P_SELECT or bumped nselcoll means we raced; rescan. */
969 if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
973 p->p_flag &= ~P_SELECT;
974 error = tsleep((caddr_t)&selwait, PCATCH, "poll", timo);
979 p->p_flag &= ~P_SELECT;
980 /* poll is not restarted after signals... */
981 if (error == ERESTART)
983 if (error == EWOULDBLOCK)
/* Copy revents back to the user's pollfd array. */
986 error = copyout(bits, uap->fds, ni);
991 if (ni > sizeof(smallbits))
/*
 * Scan an array of pollfds: out-of-range or unopened descriptors get
 * POLLNVAL, negative fds are skipped, and open files are queried via
 * fo_poll() with the caller's requested events.
 */
997 pollscan(struct proc *p, struct pollfd *fds, u_int nfd, int *res)
1003 for (i = 0; i < nfd; i++, fds++) {
1004 if (fds->fd >= p->p_fd->fd_nfiles) {
1005 fds->revents = POLLNVAL;
1007 } else if (fds->fd < 0) {
1010 fp = holdfp(p->p_fd, fds->fd, -1);
1012 fds->revents = POLLNVAL;
1016 * Note: backend also returns POLLHUP and
1017 * POLLERR if appropriate.
1019 fds->revents = fo_poll(fp, fds->events,
1021 if (fds->revents != 0)
1032 * OpenBSD poll system call.
1033 * XXX this isn't quite a true representation.. OpenBSD uses select ops.
/*
 * OpenBSD-compat poll entry point: argument layout matches, so just
 * forward to the native poll() implementation.
 */
1036 openbsd_poll(struct openbsd_poll_args *uap)
1038 return (poll((struct poll_args *)uap));
/*
 * Generic poll routine for devices that are always ready: report any
 * requested read/write events as immediately satisfied.
 */
1043 seltrue(dev_t dev, int events, struct thread *td)
1045 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
1049 * Record a select request. A global wait must be used since a process/thread
1050 * might go away after recording its request.
/*
 * Record a select request. A global wait channel (selwait) is used
 * since the recorded process might exit before a wakeup.  If another
 * process is already recorded and still sleeping on selwait, mark the
 * selinfo collided (SI_COLL); otherwise record our pid.
 */
1053 selrecord(struct thread *selector, struct selinfo *sip)
1058 if ((p = selector->td_proc) == NULL)
1059 panic("selrecord: thread needs a process");
/* Already recorded by us: nothing to do. */
1062 if (sip->si_pid == mypid)
1064 if (sip->si_pid && (p = pfind(sip->si_pid)) &&
1065 p->p_wchan == (caddr_t)&selwait) {
1066 sip->si_flags |= SI_COLL;
1068 sip->si_pid = mypid;
1073 * Do a wakeup when a selectable event occurs.
1076 selwakeup(struct selinfo *sip)
1080 if (sip->si_pid == 0)
1082 if (sip->si_flags & SI_COLL) {
1084 sip->si_flags &= ~SI_COLL;
1085 wakeup((caddr_t)&selwait); /* YYY fixable */
1087 p = pfind(sip->si_pid);
1091 if (p->p_wchan == (caddr_t)&selwait) {
1093 * Flag the process to break the tsleep when
1094 * setrunnable is called, but only call setrunnable
1095 * here if the process is not in a stopped state.
1097 p->p_flag |= P_BREAKTSLEEP;
1098 if ((p->p_flag & P_STOPPED) == 0)
1100 } else if (p->p_flag & P_SELECT) {
1101 p->p_flag &= ~P_SELECT;