kernel - Major signal path adjustments to fix races, tsleep race fixes, +more
[dragonfly.git] / sys / kern / sys_generic.c
984263bc
MD
1/*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
2494f282 40 * $DragonFly: src/sys/kern/sys_generic.c,v 1.49 2008/05/05 22:09:44 dillon Exp $
984263bc
MD
41 */
42
43#include "opt_ktrace.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/sysproto.h>
e5857bf7 48#include <sys/event.h>
984263bc
MD
49#include <sys/filedesc.h>
50#include <sys/filio.h>
51#include <sys/fcntl.h>
52#include <sys/file.h>
53#include <sys/proc.h>
54#include <sys/signalvar.h>
55#include <sys/socketvar.h>
56#include <sys/uio.h>
57#include <sys/kernel.h>
ba023347 58#include <sys/kern_syscall.h>
984263bc 59#include <sys/malloc.h>
a0c5fc96 60#include <sys/mapped_ioctl.h>
984263bc 61#include <sys/poll.h>
a0c5fc96 62#include <sys/queue.h>
984263bc 63#include <sys/resourcevar.h>
8b5c39bb 64#include <sys/socketops.h>
984263bc
MD
65#include <sys/sysctl.h>
66#include <sys/sysent.h>
67#include <sys/buf.h>
68#ifdef KTRACE
69#include <sys/ktrace.h>
70#endif
71#include <vm/vm.h>
72#include <vm/vm_page.h>
684a93c4 73
dadab5e9 74#include <sys/file2.h>
684a93c4 75#include <sys/mplock2.h>
ac62ea3c 76#include <sys/spinlock2.h>
984263bc
MD
77
78#include <machine/limits.h>
79
80static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
a0c5fc96 81static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
984263bc
MD
82static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
83MALLOC_DEFINE(M_IOV, "iov", "large iov's");
84
e5857bf7 85typedef struct kfd_set {
8acdf1cf 86 fd_mask fds_bits[2];
e5857bf7
SG
87} kfd_set;
88
89enum select_copyin_states {
90 COPYIN_READ, COPYIN_WRITE, COPYIN_EXCEPT, COPYIN_DONE };
91
92struct select_kevent_copyin_args {
93 kfd_set *read_set;
94 kfd_set *write_set;
95 kfd_set *except_set;
96 int active_set; /* One of select_copyin_states */
97 struct lwp *lwp; /* Pointer to our lwp */
98 int num_fds; /* Number of file descriptors (syscall arg) */
99 int proc_fds; /* Processed fd's (wraps) */
100 int error; /* Returned to userland */
101};
102
7fbfbe29
SG
103struct poll_kevent_copyin_args {
104 struct lwp *lwp;
105 struct pollfd *fds;
106 int nfds;
107 int pfds;
108 int error;
109};
110
a3c18566 111static struct lwkt_token mioctl_token = LWKT_TOKEN_INITIALIZER(mioctl_token);
3c499555 112
ae7cb1b5 113static int doselect(int nd, fd_set *in, fd_set *ou, fd_set *ex,
8acdf1cf 114 struct timespec *ts, int *res);
7fbfbe29
SG
115static int dopoll(int nfds, struct pollfd *fds, struct timespec *ts,
116 int *res);
e54488bb
MD
117static int dofileread(int, struct file *, struct uio *, int, size_t *);
118static int dofilewrite(int, struct file *, struct uio *, int, size_t *);
7f83ed38 119
984263bc
MD
120/*
121 * Read system call.
f832287e
MD
122 *
123 * MPSAFE
984263bc 124 */
984263bc 125int
753fd850 126sys_read(struct read_args *uap)
984263bc 127{
dadab5e9 128 struct thread *td = curthread;
ba023347
DRJ
129 struct uio auio;
130 struct iovec aiov;
984263bc
MD
131 int error;
132
e54488bb
MD
133 if ((ssize_t)uap->nbyte < 0)
134 return(EINVAL);
135
ba023347
DRJ
136 aiov.iov_base = uap->buf;
137 aiov.iov_len = uap->nbyte;
138 auio.uio_iov = &aiov;
139 auio.uio_iovcnt = 1;
140 auio.uio_offset = -1;
141 auio.uio_resid = uap->nbyte;
142 auio.uio_rw = UIO_READ;
143 auio.uio_segflg = UIO_USERSPACE;
144 auio.uio_td = td;
145
e54488bb 146 error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
984263bc
MD
147 return(error);
148}
149
150/*
7f83ed38 151 * Positioned (Pread) read system call
f832287e
MD
152 *
153 * MPSAFE
984263bc 154 */
984263bc 155int
b09fd398 156sys_extpread(struct extpread_args *uap)
984263bc 157{
dadab5e9 158 struct thread *td = curthread;
ba023347
DRJ
159 struct uio auio;
160 struct iovec aiov;
984263bc 161 int error;
9ba76b73 162 int flags;
984263bc 163
e54488bb
MD
164 if ((ssize_t)uap->nbyte < 0)
165 return(EINVAL);
166
ba023347
DRJ
167 aiov.iov_base = uap->buf;
168 aiov.iov_len = uap->nbyte;
169 auio.uio_iov = &aiov;
170 auio.uio_iovcnt = 1;
171 auio.uio_offset = uap->offset;
172 auio.uio_resid = uap->nbyte;
173 auio.uio_rw = UIO_READ;
174 auio.uio_segflg = UIO_USERSPACE;
175 auio.uio_td = td;
176
9ba76b73
MD
177 flags = uap->flags & O_FMASK;
178 if (uap->offset != (off_t)-1)
179 flags |= O_FOFFSET;
180
e54488bb 181 error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
984263bc
MD
182 return(error);
183}
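
The extended pread above only tags the request with O_FOFFSET when a real offset is supplied; an offset of (off_t)-1 degrades to an ordinary sequential read at the current file position. A minimal userland sketch, assuming the DragonFly libc prototype ssize_t extpread(int, void *, size_t, int, off_t) and a hypothetical read_at() helper:

#include <unistd.h>

static ssize_t
read_at(int fd, void *buf, size_t len, off_t off)
{
	/* off == -1: plain sequential read; otherwise a pread()-style
	 * positional read, per the O_FOFFSET test in sys_extpread(). */
	return (extpread(fd, buf, len, 0, off));
}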
184
7f83ed38
MD
185/*
186 * Scatter read system call.
f832287e
MD
187 *
188 * MPSAFE
7f83ed38 189 */
984263bc 190int
753fd850 191sys_readv(struct readv_args *uap)
984263bc 192{
dadab5e9 193 struct thread *td = curthread;
984263bc 194 struct uio auio;
ba023347
DRJ
195 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
196 int error;
984263bc 197
ba023347 198 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
ef5c76d7 199 &auio.uio_resid);
ba023347
DRJ
200 if (error)
201 return (error);
202 auio.uio_iov = iov;
203 auio.uio_iovcnt = uap->iovcnt;
204 auio.uio_offset = -1;
984263bc
MD
205 auio.uio_rw = UIO_READ;
206 auio.uio_segflg = UIO_USERSPACE;
dadab5e9 207 auio.uio_td = td;
984263bc 208
e54488bb 209 error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
ba023347
DRJ
210
211 iovec_free(&iov, aiov);
984263bc
MD
212 return (error);
213}
214
7f83ed38
MD
215
216/*
217 * Scatter positioned read system call.
f832287e
MD
218 *
219 * MPSAFE
7f83ed38
MD
220 */
221int
b09fd398 222sys_extpreadv(struct extpreadv_args *uap)
7f83ed38
MD
223{
224 struct thread *td = curthread;
225 struct uio auio;
226 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
227 int error;
9ba76b73 228 int flags;
7f83ed38
MD
229
230 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
ef5c76d7 231 &auio.uio_resid);
7f83ed38
MD
232 if (error)
233 return (error);
234 auio.uio_iov = iov;
235 auio.uio_iovcnt = uap->iovcnt;
236 auio.uio_offset = uap->offset;
237 auio.uio_rw = UIO_READ;
238 auio.uio_segflg = UIO_USERSPACE;
239 auio.uio_td = td;
240
9ba76b73
MD
241 flags = uap->flags & O_FMASK;
242 if (uap->offset != (off_t)-1)
243 flags |= O_FOFFSET;
244
e54488bb 245 error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
7f83ed38
MD
246
247 iovec_free(&iov, aiov);
248 return(error);
249}
250
f832287e
MD
251/*
252 * MPSAFE
253 */
984263bc 254int
e54488bb 255kern_preadv(int fd, struct uio *auio, int flags, size_t *res)
984263bc 256{
dadab5e9
MD
257 struct thread *td = curthread;
258 struct proc *p = td->td_proc;
41c20dac 259 struct file *fp;
7f83ed38 260 int error;
984263bc 261
ba023347
DRJ
262 KKASSERT(p);
263
228b401d 264 fp = holdfp(p->p_fd, fd, FREAD);
ba023347 265 if (fp == NULL)
984263bc 266 return (EBADF);
9ba76b73 267 if (flags & O_FOFFSET && fp->f_type != DTYPE_VNODE) {
ba023347 268 error = ESPIPE;
7f83ed38
MD
269 } else {
270 error = dofileread(fd, fp, auio, flags, res);
984263bc 271 }
9f87144f 272 fdrop(fp);
7f83ed38
MD
273 return(error);
274}
275
276/*
277 * Common code for readv and preadv that reads data in
278 * from a file using the passed in uio, offset, and flags.
f832287e
MD
279 *
280 * MPALMOSTSAFE - ktrace needs help
7f83ed38
MD
281 */
282static int
e54488bb 283dofileread(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
7f83ed38 284{
7f83ed38 285 int error;
e54488bb 286 size_t len;
7f83ed38 287#ifdef KTRACE
fc9ae81d 288 struct thread *td = curthread;
7f83ed38
MD
289 struct iovec *ktriov = NULL;
290 struct uio ktruio;
291#endif
292
984263bc
MD
293#ifdef KTRACE
294 /*
295 * if tracing, save a copy of iovec
296 */
dadab5e9 297 if (KTRPOINT(td, KTR_GENIO)) {
ba023347
DRJ
298 int iovlen = auio->uio_iovcnt * sizeof(struct iovec);
299
984263bc 300 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
ba023347
DRJ
301 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
302 ktruio = *auio;
984263bc
MD
303 }
304#endif
ba023347 305 len = auio->uio_resid;
87de5057 306 error = fo_read(fp, auio, fp->f_cred, flags);
ba023347
DRJ
307 if (error) {
308 if (auio->uio_resid != len && (error == ERESTART ||
984263bc
MD
309 error == EINTR || error == EWOULDBLOCK))
310 error = 0;
311 }
984263bc
MD
312#ifdef KTRACE
313 if (ktriov != NULL) {
314 if (error == 0) {
315 ktruio.uio_iov = ktriov;
ba023347 316 ktruio.uio_resid = len - auio->uio_resid;
f832287e 317 get_mplock();
9fb04d14 318 ktrgenio(td->td_lwp, fd, UIO_READ, &ktruio, error);
f832287e 319 rel_mplock();
984263bc
MD
320 }
321 FREE(ktriov, M_TEMP);
322 }
323#endif
ba023347
DRJ
324 if (error == 0)
325 *res = len - auio->uio_resid;
7f83ed38
MD
326
327 return(error);
984263bc
MD
328}
329
330/*
331 * Write system call
f832287e
MD
332 *
333 * MPSAFE
984263bc 334 */
984263bc 335int
753fd850 336sys_write(struct write_args *uap)
984263bc 337{
dadab5e9 338 struct thread *td = curthread;
ba023347
DRJ
339 struct uio auio;
340 struct iovec aiov;
984263bc
MD
341 int error;
342
e54488bb
MD
343 if ((ssize_t)uap->nbyte < 0)
344 return(EINVAL);
345
ba023347
DRJ
346 aiov.iov_base = (void *)(uintptr_t)uap->buf;
347 aiov.iov_len = uap->nbyte;
348 auio.uio_iov = &aiov;
349 auio.uio_iovcnt = 1;
350 auio.uio_offset = -1;
351 auio.uio_resid = uap->nbyte;
352 auio.uio_rw = UIO_WRITE;
353 auio.uio_segflg = UIO_USERSPACE;
354 auio.uio_td = td;
355
e54488bb 356 error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);
dadab5e9 357
984263bc
MD
358 return(error);
359}
360
361/*
362 * Pwrite system call
f832287e
MD
363 *
364 * MPSAFE
984263bc 365 */
984263bc 366int
b09fd398 367sys_extpwrite(struct extpwrite_args *uap)
984263bc 368{
dadab5e9 369 struct thread *td = curthread;
ba023347
DRJ
370 struct uio auio;
371 struct iovec aiov;
984263bc 372 int error;
9ba76b73 373 int flags;
984263bc 374
e54488bb
MD
375 if ((ssize_t)uap->nbyte < 0)
376 return(EINVAL);
377
ba023347
DRJ
378 aiov.iov_base = (void *)(uintptr_t)uap->buf;
379 aiov.iov_len = uap->nbyte;
380 auio.uio_iov = &aiov;
381 auio.uio_iovcnt = 1;
382 auio.uio_offset = uap->offset;
383 auio.uio_resid = uap->nbyte;
384 auio.uio_rw = UIO_WRITE;
385 auio.uio_segflg = UIO_USERSPACE;
386 auio.uio_td = td;
387
9ba76b73
MD
388 flags = uap->flags & O_FMASK;
389 if (uap->offset != (off_t)-1)
390 flags |= O_FOFFSET;
e54488bb 391 error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
984263bc
MD
392 return(error);
393}
394
f832287e
MD
395/*
396 * MPSAFE
397 */
ba023347 398int
753fd850 399sys_writev(struct writev_args *uap)
ba023347 400{
dadab5e9 401 struct thread *td = curthread;
984263bc 402 struct uio auio;
ba023347
DRJ
403 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
404 int error;
984263bc 405
ba023347 406 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
ef5c76d7 407 &auio.uio_resid);
ba023347
DRJ
408 if (error)
409 return (error);
410 auio.uio_iov = iov;
411 auio.uio_iovcnt = uap->iovcnt;
412 auio.uio_offset = -1;
984263bc
MD
413 auio.uio_rw = UIO_WRITE;
414 auio.uio_segflg = UIO_USERSPACE;
dadab5e9 415 auio.uio_td = td;
ba023347 416
e54488bb 417 error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);
ba023347
DRJ
418
419 iovec_free(&iov, aiov);
984263bc
MD
420 return (error);
421}
422
7f83ed38 423
984263bc 424/*
7f83ed38 425 * Gather positioned write system call
f832287e
MD
426 *
427 * MPSAFE
984263bc 428 */
984263bc 429int
b09fd398 430sys_extpwritev(struct extpwritev_args *uap)
7f83ed38
MD
431{
432 struct thread *td = curthread;
433 struct uio auio;
434 struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
435 int error;
9ba76b73 436 int flags;
7f83ed38
MD
437
438 error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
ef5c76d7 439 &auio.uio_resid);
7f83ed38
MD
440 if (error)
441 return (error);
442 auio.uio_iov = iov;
443 auio.uio_iovcnt = uap->iovcnt;
444 auio.uio_offset = uap->offset;
445 auio.uio_rw = UIO_WRITE;
446 auio.uio_segflg = UIO_USERSPACE;
447 auio.uio_td = td;
448
9ba76b73
MD
449 flags = uap->flags & O_FMASK;
450 if (uap->offset != (off_t)-1)
451 flags |= O_FOFFSET;
452
e54488bb 453 error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
7f83ed38
MD
454
455 iovec_free(&iov, aiov);
456 return(error);
457}
458
f832287e
MD
459/*
460 * MPSAFE
461 */
7f83ed38 462int
e54488bb 463kern_pwritev(int fd, struct uio *auio, int flags, size_t *res)
984263bc 464{
dadab5e9
MD
465 struct thread *td = curthread;
466 struct proc *p = td->td_proc;
41c20dac 467 struct file *fp;
7f83ed38 468 int error;
984263bc 469
dadab5e9 470 KKASSERT(p);
dadab5e9 471
228b401d 472 fp = holdfp(p->p_fd, fd, FWRITE);
ba023347 473 if (fp == NULL)
984263bc 474 return (EBADF);
9ba76b73 475 else if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
ba023347 476 error = ESPIPE;
7f83ed38
MD
477 } else {
478 error = dofilewrite(fd, fp, auio, flags, res);
984263bc 479 }
7f83ed38 480
9f87144f 481 fdrop(fp);
7f83ed38
MD
482 return (error);
483}
484
485/*
486 * Common code for writev and pwritev that writes data to
487 * a file using the passed in uio, offset, and flags.
f832287e
MD
488 *
489 * MPALMOSTSAFE - ktrace needs help
7f83ed38
MD
490 */
491static int
e54488bb 492dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
7f83ed38
MD
493{
494 struct thread *td = curthread;
7278a846 495 struct lwp *lp = td->td_lwp;
7f83ed38 496 int error;
e54488bb 497 size_t len;
7f83ed38
MD
498#ifdef KTRACE
499 struct iovec *ktriov = NULL;
500 struct uio ktruio;
501#endif
502
984263bc
MD
503#ifdef KTRACE
504 /*
505 * if tracing, save a copy of iovec and uio
506 */
dadab5e9 507 if (KTRPOINT(td, KTR_GENIO)) {
ba023347
DRJ
508 int iovlen = auio->uio_iovcnt * sizeof(struct iovec);
509
984263bc 510 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
ba023347
DRJ
511 bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
512 ktruio = *auio;
984263bc
MD
513 }
514#endif
ba023347 515 len = auio->uio_resid;
87de5057 516 error = fo_write(fp, auio, fp->f_cred, flags);
ba023347
DRJ
517 if (error) {
518 if (auio->uio_resid != len && (error == ERESTART ||
984263bc
MD
519 error == EINTR || error == EWOULDBLOCK))
520 error = 0;
7f83ed38 521 /* Socket layer is responsible for issuing SIGPIPE. */
73241316 522 if (error == EPIPE)
9fb04d14 523 lwpsignal(lp->lwp_proc, lp, SIGPIPE);
984263bc 524 }
984263bc
MD
525#ifdef KTRACE
526 if (ktriov != NULL) {
527 if (error == 0) {
528 ktruio.uio_iov = ktriov;
ba023347 529 ktruio.uio_resid = len - auio->uio_resid;
f832287e 530 get_mplock();
9fb04d14 531 ktrgenio(lp, fd, UIO_WRITE, &ktruio, error);
f832287e 532 rel_mplock();
984263bc
MD
533 }
534 FREE(ktriov, M_TEMP);
535 }
536#endif
ba023347
DRJ
537 if (error == 0)
538 *res = len - auio->uio_resid;
7f83ed38
MD
539
540 return(error);
984263bc
MD
541}
542
543/*
544 * Ioctl system call
3919ced0 545 *
3c499555 546 * MPSAFE
984263bc 547 */
984263bc 548int
753fd850 549sys_ioctl(struct ioctl_args *uap)
984263bc 550{
3919ced0
MD
551 int error;
552
3919ced0 553 error = mapped_ioctl(uap->fd, uap->com, uap->data, NULL, &uap->sysmsg);
3919ced0 554 return (error);
a0c5fc96
JS
555}
556
557struct ioctl_map_entry {
558 const char *subsys;
559 struct ioctl_map_range *cmd_ranges;
560 LIST_ENTRY(ioctl_map_entry) entries;
561};
562
25b5b94d
SS
563/*
564 * The true heart of all ioctl syscall handlers (native, emulation).
565 * If map != NULL, it will be searched for a matching entry for com,
566 * and appropriate conversions/conversion functions will be utilized.
3c499555
MD
567 *
568 * MPSAFE
25b5b94d 569 */
a0c5fc96 570int
87baaf0c
MD
571mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map,
572 struct sysmsg *msg)
a0c5fc96 573{
dadab5e9
MD
574 struct thread *td = curthread;
575 struct proc *p = td->td_proc;
87de5057 576 struct ucred *cred;
41c20dac 577 struct file *fp;
a0c5fc96 578 struct ioctl_map_range *iomc = NULL;
984263bc 579 int error;
1fd87d54 580 u_int size;
a0c5fc96 581 u_long ocom = com;
984263bc
MD
582 caddr_t data, memp;
583 int tmp;
584#define STK_PARAMS 128
585 union {
586 char stkbuf[STK_PARAMS];
587 long align;
588 } ubuf;
589
dadab5e9 590 KKASSERT(p);
9910d07b 591 cred = td->td_ucred;
984263bc 592
228b401d
MD
593 fp = holdfp(p->p_fd, fd, FREAD|FWRITE);
594 if (fp == NULL)
a0c5fc96
JS
595 return(EBADF);
596
597 if (map != NULL) { /* obey translation map */
598 u_long maskcmd;
599 struct ioctl_map_entry *e;
600
601 maskcmd = com & map->mask;
602
12586b82 603 lwkt_gettoken(&mioctl_token);
a0c5fc96
JS
604 LIST_FOREACH(e, &map->mapping, entries) {
605 for (iomc = e->cmd_ranges; iomc->start != 0 ||
25b5b94d
SS
606 iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
607 iomc->mapfunc != NULL;
a0c5fc96
JS
608 iomc++) {
609 if (maskcmd >= iomc->start &&
610 maskcmd <= iomc->end)
611 break;
612 }
613
614 /* Did we find a match? */
615 if (iomc->start != 0 || iomc->maptocmd != 0 ||
25b5b94d 616 iomc->wrapfunc != NULL || iomc->mapfunc != NULL)
a0c5fc96
JS
617 break;
618 }
12586b82 619 lwkt_reltoken(&mioctl_token);
a0c5fc96
JS
620
621 if (iomc == NULL ||
622 (iomc->start == 0 && iomc->maptocmd == 0
25b5b94d 623 && iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
6ea70f76 624 kprintf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
a0c5fc96
JS
625 map->sys, fd, maskcmd,
626 (int)((maskcmd >> 8) & 0xff),
627 (int)(maskcmd & 0xff));
228b401d
MD
628 error = EINVAL;
629 goto done;
a0c5fc96 630 }
984263bc 631
25b5b94d
SS
632 /*
633 * If it's a non-range one to one mapping, maptocmd should be
634 * correct. If it's a ranged one to one mapping, we pass the
635 * original value of com, and for a range mapped to a different
636 * range, we always need a mapping function to translate the
637 * ioctl to our native ioctl. Ex. 6500-65ff <-> 9500-95ff
638 */
639 if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
640 com = iomc->maptocmd;
641 } else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
642 if (iomc->mapfunc != NULL)
643 com = iomc->mapfunc(iomc->start, iomc->end,
644 iomc->start, iomc->end,
645 com, com);
646 } else {
647 if (iomc->mapfunc != NULL) {
648 com = iomc->mapfunc(iomc->start, iomc->end,
649 iomc->maptocmd, iomc->maptoend,
650 com, ocom);
651 } else {
6ea70f76 652 kprintf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
25b5b94d
SS
653 map->sys, fd, maskcmd,
654 (int)((maskcmd >> 8) & 0xff),
655 (int)(maskcmd & 0xff));
228b401d
MD
656 error = EINVAL;
657 goto done;
25b5b94d
SS
658 }
659 }
a0c5fc96
JS
660 }
661
662 switch (com) {
984263bc 663 case FIONCLEX:
228b401d
MD
664 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
665 goto done;
984263bc 666 case FIOCLEX:
228b401d
MD
667 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
668 goto done;
984263bc
MD
669 }
670
671 /*
672 * Interpret high order word to find amount of data to be
673 * copied to/from the user's address space.
674 */
675 size = IOCPARM_LEN(com);
228b401d
MD
676 if (size > IOCPARM_MAX) {
677 error = ENOTTY;
678 goto done;
679 }
984263bc 680
984263bc 681 if (size > sizeof (ubuf.stkbuf)) {
efda3bd0 682 memp = kmalloc(size, M_IOCTLOPS, M_WAITOK);
984263bc
MD
683 data = memp;
684 } else {
d0d4a734 685 memp = NULL;
984263bc
MD
686 data = ubuf.stkbuf;
687 }
a0c5fc96
JS
688 if ((com & IOC_IN) != 0) {
689 if (size != 0) {
e54488bb 690 error = copyin(uspc_data, data, (size_t)size);
984263bc 691 if (error) {
a0c5fc96 692 if (memp != NULL)
efda3bd0 693 kfree(memp, M_IOCTLOPS);
228b401d 694 goto done;
984263bc
MD
695 }
696 } else {
a0c5fc96 697 *(caddr_t *)data = uspc_data;
984263bc 698 }
a0c5fc96 699 } else if ((com & IOC_OUT) != 0 && size) {
984263bc
MD
700 /*
701 * Zero the buffer so the user always
702 * gets back something deterministic.
703 */
e54488bb 704 bzero(data, (size_t)size);
a0c5fc96
JS
705 } else if ((com & IOC_VOID) != 0) {
706 *(caddr_t *)data = uspc_data;
984263bc
MD
707 }
708
709 switch (com) {
984263bc
MD
710 case FIONBIO:
711 if ((tmp = *(int *)data))
3c499555 712 atomic_set_int(&fp->f_flag, FNONBLOCK);
984263bc 713 else
3c499555 714 atomic_clear_int(&fp->f_flag, FNONBLOCK);
9ba76b73 715 error = 0;
984263bc
MD
716 break;
717
718 case FIOASYNC:
719 if ((tmp = *(int *)data))
3c499555 720 atomic_set_int(&fp->f_flag, FASYNC);
984263bc 721 else
3c499555 722 atomic_clear_int(&fp->f_flag, FASYNC);
87baaf0c 723 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred, msg);
984263bc
MD
724 break;
725
726 default:
a0c5fc96
JS
727 /*
728 * If there is a override function,
729 * call it instead of directly routing the call
730 */
25b5b94d 731 if (map != NULL && iomc->wrapfunc != NULL)
87de5057 732 error = iomc->wrapfunc(fp, com, ocom, data, cred);
a0c5fc96 733 else
87baaf0c 734 error = fo_ioctl(fp, com, data, cred, msg);
984263bc
MD
735 /*
736 * Copy any data to user, size was
737 * already set and checked above.
738 */
a0c5fc96 739 if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
e54488bb 740 error = copyout(data, uspc_data, (size_t)size);
984263bc
MD
741 break;
742 }
a0c5fc96 743 if (memp != NULL)
efda3bd0 744 kfree(memp, M_IOCTLOPS);
228b401d 745done:
9f87144f 746 fdrop(fp);
a0c5fc96
JS
747 return(error);
748}
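
For reference, a short sketch of the command-word layout that the size and direction logic above decodes, using the standard <sys/ioccom.h> constructors; the MYIOC_* commands are made-up examples:

#include <sys/ioccom.h>

#define MYIOC_GETVAL	_IOR('M', 1, int)	/* IOC_OUT: copied back to userland */
#define MYIOC_SETVAL	_IOW('M', 2, int)	/* IOC_IN: copied in before fo_ioctl() */

/*
 * IOCPARM_LEN() of either command is sizeof(int), well under STK_PARAMS,
 * so mapped_ioctl() stages the argument in the on-stack ubuf.stkbuf
 * instead of calling kmalloc().
 */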
749
3c499555
MD
750/*
751 * MPSAFE
752 */
a0c5fc96
JS
753int
754mapped_ioctl_register_handler(struct ioctl_map_handler *he)
755{
756 struct ioctl_map_entry *ne;
757
758 KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
759 he->subsys != NULL && *he->subsys != '\0');
760
3c499555
MD
761 ne = kmalloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP,
762 M_WAITOK | M_ZERO);
a0c5fc96
JS
763
764 ne->subsys = he->subsys;
765 ne->cmd_ranges = he->cmd_ranges;
766
12586b82 767 lwkt_gettoken(&mioctl_token);
a0c5fc96 768 LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
12586b82 769 lwkt_reltoken(&mioctl_token);
a0c5fc96
JS
770
771 return(0);
772}
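
A hedged sketch of how an emulation layer might feed this registration path. The names are hypothetical and designated initializers are used so that only the fields referenced in this file (subsys, map, cmd_ranges, start, end, maptocmd, maptoend) are assumed:

extern struct ioctl_map example_ioctl_map;	/* the emulation's map (assumed) */

static struct ioctl_map_range example_ranges[] = {
	/* one-to-one range: foreign 0x5401-0x54ff maps onto the same native range */
	{ .start = 0x5401, .end = 0x54ff, .maptocmd = 0x5401, .maptoend = 0x54ff },
	{ 0 }	/* all-zero sentinel, matching the termination test in mapped_ioctl() */
};

static struct ioctl_map_handler example_handler = {
	.subsys = "example",
	.map = &example_ioctl_map,
	.cmd_ranges = example_ranges
};

/* typically registered once at module load:
 *	mapped_ioctl_register_handler(&example_handler);
 */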
773
3c499555
MD
774/*
775 * MPSAFE
776 */
a0c5fc96
JS
777int
778mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
779{
780 struct ioctl_map_entry *ne;
3c499555 781 int error = EINVAL;
a0c5fc96
JS
782
783 KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);
784
12586b82 785 lwkt_gettoken(&mioctl_token);
a0c5fc96 786 LIST_FOREACH(ne, &he->map->mapping, entries) {
3c499555
MD
787 if (ne->cmd_ranges == he->cmd_ranges) {
788 LIST_REMOVE(ne, entries);
789 kfree(ne, M_IOCTLMAP);
790 error = 0;
791 break;
792 }
a0c5fc96 793 }
12586b82 794 lwkt_reltoken(&mioctl_token);
3c499555 795 return(error);
984263bc
MD
796}
797
798static int nselcoll; /* Select collisions since boot */
799int selwait;
800SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
fe24d605
MD
801static int nseldebug;
802SYSCTL_INT(_kern, OID_AUTO, nseldebug, CTLFLAG_RW, &nseldebug, 0, "");
984263bc
MD
803
804/*
805 * Select system call.
3919ced0 806 *
e5857bf7 807 * MPSAFE
984263bc 808 */
984263bc 809int
753fd850 810sys_select(struct select_args *uap)
984263bc 811{
ae7cb1b5 812 struct timeval ktv;
e5857bf7 813 struct timespec *ktsp, kts;
ae7cb1b5
NT
814 int error;
815
816 /*
817 * Get timeout if any.
818 */
819 if (uap->tv != NULL) {
820 error = copyin(uap->tv, &ktv, sizeof (ktv));
821 if (error)
822 return (error);
e5857bf7
SG
823 TIMEVAL_TO_TIMESPEC(&ktv, &kts);
824 ktsp = &kts;
ae7cb1b5 825 } else {
e5857bf7 826 ktsp = NULL;
ae7cb1b5
NT
827 }
828
829 /*
830 * Do real work.
831 */
e5857bf7 832 error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
8acdf1cf 833 &uap->sysmsg_result);
ae7cb1b5
NT
834
835 return (error);
836}
837
838
839/*
840 * Pselect system call.
841 */
842int
843sys_pselect(struct pselect_args *uap)
844{
845 struct thread *td = curthread;
846 struct lwp *lp = td->td_lwp;
e5857bf7 847 struct timespec *ktsp, kts;
ae7cb1b5
NT
848 sigset_t sigmask;
849 int error;
850
851 /*
e5857bf7 852 * Get timeout if any.
ae7cb1b5
NT
853 */
854 if (uap->ts != NULL) {
855 error = copyin(uap->ts, &kts, sizeof (kts));
856 if (error)
857 return (error);
e5857bf7 858 ktsp = &kts;
ae7cb1b5 859 } else {
e5857bf7 860 ktsp = NULL;
ae7cb1b5
NT
861 }
862
863 /*
864 * Install temporary signal mask if any provided.
865 */
866 if (uap->sigmask != NULL) {
867 error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
868 if (error)
869 return (error);
73241316 870 lwkt_gettoken(&lp->lwp_proc->p_token);
ae7cb1b5
NT
871 lp->lwp_oldsigmask = lp->lwp_sigmask;
872 SIG_CANTMASK(sigmask);
873 lp->lwp_sigmask = sigmask;
73241316 874 lwkt_reltoken(&lp->lwp_proc->p_token);
ae7cb1b5
NT
875 }
876
877 /*
878 * Do real job.
879 */
e5857bf7 880 error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
8acdf1cf 881 &uap->sysmsg_result);
ae7cb1b5
NT
882
883 if (uap->sigmask != NULL) {
73241316 884 lwkt_gettoken(&lp->lwp_proc->p_token);
ae7cb1b5
NT
885 /* doselect() responsible for turning ERESTART into EINTR */
886 KKASSERT(error != ERESTART);
887 if (error == EINTR) {
888 /*
889 * We can't restore the previous signal mask now
890 * because it could block the signal that interrupted
891 * us. So make a note to restore it after executing
892 * the handler.
893 */
4643740a 894 lp->lwp_flags |= LWP_OLDMASK;
ae7cb1b5
NT
895 } else {
896 /*
897 * No handler to run. Restore previous mask immediately.
898 */
899 lp->lwp_sigmask = lp->lwp_oldsigmask;
900 }
73241316 901 lwkt_reltoken(&lp->lwp_proc->p_token);
ae7cb1b5
NT
902 }
903
904 return (error);
905}
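
The lwp_oldsigmask/LWP_OLDMASK handling above is what makes the classic userland pattern race-free: a signal stays blocked everywhere except inside the wait itself, so one that arrives just before the sleep is still delivered through it. A standard usage sketch with a hypothetical helper name:

#include <sys/select.h>
#include <signal.h>

static int
wait_readable(int fd, const sigset_t *blocked)
{
	sigset_t waitmask = *blocked;
	fd_set rset;

	sigdelset(&waitmask, SIGCHLD);	/* unblock SIGCHLD only while sleeping */
	FD_ZERO(&rset);
	FD_SET(fd, &rset);

	/* the mask swap and the sleep are atomic; EINTR means the handler ran */
	return (pselect(fd + 1, &rset, NULL, NULL, NULL, &waitmask));
}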
906
e5857bf7 907static int
8acdf1cf 908select_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
e5857bf7
SG
909{
910 struct select_kevent_copyin_args *skap = NULL;
8acdf1cf 911 struct kevent *kev;
e5857bf7
SG
912 int fd;
913 kfd_set *fdp = NULL;
914 short filter = 0;
915 u_int fflags = 0;
916
917 skap = (struct select_kevent_copyin_args *)arg;
918
8acdf1cf
MD
919 if (*events == maxevents)
920 return (0);
921
e5857bf7
SG
922 while (skap->active_set < COPYIN_DONE) {
923 switch (skap->active_set) {
924 case COPYIN_READ:
8acdf1cf
MD
925 /*
926 * Register descriptors for the read filter
927 */
e5857bf7
SG
928 fdp = skap->read_set;
929 filter = EVFILT_READ;
57b24f4e 930 fflags = NOTE_OLDAPI;
8acdf1cf
MD
931 if (fdp)
932 break;
933 ++skap->active_set;
934 skap->proc_fds = 0;
935 /* fall through */
e5857bf7 936 case COPYIN_WRITE:
8acdf1cf
MD
937 /*
938 * Register descriptors for the write filter
939 */
e5857bf7
SG
940 fdp = skap->write_set;
941 filter = EVFILT_WRITE;
57b24f4e 942 fflags = NOTE_OLDAPI;
8acdf1cf
MD
943 if (fdp)
944 break;
945 ++skap->active_set;
946 skap->proc_fds = 0;
947 /* fall through */
e5857bf7 948 case COPYIN_EXCEPT:
8acdf1cf
MD
949 /*
950 * Register descriptors for the exception filter
951 */
e5857bf7 952 fdp = skap->except_set;
73c344d3 953 filter = EVFILT_EXCEPT;
57b24f4e 954 fflags = NOTE_OLDAPI | NOTE_OOB;
8acdf1cf
MD
955 if (fdp)
956 break;
957 ++skap->active_set;
958 skap->proc_fds = 0;
959 /* fall through */
960 case COPYIN_DONE:
961 /*
962 * Nothing left to register
963 */
964 return(0);
965 /* NOT REACHED */
e5857bf7
SG
966 }
967
8acdf1cf
MD
968 while (skap->proc_fds < skap->num_fds) {
969 fd = skap->proc_fds;
e5857bf7 970 if (FD_ISSET(fd, fdp)) {
8acdf1cf
MD
971 kev = &kevp[*events];
972 EV_SET(kev, fd, filter,
973 EV_ADD|EV_ENABLE,
974 fflags, 0,
b4372719
MD
975 (void *)(uintptr_t)
976 skap->lwp->lwp_kqueue_serial);
e5857bf7 977 FD_CLR(fd, fdp);
8acdf1cf 978 ++*events;
22690e50
SG
979
980 if (nseldebug)
981 kprintf("select fd %d filter %d serial %d\n",
982 fd, filter, skap->lwp->lwp_kqueue_serial);
e5857bf7 983 }
8acdf1cf
MD
984 ++skap->proc_fds;
985 if (*events == maxevents)
e5857bf7
SG
986 return (0);
987 }
e5857bf7
SG
988 skap->active_set++;
989 skap->proc_fds = 0;
990 }
991
992 return (0);
993}
994
995static int
996select_copyout(void *arg, struct kevent *kevp, int count, int *res)
997{
998 struct select_kevent_copyin_args *skap;
999 struct kevent kev;
73c344d3 1000 int i = 0;
e5857bf7
SG
1001
1002 skap = (struct select_kevent_copyin_args *)arg;
1003
e5857bf7 1004 for (i = 0; i < count; ++i) {
22690e50
SG
1005 /*
1006 * Filter out and delete spurious events
1007 */
b4372719
MD
1008 if ((u_int)(uintptr_t)kevp[i].udata !=
1009 skap->lwp->lwp_kqueue_serial) {
8acdf1cf 1010 kev = kevp[i];
e5857bf7 1011 kev.flags = EV_DISABLE|EV_DELETE;
8acdf1cf 1012 kqueue_register(&skap->lwp->lwp_kqueue, &kev);
22690e50 1013 if (nseldebug)
c9a1a153
SW
1014 kprintf("select fd %ju mismatched serial %d\n",
1015 (uintmax_t)kevp[i].ident,
1016 skap->lwp->lwp_kqueue_serial);
22690e50
SG
1017 continue;
1018 }
1019
1020 /*
1021 * Handle errors
1022 */
1023 if (kevp[i].flags & EV_ERROR) {
23a031e3
SZ
1024 int error = kevp[i].data;
1025
1026 switch (error) {
22690e50
SG
1027 case EBADF:
1028 /*
1029 * A bad file descriptor is considered a
1030 * fatal error for select, bail out.
1031 */
23a031e3
SZ
1032 skap->error = error;
1033 *res = -1;
1034 return error;
1035
22690e50
SG
1036 default:
1037 /*
1038 * Select silently swallows any unknown errors
1039 * for descriptors in the read or write sets.
4fbb9324
SG
1040 *
1041 * ALWAYS filter out EOPNOTSUPP errors from
1042 * filters (at least until all filters support
1043 * EVFILT_EXCEPT)
22690e50
SG
1044 */
1045 if (kevp[i].filter != EVFILT_READ &&
4fbb9324 1046 kevp[i].filter != EVFILT_WRITE &&
23a031e3
SZ
1047 error != EOPNOTSUPP) {
1048 skap->error = error;
1049 *res = -1;
1050 return error;
22690e50
SG
1051 }
1052 break;
1053 }
1054 if (nseldebug)
23a031e3 1055 kprintf("select fd %ju filter %d error %d\n",
c9a1a153 1056 (uintmax_t)kevp[i].ident,
23a031e3 1057 kevp[i].filter, error);
e5857bf7
SG
1058 continue;
1059 }
1060
73c344d3
SG
1061 switch (kevp[i].filter) {
1062 case EVFILT_READ:
e5857bf7
SG
1063 FD_SET(kevp[i].ident, skap->read_set);
1064 break;
73c344d3 1065 case EVFILT_WRITE:
e5857bf7
SG
1066 FD_SET(kevp[i].ident, skap->write_set);
1067 break;
73c344d3 1068 case EVFILT_EXCEPT:
e5857bf7
SG
1069 FD_SET(kevp[i].ident, skap->except_set);
1070 break;
1071 }
1072
8acdf1cf 1073 ++*res;
e5857bf7
SG
1074 }
1075
1076 return (0);
1077}
1078
ae7cb1b5 1079/*
8acdf1cf
MD
1080 * Copy select bits in from userland. Allocate kernel memory if the
1081 * set is large.
1082 */
1083static int
1084getbits(int bytes, fd_set *in_set, kfd_set **out_set, kfd_set *tmp_set)
1085{
1086 int error;
1087
1088 if (in_set) {
1089 if (bytes < sizeof(*tmp_set))
1090 *out_set = tmp_set;
1091 else
1092 *out_set = kmalloc(bytes, M_SELECT, M_WAITOK);
1093 error = copyin(in_set, *out_set, bytes);
1094 } else {
1095 *out_set = NULL;
1096 error = 0;
1097 }
1098 return (error);
1099}
1100
1101/*
1102 * Copy returned select bits back out to userland.
1103 */
1104static int
1105putbits(int bytes, kfd_set *in_set, fd_set *out_set)
1106{
1107 int error;
1108
1109 if (in_set) {
1110 error = copyout(in_set, out_set, bytes);
1111 } else {
1112 error = 0;
1113 }
1114 return (error);
1115}
1116
4979e190
MD
1117static int
1118dotimeout_only(struct timespec *ts)
1119{
1120 return(nanosleep1(ts, NULL));
1121}
1122
8acdf1cf 1123/*
ae7cb1b5
NT
1124 * Common code for sys_select() and sys_pselect().
1125 *
e5857bf7 1126 * in, out and ex are userland pointers. ts must point to validated
ae7cb1b5
NT
1127 * kernel-side timeout value or NULL for infinite timeout. res must
1128 * point to syscall return value.
1129 */
1130static int
e5857bf7 1131doselect(int nd, fd_set *read, fd_set *write, fd_set *except,
8acdf1cf 1132 struct timespec *ts, int *res)
ae7cb1b5 1133{
41c20dac 1134 struct proc *p = curproc;
e5857bf7
SG
1135 struct select_kevent_copyin_args *kap, ka;
1136 int bytes, error;
8acdf1cf
MD
1137 kfd_set read_tmp;
1138 kfd_set write_tmp;
1139 kfd_set except_tmp;
984263bc 1140
8acdf1cf 1141 *res = 0;
ae7cb1b5 1142 if (nd < 0)
984263bc 1143 return (EINVAL);
4979e190
MD
1144 if (nd == 0)
1145 return (dotimeout_only(ts));
1146
679058fb
MD
1147 if (nd > p->p_fd->fd_nfiles) /* limit kmalloc */
1148 nd = p->p_fd->fd_nfiles;
984263bc 1149
e5857bf7
SG
1150 kap = &ka;
1151 kap->lwp = curthread->td_lwp;
1152 kap->num_fds = nd;
1153 kap->proc_fds = 0;
1154 kap->error = 0;
8acdf1cf 1155 kap->active_set = COPYIN_READ;
e6f31a83 1156
8acdf1cf
MD
1157 /*
1158 * Calculate bytes based on the number of __fd_mask[] array entries
1159 * multiplied by the size of __fd_mask.
1160 */
1161 bytes = howmany(nd, __NFDBITS) * sizeof(__fd_mask);
984263bc 1162
d0d4a734
MD
1163 /* kap->read_set = NULL; not needed */
1164 kap->write_set = NULL;
1165 kap->except_set = NULL;
1166
8acdf1cf
MD
1167 error = getbits(bytes, read, &kap->read_set, &read_tmp);
1168 if (error == 0)
1169 error = getbits(bytes, write, &kap->write_set, &write_tmp);
1170 if (error == 0)
1171 error = getbits(bytes, except, &kap->except_set, &except_tmp);
e5857bf7 1172 if (error)
8acdf1cf
MD
1173 goto done;
1174
1175 /*
1176 * NOTE: Make sure the max events passed to kern_kevent() is
1177 * effectively unlimited; 0x7FFFFFFF is passed below to accomplish this.
1178 *
1179 * (*res) continues to increment as returned events are
1180 * loaded in.
1181 */
679058fb 1182 error = kern_kevent(&kap->lwp->lwp_kqueue, 0x7FFFFFFF, res, kap,
8acdf1cf
MD
1183 select_copyin, select_copyout, ts);
1184 if (error == 0)
1185 error = putbits(bytes, kap->read_set, read);
1186 if (error == 0)
1187 error = putbits(bytes, kap->write_set, write);
1188 if (error == 0)
1189 error = putbits(bytes, kap->except_set, except);
984263bc 1190
8acdf1cf 1191 /*
22690e50
SG
1192 * An error from an individual event that should be passed
1193 * back to userland (EBADF)
8acdf1cf
MD
1194 */
1195 if (kap->error)
1196 error = kap->error;
e5857bf7 1197
8acdf1cf
MD
1198 /*
1199 * Clean up.
1200 */
1201done:
1202 if (kap->read_set && kap->read_set != &read_tmp)
1203 kfree(kap->read_set, M_SELECT);
1204 if (kap->write_set && kap->write_set != &write_tmp)
1205 kfree(kap->write_set, M_SELECT);
1206 if (kap->except_set && kap->except_set != &except_tmp)
1207 kfree(kap->except_set, M_SELECT);
1208
22690e50 1209 kap->lwp->lwp_kqueue_serial += kap->num_fds;
73c344d3 1210
e5857bf7 1211 return (error);
984263bc
MD
1212}
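
One detail worth spelling out: select_copyin() stamps every registered kevent with the lwp's current lwp_kqueue_serial in udata, and the serial is advanced by num_fds just above, so events left over from an earlier select() fail the comparison in select_copyout() and are deleted as stale. A sketch of that check with a hypothetical helper name:

static __inline int
select_event_is_stale(const struct lwp *lp, const struct kevent *kev)
{
	/* the same comparison select_copyout() makes before trusting an event */
	return ((u_int)(uintptr_t)kev->udata != lp->lwp_kqueue_serial);
}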
1213
1214/*
1215 * Poll system call.
3919ced0 1216 *
7fbfbe29 1217 * MPSAFE
984263bc 1218 */
984263bc 1219int
753fd850 1220sys_poll(struct poll_args *uap)
984263bc 1221{
7fbfbe29
SG
1222 struct timespec ts, *tsp;
1223 int error;
984263bc 1224
ab2eb4eb 1225 if (uap->timeout != INFTIM) {
7fbfbe29
SG
1226 ts.tv_sec = uap->timeout / 1000;
1227 ts.tv_nsec = (uap->timeout % 1000) * 1000 * 1000;
1228 tsp = &ts;
984263bc 1229 } else {
7fbfbe29 1230 tsp = NULL;
984263bc 1231 }
e6f31a83 1232
7fbfbe29
SG
1233 error = dopoll(uap->nfds, uap->fds, tsp, &uap->sysmsg_result);
1234
984263bc
MD
1235 return (error);
1236}
1237
1238static int
7fbfbe29 1239poll_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
984263bc 1240{
7fbfbe29
SG
1241 struct poll_kevent_copyin_args *pkap;
1242 struct pollfd *pfd;
1243 struct kevent *kev;
1244 int kev_count;
1245
1246 pkap = (struct poll_kevent_copyin_args *)arg;
1247
1248 while (pkap->pfds < pkap->nfds) {
1249 pfd = &pkap->fds[pkap->pfds];
1250
1251 /* Clear return events */
1252 pfd->revents = 0;
1253
ffdd7c7c
SG
1254 /* Do not check if fd is equal to -1 */
1255 if (pfd->fd == -1) {
1256 ++pkap->pfds;
1257 continue;
1258 }
1259
7fbfbe29
SG
1260 kev_count = 0;
1261 if (pfd->events & (POLLIN | POLLRDNORM))
1262 kev_count++;
1263 if (pfd->events & (POLLOUT | POLLWRNORM))
1264 kev_count++;
1265 if (pfd->events & (POLLPRI | POLLRDBAND))
1266 kev_count++;
1267
1268 if (*events + kev_count > maxevents)
1269 return (0);
1270
fe24d605
MD
1271 /*
1272 * NOTE: A combined serial number and poll array index is
1273 * stored in kev->udata.
1274 */
7fbfbe29 1275 kev = &kevp[*events];
fe24d605 1276 if (pfd->events & (POLLIN | POLLRDNORM)) {
7fbfbe29 1277 EV_SET(kev++, pfd->fd, EVFILT_READ, EV_ADD|EV_ENABLE,
57b24f4e 1278 NOTE_OLDAPI, 0, (void *)(uintptr_t)
b4372719 1279 (pkap->lwp->lwp_kqueue_serial + pkap->pfds));
fe24d605
MD
1280 }
1281 if (pfd->events & (POLLOUT | POLLWRNORM)) {
7fbfbe29 1282 EV_SET(kev++, pfd->fd, EVFILT_WRITE, EV_ADD|EV_ENABLE,
57b24f4e 1283 NOTE_OLDAPI, 0, (void *)(uintptr_t)
b4372719 1284 (pkap->lwp->lwp_kqueue_serial + pkap->pfds));
fe24d605
MD
1285 }
1286 if (pfd->events & (POLLPRI | POLLRDBAND)) {
7fbfbe29 1287 EV_SET(kev++, pfd->fd, EVFILT_EXCEPT, EV_ADD|EV_ENABLE,
57b24f4e 1288 NOTE_OLDAPI | NOTE_OOB, 0,
b4372719
MD
1289 (void *)(uintptr_t)
1290 (pkap->lwp->lwp_kqueue_serial + pkap->pfds));
fe24d605
MD
1291 }
1292
1293 if (nseldebug) {
21ae0f4c
SG
1294 kprintf("poll index %d/%d fd %d events %08x serial %d\n",
1295 pkap->pfds, pkap->nfds-1, pfd->fd, pfd->events,
1296 pkap->lwp->lwp_kqueue_serial);
fe24d605 1297 }
7fbfbe29
SG
1298
1299 ++pkap->pfds;
1300 (*events) += kev_count;
1301 }
1302
1303 return (0);
1304}
1305
1306static int
1307poll_copyout(void *arg, struct kevent *kevp, int count, int *res)
1308{
1309 struct poll_kevent_copyin_args *pkap;
1310 struct pollfd *pfd;
1311 struct kevent kev;
8c4ed426 1312 int count_res;
984263bc 1313 int i;
fe24d605 1314 u_int pi;
7fbfbe29
SG
1315
1316 pkap = (struct poll_kevent_copyin_args *)arg;
1317
1318 for (i = 0; i < count; ++i) {
fe24d605
MD
1319 /*
1320 * Extract the poll array index and delete spurious events.
1321 * We can easily tell if the serial number is incorrect
1322 * by checking whether the extracted index is out of range.
1323 */
b4372719
MD
1324 pi = (u_int)(uintptr_t)kevp[i].udata -
1325 (u_int)pkap->lwp->lwp_kqueue_serial;
ffdd7c7c 1326
fe24d605
MD
1327 if (pi >= pkap->nfds) {
1328 kev = kevp[i];
1329 kev.flags = EV_DISABLE|EV_DELETE;
1330 kqueue_register(&pkap->lwp->lwp_kqueue, &kev);
1331 if (nseldebug)
21ae0f4c
SG
1332 kprintf("poll index %d out of range against serial %d\n",
1333 pi, pkap->lwp->lwp_kqueue_serial);
fe24d605
MD
1334 continue;
1335 }
1336 pfd = &pkap->fds[pi];
1337 if (kevp[i].ident == pfd->fd) {
3c2a46a7
SG
1338 /*
1339 * A single descriptor may generate an error against
8c4ed426
MD
1340 * more than one filter, make sure to set the
1341 * appropriate flags but do not increment (*res)
1342 * more than once.
3c2a46a7 1343 */
8c4ed426 1344 count_res = (pfd->revents == 0);
fe24d605
MD
1345 if (kevp[i].flags & EV_ERROR) {
1346 switch(kevp[i].data) {
fe24d605
MD
1347 case EBADF:
1348 /* Bad file descriptor */
8c4ed426 1349 if (count_res)
3c2a46a7 1350 ++*res;
fe24d605 1351 pfd->revents |= POLLNVAL;
7fbfbe29 1352 break;
fe24d605 1353 default:
22690e50
SG
1354 /*
1355 * Poll silently swallows any unknown
1356 * errors except in the case of POLLPRI
1357 * (OOB/urgent data).
2b7d1884
SG
1358 *
1359 * ALWAYS filter out EOPNOTSUPP errors
1360 * from filters, common applications
1361 * set POLLPRI|POLLRDBAND and most
1362 * filters do not support EVFILT_EXCEPT.
22690e50
SG
1363 */
1364 if (kevp[i].filter != EVFILT_READ &&
2b7d1884
SG
1365 kevp[i].filter != EVFILT_WRITE &&
1366 kevp[i].data != EOPNOTSUPP) {
8c4ed426 1367 if (count_res)
22690e50
SG
1368 ++*res;
1369 pfd->revents |= POLLERR;
1370 }
7fbfbe29
SG
1371 break;
1372 }
b4372719
MD
1373 if (nseldebug) {
1374 kprintf("poll index %d fd %d "
c9a1a153 1375 "filter %d error %jd\n",
fe24d605 1376 pi, pfd->fd,
b4372719 1377 kevp[i].filter,
c9a1a153 1378 (intmax_t)kevp[i].data);
b4372719 1379 }
fe24d605
MD
1380 continue;
1381 }
7fbfbe29 1382
fe24d605
MD
1383 switch (kevp[i].filter) {
1384 case EVFILT_READ:
9a5d20db
MD
1385#if 0
1386 /*
3bcb6e5e 1387 * NODATA on the read side can indicate a
9a5d20db
MD
1388 * half-closed situation and not necessarily
1389 * a disconnect, so depend on the user
1390 * issuing a read() and getting 0 bytes back.
1391 */
3bcb6e5e 1392 if (kevp[i].flags & EV_NODATA)
9a5d20db
MD
1393 pfd->revents |= POLLHUP;
1394#endif
aa622c3d
SZ
1395 if ((kevp[i].flags & EV_EOF) &&
1396 kevp[i].fflags != 0)
1397 pfd->revents |= POLLERR;
484efec7
SG
1398 if (pfd->events & POLLIN)
1399 pfd->revents |= POLLIN;
1400 if (pfd->events & POLLRDNORM)
1401 pfd->revents |= POLLRDNORM;
fe24d605
MD
1402 break;
1403 case EVFILT_WRITE:
9a5d20db
MD
1404 /*
1405 * As per the OpenGroup POLLHUP is mutually
1406 * exclusive with the writability flags. I
1407 * consider this a bit broken but...
1408 *
1409 * In this case a disconnect is implied even
1410 * for a half-closed (write side) situation.
1411 */
aa622c3d 1412 if (kevp[i].flags & EV_EOF) {
9a5d20db 1413 pfd->revents |= POLLHUP;
aa622c3d
SZ
1414 if (kevp[i].fflags != 0)
1415 pfd->revents |= POLLERR;
9a5d20db
MD
1416 } else {
1417 if (pfd->events & POLLOUT)
1418 pfd->revents |= POLLOUT;
1419 if (pfd->events & POLLWRNORM)
1420 pfd->revents |= POLLWRNORM;
1421 }
fe24d605
MD
1422 break;
1423 case EVFILT_EXCEPT:
9a5d20db 1424 /*
3bcb6e5e 1425 * EV_NODATA should never be tagged for this
9a5d20db
MD
1426 * filter.
1427 */
484efec7
SG
1428 if (pfd->events & POLLPRI)
1429 pfd->revents |= POLLPRI;
1430 if (pfd->events & POLLRDBAND)
1431 pfd->revents |= POLLRDBAND;
fe24d605
MD
1432 break;
1433 }
1434
1435 if (nseldebug) {
21ae0f4c
SG
1436 kprintf("poll index %d/%d fd %d revents %08x\n",
1437 pi, pkap->nfds, pfd->fd, pfd->revents);
fe24d605
MD
1438 }
1439
8c4ed426
MD
1440 if (count_res && pfd->revents)
1441 ++*res;
fe24d605 1442 } else {
b4372719 1443 if (nseldebug) {
c9a1a153
SW
1444 kprintf("poll index %d mismatch %ju/%d\n",
1445 pi, (uintmax_t)kevp[i].ident, pfd->fd);
b4372719 1446 }
fe24d605 1447 }
984263bc 1448 }
7fbfbe29 1449
984263bc
MD
1450 return (0);
1451}
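
For clarity, the udata scheme shared by the two callbacks above: poll_copyin() stores serial plus array index per descriptor, and poll_copyout() recovers the index by subtraction, treating anything outside the current nfds as a stale event from an earlier poll(). A small sketch with a hypothetical helper name:

static __inline u_int
poll_decode_index(const struct lwp *lp, const struct kevent *kev)
{
	/* the result is only meaningful when it is < nfds of the current dopoll() */
	return ((u_int)(uintptr_t)kev->udata - (u_int)lp->lwp_kqueue_serial);
}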
1452
7fbfbe29
SG
1453static int
1454dopoll(int nfds, struct pollfd *fds, struct timespec *ts, int *res)
1455{
7fbfbe29
SG
1456 struct poll_kevent_copyin_args ka;
1457 struct pollfd sfds[64];
679058fb 1458 int bytes;
7fbfbe29
SG
1459 int error;
1460
1461 *res = 0;
1462 if (nfds < 0)
1463 return (EINVAL);
679058fb 1464
4979e190
MD
1465 if (nfds == 0)
1466 return (dotimeout_only(ts));
1467
679058fb
MD
1468 /*
1469 * This is a bit arbitrary but we need to limit internal kmallocs.
1470 */
1471 if (nfds > maxfilesperproc * 2)
1472 nfds = maxfilesperproc * 2;
1473 bytes = sizeof(struct pollfd) * nfds;
7fbfbe29
SG
1474
1475 ka.lwp = curthread->td_lwp;
1476 ka.nfds = nfds;
1477 ka.pfds = 0;
1478 ka.error = 0;
1479
1480 if (ka.nfds < 64)
1481 ka.fds = sfds;
1482 else
1483 ka.fds = kmalloc(bytes, M_SELECT, M_WAITOK);
1484
679058fb 1485 error = copyin(fds, ka.fds, bytes);
7fbfbe29 1486 if (error == 0)
3c2a46a7 1487 error = kern_kevent(&ka.lwp->lwp_kqueue, 0x7FFFFFFF, res, &ka,
7fbfbe29
SG
1488 poll_copyin, poll_copyout, ts);
1489
1490 if (error == 0)
679058fb 1491 error = copyout(ka.fds, fds, bytes);
7fbfbe29
SG
1492
1493 if (ka.fds != sfds)
1494 kfree(ka.fds, M_SELECT);
1495
fe24d605
MD
1496 ka.lwp->lwp_kqueue_serial += nfds;
1497
7fbfbe29
SG
1498 return (error);
1499}
1500
8b5c39bb
SG
1501static int
1502socket_wait_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
1503{
1504 return (0);
1505}
1506
1507static int
1508socket_wait_copyout(void *arg, struct kevent *kevp, int count, int *res)
1509{
1510 ++*res;
1511 return (0);
1512}
1513
1514extern struct fileops socketops;
6cef7136
MD
1515
1516/*
1517 * NOTE: Callers of socket_wait() must already have a reference on the
1518 * socket.
1519 */
8b5c39bb
SG
1520int
1521socket_wait(struct socket *so, struct timespec *ts, int *res)
1522{
ac62ea3c 1523 struct thread *td = curthread;
8b5c39bb 1524 struct file *fp;
8b5c39bb
SG
1525 struct kqueue kq;
1526 struct kevent kev;
ac62ea3c 1527 int error, fd;
8b5c39bb 1528
ac62ea3c 1529 if ((error = falloc(td->td_lwp, &fp, &fd)) != 0)
8b5c39bb
SG
1530 return (error);
1531
ac62ea3c
SG
1532 fp->f_type = DTYPE_SOCKET;
1533 fp->f_flag = FREAD | FWRITE;
8b5c39bb
SG
1534 fp->f_ops = &socketops;
1535 fp->f_data = so;
ac62ea3c 1536 fsetfd(td->td_lwp->lwp_proc->p_fd, fp, fd);
8b5c39bb 1537
ac62ea3c
SG
1538 kqueue_init(&kq, td->td_lwp->lwp_proc->p_fd);
1539 EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, NULL);
8b5c39bb
SG
1540 if ((error = kqueue_register(&kq, &kev)) != 0) {
1541 fdrop(fp);
1542 return (error);
1543 }
1544
1545 error = kern_kevent(&kq, 1, res, NULL, socket_wait_copyin,
1546 socket_wait_copyout, ts);
ac62ea3c
SG
1547
1548 EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
1549 kqueue_register(&kq, &kev);
1550 fp->f_ops = &badfileops;
8b5c39bb
SG
1551 fdrop(fp);
1552
1553 return (error);
1554}
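
A minimal sketch of a kernel-side caller, assuming it already holds a reference on the socket as required above; the helper name is hypothetical:

static int
example_wait_readable(struct socket *so)
{
	struct timespec ts = { 1, 0 };		/* give up after one second */
	int res = 0;

	/* res is bumped by socket_wait_copyout() when the socket is readable */
	return (socket_wait(so, &ts, &res));
}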
1555
984263bc
MD
1556/*
1557 * OpenBSD poll system call.
1558 * XXX this isn't quite a true representation.. OpenBSD uses select ops.
3919ced0
MD
1559 *
1560 * MPSAFE
984263bc 1561 */
984263bc 1562int
753fd850 1563sys_openbsd_poll(struct openbsd_poll_args *uap)
984263bc 1564{
753fd850 1565 return (sys_poll((struct poll_args *)uap));
984263bc
MD
1566}
1567
1568/*ARGSUSED*/
1569int
b13267a5 1570seltrue(cdev_t dev, int events)
984263bc 1571{
984263bc
MD
1572 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
1573}