/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_UP_INITIALIZER(kq_token);
SYSCTL_INT(_lwkt, OID_AUTO, kq_mpsafe,
	   CTLFLAG_RW, &kq_token.t_flags, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
	    CTLFLAG_RW, &kq_token.t_collisions, 0, "");

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_detach_and_drop_locked(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops/retries in kqueue scan");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
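
/*
 * e.g. KN_HASH(0x1234, KN_HASHSIZE - 1) folds the low bytes together:
 * (0x1234 ^ 0x12) & 0x3f = 0x26, i.e. bucket 38.
 */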

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};
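
/*
 * User-visible filter numbers are negative (EVFILT_READ is -1), so
 * kqueue_register() indexes this table with ~kev->filter, mapping
 * -1 to slot 0, -2 to slot 1, and so on.
 */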

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	lwkt_gettoken(&proc_token);
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (EACCES);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&proc_token);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  take proc_token here? */
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
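
/*
 * Illustrative sketch (not part of this file): from userland the fork
 * tracking above is requested by registering an EVFILT_PROC knote with
 * NOTE_TRACK, e.g.:
 *
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *
 * When the watched process forks, the EV_FLAG1 path above auto-registers
 * a knote on the new pid which reports a NOTE_CHILD event carrying the
 * parent's pid in the data field.
 */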

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	lwkt_gettoken(&kq_token);

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}

	lwkt_reltoken(&kq_token);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
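
/*
 * Illustrative sketch (not part of this file): from userland a periodic
 * timer is requested with the period in milliseconds in the data field,
 * e.g. a 500ms ticker:
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Because EV_CLEAR is forced on at attach time, the returned event's
 * data field counts the expirations since the event was last retrieved.
 */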

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (kn->kn_status & KN_PROCESSING) {
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "kqtrms", hz);
			continue;
		}
		knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(&kq_token);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}

int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;

	tsp = tsp_in;
	*res = 0;

	lwkt_gettoken(&kq_token);

	for ( ;; ) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;

			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
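
/*
 * Illustrative userland sketch (not part of this file) showing how the
 * two syscalls above are typically consumed; error handling omitted and
 * the descriptor name "fd" is assumed:
 *
 *	struct kevent change, event;
 *	int kq = kqueue();
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);		(register only)
 *
 *	for (;;) {
 *		if (kevent(kq, NULL, 0, &event, 1, NULL) > 0)
 *			... event.ident is readable, event.data bytes ...
 *	}
 */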

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	lwkt_gettoken(&kq_token);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(&kq_token);
			return (EBADF);
		}

		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter)
					break;
			}
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_status |= KN_PROCESSING;
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.
		 *
		 * We have set KN_PROCESSING so we are the reprocessing
		 * master.  We must deal with any reprocessing events prior
		 * to running the filter.  The filter may block and we
		 * could end up with a reprocessing request afterwards.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
		while (kn->kn_status & KN_REPROCESS) {
			kn->kn_status &= ~KN_REPROCESS;
			if (kn->kn_status & KN_DELETING) {
				error = EBADF;
				knote_detach_and_drop_locked(kn);
				goto done;
			}
			if (kn->kn_status & KN_WAITING) {
				kn->kn_status &= ~KN_WAITING;
				wakeup(kn);
			}
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
		kn->kn_status &= ~KN_PROCESSING;
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Attempt to delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

done:
	lwkt_reltoken(&kq_token);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->ts_secs/nsecs are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			/* target time already passed */
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
				24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}
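
/*
 * Worked example (not part of the original source): with hz=100 and a
 * target time 1.5 seconds in the future, timespecsub() leaves atx at
 * roughly 1.5s and the tsleep timeout becomes tstohz_high(&atx), about
 * 150 ticks.  Deadlines more than 24 hours out are clamped to 24 hours
 * worth of ticks so the int timeout argument cannot overflow.
 */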

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (kn->kn_status & KN_PROCESSING) {
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "kqepts", hz);
			continue;
		}

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 *
		 * WARNING!  We must set KN_PROCESSING to avoid races
		 *	     against deletion or another thread's
		 *	     processing.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;
		kn->kn_status |= KN_PROCESSING;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			*kevp++ = kn->kn_kevent;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		while (kn->kn_status & KN_REPROCESS) {
			kn->kn_status &= ~KN_REPROCESS;
			if (kn->kn_status & KN_WAITING) {
				kn->kn_status &= ~KN_WAITING;
				wakeup(kn);
			}
			if (kn->kn_status & KN_DELETING) {
				knote_detach_and_drop_locked(kn);
				goto next;
			}
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
		kn->kn_status &= ~KN_PROCESSING;
next:
		;
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	lwkt_gettoken(&kq_token);
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&kq_token);
	return (error);
}
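
/*
 * Illustrative sketch (not part of this file): userland can request
 * SIGIO-style async notification on the kqueue descriptor itself, e.g.:
 *
 *	int on = 1, owner = getpid();
 *	ioctl(kq, FIOSETOWN, &owner);
 *	ioctl(kq, FIOASYNC, &on);
 *
 * With KQ_ASYNC set, knote_enqueue() below posts SIGIO via pgsigio()
 * whenever the pending queue transitions from empty to non-empty.
 */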

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_attach(kn);
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	/*
	 * If someone else is processing the knote we cannot destroy it now,
	 * flag the request and return.
	 */
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_DELETING | KN_REPROCESS;
		return;
	}
	kn->kn_status |= KN_PROCESSING | KN_DELETING | KN_REPROCESS;
	knote_detach_and_drop_locked(kn);
}

static void
knote_detach_and_drop_locked(struct knote *kn)
{
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * trickier.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If KN_REPROCESS is set we must handle reprocessing before
		 * running the event.  If not we must run the filter event
		 * and then check for reprocessing requests.  If the filter
		 * event itself blocks IT must check for KN_REPROCESS and
		 * return 0 if it finds it set.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		while (kn->kn_status & KN_REPROCESS) {
			kn->kn_status &= ~KN_REPROCESS;
			if (kn->kn_status & KN_WAITING) {
				kn->kn_status &= ~KN_WAITING;
				wakeup(kn);
			}
			if (kn->kn_status & KN_DELETING) {
				knote_detach_and_drop_locked(kn);
				goto restart;
			}
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		kn->kn_status &= ~KN_PROCESSING;
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_REMOVE(klist, kn, knote, kn_next);
}

/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (kn->kn_status & KN_PROCESSING) {
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "kqepts", hz);
			continue;
		}
		knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			if (kn->kn_status & KN_PROCESSING) {
				kn->kn_status |= KN_WAITING | KN_REPROCESS;
				tsleep(kn, 0, "kqepts", hz);
			} else {
				knote_detach_and_drop(kn);
			}
			goto restart;
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}