/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#define	EVENT_REGISTER	1
#define	EVENT_PROCESS	2

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
};

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };

static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
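
/*
 * Illustrative usage note (not part of the original source): both limits
 * above are plain CTLFLAG_RW integers under the _kern node, so they can
 * be inspected and tuned at run time with sysctl(8), e.g.:
 *
 *	# sysctl kern.kq_calloutmax
 *	# sysctl kern.kq_calloutmax=8192
 */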

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * The related kq token must be held.
 */
static int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed.
 */
static int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return (1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return (0);
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		if (p)
			PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
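
/*
 * Illustrative userland sketch (not part of this file), assuming a
 * previously forked `child_pid': watch for the child's exit and, via
 * NOTE_TRACK, auto-attach to any process it forks (the NOTE_CHILD /
 * EV_FLAG1 machinery above).
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register only)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for one event)
 *	if (ev.fflags & NOTE_EXIT)
 *		;	(ev.data carries the exit status, p_xstat above)
 */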

static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open knote_acquire(), since we can't sleep in callout,
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
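
/*
 * Illustrative userland sketch (not part of this file): a periodic
 * 500ms timer.  The kevent data field is interpreted above as
 * milliseconds, EV_CLEAR is forced at attach time, and kn_data
 * accumulates the number of expirations between kevent() calls.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	for (;;) {
 *		kevent(kq, NULL, 0, &ev, 1, NULL);
 *		;	(ev.data = expirations since last retrieval)
 *	}
 */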

static int
filt_userattach(struct knote *kn)
{
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;
	return (0);
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
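
/*
 * Illustrative userland sketch (not part of this file): one thread
 * arms an EVFILT_USER event, another fires it.  NOTE_FFCOPY exercises
 * the fflags control handling above; the low NOTE_FFLAGSMASK bits are
 * application-defined.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	(from another thread: trigger it, copying in user flag 0x1)
 *	EV_SET(&kev, 1, EVFILT_USER, 0,
 *	    NOTE_TRIGGER | NOTE_FFCOPY | 0x1, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */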

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct lwkt_token *tok;
	struct knote *kn;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(tok);
}

int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
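
/*
 * Illustrative userland sketch (not part of this file): the classic
 * kqueue()/kevent() pattern against the descriptor allocated above,
 * here waiting for a socket (assumed variable `sock') to become
 * readable.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	struct kevent kev, ev;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&kev, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &kev, 1, &ev, 1, NULL);	(register and wait)
 *	if (n == 1)
 *		;	(ev.data = bytes available to read)
 */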

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;
	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}

int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return error;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					error = EFAULT;
					goto done;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				}
				timeout = atx.tv_sec > 24 * 60 * 60 ?
				    24 * 60 * 60 * hz : tstohz_high(&atx);
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				error = tsleep(kq, PCATCH, "kqread", timeout);

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 user.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	return (error);
}

int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}
	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
	    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
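
/*
 * Illustrative userland sketch (not part of this file): a zeroed
 * timespec makes the wait path in kern_kevent() hit EWOULDBLOCK, which
 * is filtered out before return, so kevent() acts as a non-blocking
 * poll.
 *
 *	struct timespec zero = { 0, 0 };
 *	struct kevent ev;
 *	int n;
 *
 *	n = kevent(kq, NULL, 0, &ev, 1, &zero);
 *	(n == 0 means nothing is pending right now)
 */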

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	struct thread *td;
	int error = 0;
	struct knote_cache_list *cache_list;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
	}

	td = curthread;
	lwkt_getpooltoken(kq);

	/*
	 * Make sure that only one thread can register event on this kqueue,
	 * so that we would not suffer any race, even if the registration
	 * blocked, i.e. kq token was released, and the kqueue was shared
	 * between threads (this should be rare though).
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

	if (fp != NULL) {
		list = &fp->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
				    kn_link);
				cache_list->knote_cache_cnt--;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	if (td != NULL) { /* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
					    KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct lwkt_token *tok;
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(tok);
	return (error);
}

static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
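
/*
 * Illustrative userland sketch (not part of this file): because of the
 * fields filled in above, fstat(2) on a kqueue descriptor reports the
 * number of pending events in st_size.
 *
 *	struct stat st;
 *
 *	if (fstat(kq, &st) == 0)
 *		printf("%jd event(s) pending\n", (intmax_t)st.st_size);
 */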

static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		if (kq->kq_sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
		kq->kq_sleep_cnt = 0;
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * more difficult.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn, marker;

	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(list);
	if (SLIST_EMPTY(list)) {
		lwkt_relpooltoken(list);
		return;
	}

	SLIST_INSERT_HEAD(list, &marker, kn_next);
	while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Skip marker */
			SLIST_REMOVE(list, &marker, knote, kn_next);
			if (SLIST_NEXT(kn, kn_next) == NULL)
				goto done;
			SLIST_INSERT_AFTER(kn, &marker, kn_next);
			continue;
		}

		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
			/*
			 * Don't move the marker; check the knote after
			 * the marker again.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				/*
				 * Move the marker w/ the kq token, so that
				 * this knote will not be ripped behind our
				 * back.
				 */
				SLIST_REMOVE(list, &marker, knote, kn_next);
				if (SLIST_NEXT(kn, kn_next) != NULL)
					SLIST_INSERT_AFTER(kn, &marker,
					    kn_next);
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);

			/*
			 * Don't move the marker; check this knote again,
			 * hopefully it is still after the marker.  Or it
			 * was deleted and we would check the next knote.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		/*
		 * Become the reprocessing master ourselves.
		 */
		KASSERT((kn->kn_status & KN_DELETING) == 0,
		    ("acquire a deleting knote %#x", kn->kn_status));
		kn->kn_status |= KN_PROCESSING;

		/* Move the marker */
		SLIST_REMOVE(list, &marker, knote, kn_next);
		if (SLIST_NEXT(kn, kn_next) != NULL)
			SLIST_INSERT_AFTER(kn, &marker, kn_next);

		/*
		 * If hint is non-zero running the event is mandatory
		 * so do it whether reprocessing is set or not.
		 */
		if (filter_event(kn, hint))
			KNOTE_ACTIVATE(kn);

		knote_release(kn);
		lwkt_relpooltoken(kq);
	}
	SLIST_REMOVE(list, &marker, knote, kn_next);
done:
	lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}
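
/*
 * Illustrative driver-side sketch (not from this file), assuming a
 * hypothetical driver softc `struct foo_softc' with an embedded
 * struct kqinfo: a minimal read filter wired up through
 * knote_insert()/knote_remove(), with interrupt code calling KNOTE()
 * to run knote() above.
 *
 *	static void
 *	foo_filt_detach(struct knote *kn)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)kn->kn_hook;
 *
 *		knote_remove(&sc->sc_kqinfo.ki_note, kn);
 *	}
 *
 *	static int
 *	foo_filt_read(struct knote *kn, long hint)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)kn->kn_hook;
 *
 *		kn->kn_data = sc->sc_bytes_ready;
 *		return (kn->kn_data > 0);
 *	}
 *
 *	static struct filterops foo_read_filtops =
 *		{ FILTEROP_ISFD, NULL, foo_filt_detach, foo_filt_read };
 *
 * In the device's kqfilter entry point:
 *
 *	kn->kn_fop = &foo_read_filtops;
 *	kn->kn_hook = (caddr_t)sc;
 *	knote_insert(&sc->sc_kqinfo.ki_note, kn);
 *
 * In the interrupt handler, when data arrives:
 *
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 */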

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn, marker;

	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(&src->ki_note);
	if (SLIST_EMPTY(&src->ki_note)) {
		lwkt_relpooltoken(&src->ki_note);
		return;
	}
	lwkt_getpooltoken(&dst->ki_note);

restart:
	SLIST_INSERT_HEAD(&src->ki_note, &marker, kn_next);
	while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Skip marker */
			SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
			SLIST_INSERT_AFTER(kn, &marker, kn_next);
			continue;
		}

		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
			/*
			 * Don't move the marker; check the knote after
			 * the marker again.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		/* Move marker */
		SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
		SLIST_INSERT_AFTER(kn, &marker, kn_next);

		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);

	/* Keep draining, until nothing left */
	if (!SLIST_EMPTY(&src->ki_note))
		goto restart;

	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kntmp == kn)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		return;
	}
	kfree(kn, M_KQUEUE);
}