/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};
static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int	kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int	kq_wakeup_one = 1;
SYSCTL_INT(_kern, OID_AUTO, kq_wakeup_one, CTLFLAG_RW,
    &kq_wakeup_one, 0, "Wakeup only one kqueue scanner");
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
};
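/*
 * Filter constants are small negative integers, so kqueue_register()
 * indexes this table with the one's complement of the filter.  A
 * minimal sketch of the lookup (`filt' and `fops' are hypothetical
 * locals, mirroring the range check in kqueue_register() below):
 *
 *	short filt = EVFILT_READ;			// == -1
 *	struct filterops *fops;
 *	if (filt < 0 && filt + EVFILT_SYSCOUNT >= 0)
 *		fops = sysfilt_ops[~filt];		// ~(-1) == 0
 */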
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	if (kn->kn_status & KN_DETACHED)
		return(1);
	else
		return(0);
}
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
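/*
 * Because a kqueue is itself a file with an EVFILT_READ filter (above),
 * one kqueue can watch another.  A userland sketch (not part of this
 * file): `inner' collects events, and `outer' reports when `inner'
 * has anything pending; event.data is the pending count produced by
 * filt_kqueue() above.
 *
 *	int inner = kqueue();
 *	int outer = kqueue();
 *	struct kevent kev, event;
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *	if (kevent(outer, NULL, 0, &event, 1, NULL) > 0)
 *		printf("%ld events pending on inner\n", (long)event.data);
 */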
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
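/*
 * Userland sketch (not part of this file): watch a child for exit and
 * for further forks.  NOTE_TRACK makes filt_proc() above auto-attach
 * a knote to each new child, posting NOTE_CHILD events for it.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *	pid_t pid = fork();	// assume this is the child's pid
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0 &&
 *	    (ev.fflags & NOTE_EXIT))
 *		printf("pid %d exited, status %ld\n",
 *		    (int)ev.ident, (long)ev.data);
 */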
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-code knote_acquire(), since we can't sleep in a callout;
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);
	lwkt_relpooltoken(kq);
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}
static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
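/*
 * Userland sketch (not part of this file): a periodic 500ms timer.
 * data is in milliseconds (see filt_timerattach() above); the timer
 * auto-rearms because EV_CLEAR is forced, and ev.data reports how many
 * expirations occurred since the event was last retrieved.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	for (;;) {
 *		if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *			printf("%ld expirations\n", (long)ev.data);
 *	}
 */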
static int
filt_userattach(struct knote *kn)
{
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;
	return (0);
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
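/*
 * Userland sketch (not part of this file): EVFILT_USER as a wakeup
 * mechanism between threads; one thread registers, another triggers.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// ... later, possibly from another thread:
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// the waiter's kevent() now returns the user event:
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 */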
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}
/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct lwkt_token *tok;
	struct knote *kn;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(tok);
}
/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
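/*
 * Userland sketch (not part of this file): the descriptor returned by
 * kqueue() behaves like any other file descriptor; close() releases it
 * through kqueue_close() below.
 *
 *	int kq = kqueue();
 *	if (kq == -1)
 *		err(1, "kqueue");
 *	// ... register and retrieve events with kevent(2) ...
 *	close(kq);
 */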
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}
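/*
 * kern_kevent() below takes the copyin/copyout helpers as function
 * pointers so in-kernel consumers can supply their own versions; the
 * select/poll code layers itself on top of kqueue this way (see the
 * sys_poll() and doselect() references below).  A hypothetical
 * in-kernel caller might feed changes from a kernel-resident array
 * instead of user memory:
 *
 *	static int
 *	my_copyin(void *arg, struct kevent *kevp, int max, int *events)
 *	{
 *		// copy up to 'max' pre-built kevents out of 'arg'
 *		*events = 0;		// none left in this sketch
 *		return (0);
 *	}
 */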
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return (error);
		if (n == 0)
			break;
		for (i = 0; i < n; ++i) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return (error);
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return (0);

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				} else {
					timeout = atx.tv_sec > 24 * 60 * 60 ?
					    24 * 60 * 60 * hz :
					    tstohz_high(&atx);
				}
			}

			if (kq->kq_count == 0) {
				kq->kq_state |= KQ_SLEEP;
				error = tsleep(kq, PCATCH, "kqread", timeout);

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error)
					break;

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}
/*
 * MPSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
	    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
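/*
 * Userland sketch (not part of this file): the timeout argument.  A
 * NULL timespec blocks indefinitely; a zeroed timespec polls
 * (kern_kevent() above uses EWOULDBLOCK internally, mapped to a 0
 * return).
 *
 *	struct timespec ts = { 2, 500000000 };	// 2.5 second timeout
 *	struct kevent ev;
 *	int n = kevent(kq, NULL, 0, &ev, 1, &ts);
 *	if (n == 0)
 *		printf("timed out\n");
 */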
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct lwkt_token *tok;
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(tok);
			return (EBADF);
		}
		lwkt_getpooltoken(&fp->f_klist);
again1:
		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again1;
				break;
			}
		}
		lwkt_relpooltoken(&fp->f_klist);
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			lwkt_getpooltoken(list);
again2:
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter) {
					if (knote_acquire(kn) == 0)
						goto again2;
					break;
				}
			}
			lwkt_relpooltoken(list);
		}
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	lwkt_reltoken(tok);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
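/*
 * Userland sketch (not part of this file): EV_DISPATCH disables an
 * event as it is delivered (see kqueue_scan() below); EV_ENABLE
 * through this function re-arms it, immediately re-queueing the knote
 * if it is still active.
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// ... event fires once, then stays disabled until:
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */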
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				if (kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH) {
					kn->kn_status |= KN_DISABLED;
				}
				kn->kn_status &= ~(KN_QUEUED |
						   KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}
/*
 * MPSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct lwkt_token *tok;
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(tok);
	return (error);
}
/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
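/*
 * Userland sketch (not part of this file): fstat() on a kqueue
 * descriptor reports the number of pending events in st_size, as
 * filled in above.
 *
 *	struct stat st;
 *	if (fstat(kq, &st) == 0)
 *		printf("%lld events pending\n", (long long)st.st_size);
 */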
/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		if (kq_wakeup_one)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}
/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * trickier.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}
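/*
 * Kernel-side sketch (hypothetical driver, not part of this file): a
 * driver keeps a kqinfo in its softc and calls KNOTE() when data
 * arrives, which funnels into knote() above and activates any
 * registered knotes.
 *
 *	struct mydev_softc {
 *		struct kqinfo	sc_kqinfo;
 *	};
 *
 *	static void
 *	mydev_rxintr(struct mydev_softc *sc)
 *	{
 *		KNOTE(&sc->sc_kqinfo.ki_note, 0);
 *	}
 */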
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}
/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}
/*
 * Move all knotes from the src kqinfo to the dst kqinfo, switching
 * their filter ops and hook over to the caller-supplied values.
 */
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}
/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}
/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}
static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	kfree(kn, M_KQUEUE);
}