/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>
/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_UP_INITIALIZER;

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};
static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);
static void     kqueue_wakeup(struct kqueue *kq);
static int      filter_attach(struct knote *kn);
static void     filter_detach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};
static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static void     knote_init(void);
static struct   knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);
static struct filterops file_filtops =
        { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t        knote_zone;
static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)          \
                knote_enqueue(kn);                                      \
} while(0)
#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
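
/*
 * Worked example (illustrative, not from the original source): with the
 * initial 64-bucket table the mask is 0x3f, so an ident of 0x1234 hashes
 * as follows:
 *
 *      (0x1234 ^ (0x1234 >> 8)) & 0x3f  ->  (0x1234 ^ 0x12) & 0x3f  ->  0x26
 *
 * Folding the second byte into the first spreads idents that differ only
 * in their upper bits across the buckets.
 */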
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
};
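
/*
 * Worked example (illustrative): filter numbers are small negative ints,
 * so kqueue_register() below turns one into a 0-based index into this
 * table with a bitwise complement:
 *
 *      ~EVFILT_READ  == ~(-1) == 0  ->  &file_filtops
 *      ~EVFILT_TIMER == ~(-7) == 6  ->  &timer_filtops
 */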
static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        lwkt_gettoken(&proc_token);
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                lwkt_reltoken(&proc_token);
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                lwkt_reltoken(&proc_token);
                return (EACCES);
        }

        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&proc_token);

        return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        /* XXX locking?  take proc_token here? */
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                }
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}
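
/*
 * Illustrative userland sketch (hypothetical child_pid), exercising the
 * NOTE_TRACK path above: follow a child across fork() and also learn of
 * its exit.
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *          NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */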
static void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        kn->kn_data++;
        KNOTE_ACTIVATE(kn);

        if ((kn->kn_flags & EV_ONESHOT) == 0) {
                tv.tv_sec = kn->kn_sdata / 1000;
                tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                tticks = tvtohz_high(&tv);
                calloutp = (struct callout *)kn->kn_hook;
                callout_reset(calloutp, tticks, filt_timerexpire, kn);
        }
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax)
                return (ENOMEM);
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        MALLOC(calloutp, struct callout *, sizeof(*calloutp),
            M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_stop(calloutp);
        FREE(calloutp, M_KQUEUE);
        kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
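
/*
 * Illustrative userland sketch (hypothetical kqfd): arm a periodic
 * 500-millisecond EVFILT_TIMER.  The period travels in kev.data, per the
 * comment above filt_timerattach(), and EV_CLEAR is set automatically.
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */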
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}
/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct knote *kn;
        struct klist *list;
        int hv;

        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                filter_detach(kn);
                if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                        list = &kn->kn_fp->f_klist;
                        SLIST_REMOVE(list, kn, knote, kn_link);
                } else {
                        hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
                        list = &kq->kq_knhash[hv];
                        SLIST_REMOVE(list, kn, knote, kn_link);
                }
                TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
                if (kn->kn_status & KN_QUEUED)
                        knote_dequeue(kn);
                knote_free(kn);
        }

        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
}
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;
        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        }
        return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }
        return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
    struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp;
        int i, n, total, error, nerrors = 0;
        int lres;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;

        tsp = tsp_in;
        *res = 0;

        lwkt_gettoken(&kq_token);
        for (;;) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        goto done;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
                if (nerrors) {
                        error = 0;
                        goto done;
                }
        }

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                struct timespec ats;

                if (tsp->tv_sec || tsp->tv_nsec) {
                        nanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        bzero(&marker, sizeof(marker));
        marker.kn_filter = EVFILT_MARKER;
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;

done:
        lwkt_reltoken(&kq_token);
        return (error);
}
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);
        return (error);
}
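
/*
 * Illustrative userland sketch of the syscall flow handled above
 * (hypothetical descriptor sockfd): register a read filter, then wait
 * for it to fire.
 *
 *      struct kevent ch, ev;
 *      int kqfd, n;
 *
 *      kqfd = kqueue();
 *      EV_SET(&ch, sockfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      kevent(kqfd, &ch, 1, NULL, 0, NULL);       register only
 *      n = kevent(kqfd, NULL, 0, &ev, 1, NULL);   wait for one event
 */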
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                kprintf("unknown filter: %d\n", kev->filter);
                return (EINVAL);
        }

        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL)
                        return (EBADF);

                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                break;
                        }
                }
        } else {
                if (kq->kq_knhashmask) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter)
                                        break;
                        }
                }
        }

        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
                                knote_drop(kn);
                                goto done;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filter which has already been triggered.
                         */
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        } else if (kev->flags & EV_DELETE) {
                filter_detach(kn);
                knote_drop(kn);
                goto done;
        }

        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0))
                        knote_enqueue(kn);
        }

done:
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
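
/*
 * Illustrative userland sketch of the non-EV_ADD paths above
 * (hypothetical descriptor sockfd): the same ident/filter pair selects
 * the existing knote, which is first disabled and finally deleted.
 *
 *      EV_SET(&kev, sockfd, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *      EV_SET(&kev, sockfd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */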
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->ts_secs/nsecs are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                nanouptime(&ats);
                timespecsub(&atx, &ats);
                if (atx.tv_sec < 0) {
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }

        /* don't restart after signals... */
        if (error == ERESTART)
                error = EINTR;

        return (error);
}
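
/*
 * Illustrative userland sketch of the three timeout modes described
 * above (hypothetical kqfd, evs, n):
 *
 *      struct timespec ts;
 *
 *      kevent(kqfd, NULL, 0, evs, n, NULL);       block indefinitely
 *      ts.tv_sec = 0; ts.tv_nsec = 0;
 *      kevent(kqfd, NULL, 0, evs, n, &ts);        poll, never block
 *      ts.tv_sec = 2; ts.tv_nsec = 0;
 *      kevent(kqfd, NULL, 0, evs, n, &ts);        block for up to 2 seconds
 */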
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
    struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other threads marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;
                if (kn->kn_status & KN_DISABLED) {
                        kn->kn_status &= ~KN_QUEUED;
                        continue;
                }
                if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                    filter_event(kn, 0) == 0) {
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        continue;
                }
                *kevp++ = kn->kn_kevent;
                ++total;
                --count;

                /*
                 * Post-event action on the note
                 */
                if (kn->kn_flags & EV_ONESHOT) {
                        kn->kn_status &= ~KN_QUEUED;
                        filter_detach(kn);
                        knote_drop(kn);
                } else if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                        kq->kq_count++;
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        return (total);
}
/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
    struct ucred *cred, struct sysmsg *msg)
{
        struct kqueue *kq;
        int error;

        lwkt_gettoken(&kq_token);
        kq = (struct kqueue *)fp->f_data;

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(&kq_token);
        return (error);
}
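
/*
 * Illustrative userland sketch (hypothetical kqfd): arming the SIGIO
 * path handled by FIOASYNC/FIOSETOWN above, which knote_enqueue() fires
 * via pgsigio() when the first event is queued.
 *
 *      int on = 1, owner = getpid();
 *
 *      ioctl(kqfd, FIOASYNC, &on);
 *      ioctl(kqfd, FIOSETOWN, &owner);
 */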
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        lwkt_gettoken(&kq_token);

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(kq->kq_sigio);
        lwkt_reltoken(&kq_token);

        kfree(kq, M_KQUEUE);
        return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        } else {
                ret = kn->kn_fop->f_attach(kn);
        }

        return (ret);
}
/*
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static void
filter_detach(struct knote *kn)
{
        if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        } else {
                kn->kn_fop->f_detach(kn);
        }
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        } else {
                ret = kn->kn_fop->f_event(kn, hint);
        }

        return (ret);
}
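
/*
 * Illustrative sketch (hypothetical filter): a filter that does its own
 * locking can set FILTEROP_MPSAFE so the three wrappers above invoke it
 * without taking the mplock.
 *
 *      static struct filterops myfilt_ops =
 *          { FILTEROP_ISFD | FILTEROP_MPSAFE,
 *            myfilt_attach, myfilt_detach, myfilt_event };
 */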
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
        SLIST_FOREACH(kn, list, kn_next)
                if (filter_event(kn, hint))
                        KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&kq_token);
}
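
/*
 * Illustrative sketch (hypothetical driver softc): event sources call
 * knote(), usually via the KNOTE() macro, when their state changes,
 * e.g. after appending data to a receive buffer:
 *
 *      KNOTE(&sc->sc_kqinfo.ki_note, 0);
 */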
/*
 * insert knote at head of klist
 *
 * Requires: kq_token
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        SLIST_INSERT_HEAD(klist, kn, kn_next);
}
/*
 * remove knote from a klist
 *
 * Requires: kq_token
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        SLIST_REMOVE(klist, kn, knote, kn_next);
}
/*
 * remove all knotes from a specified klist
 */
void
knote_empty(struct klist *list)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
        while ((kn = SLIST_FIRST(list)) != NULL) {
                filter_detach(kn);
                knote_drop(kn);
        }
        lwkt_reltoken(&kq_token);
}
/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        filter_detach(kn);
                        knote_drop(kn);
                        goto restart;
                }
        }
        lwkt_reltoken(&kq_token);
}
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                            &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                fdrop(kn->kn_fp);
        knote_free(kn);
}
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        kq->kq_count++;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}
static void
knote_init(void)
{
        knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
        return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
        zfree(knote_zone, kn);
}