/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
};

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker, int closedcounter);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
		    struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
		    int us);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int 		kq_ncallouts = 0;
static int 		kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int		kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

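/*
 * NOTE: kq_sleep_threshold above is consulted by the KEVENT_TIMEOUT_PRECISE
 *	 path in kern_kevent() below; a remaining wait shorter than this many
 *	 nanoseconds is busy-waited with DELAY() instead of sleeping.
 */
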
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

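/*
 * KN_HASH above folds the second byte of the identifier into the first
 * before masking, e.g. KN_HASH(0x1234, 63) == (0x1234 ^ 0x12) & 63 == 0x26,
 * so runs of consecutive fds do not all land in the same bucket.
 */
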
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
	&fs_filtops,			/* EVFILT_FS */
};

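/*
 * Filter numbers are small negative integers (EVFILT_READ == -1, etc),
 * so the table above is indexed with ~kev->filter to convert them to
 * 0-based array indices; see floadkevfps() and kqueue_register() below.
 */
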
static struct knote_cache_list	knote_cache_lists[MAXCPU];

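/*
 * The array above holds per-cpu free-knote caches.  knote_free() parks up
 * to KNOTE_CACHE_MAX knotes on the local cpu's list and kqueue_register()
 * consumes from it, avoiding kmalloc/kfree in the common path.
 */
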
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return (1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return (ret);
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

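/*
 * filt_fileattach() simply vectors through the file's fileops table, so
 * attaching EVFILT_READ to a kqueue descriptor, for example, ends up in
 * kqueue_kqfilter() below via kqueueops.fo_kqfilter.
 */
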
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

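/*
 * Example (userland, illustrative): waiting for a child to exit while
 * tracking any processes it forks:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A NOTE_FORK with NOTE_TRACK set exercises the re-registration path
 * above; registration failures surface as NOTE_TRACKERR in fflags.
 */
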
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-code knote_acquire(), since we can't sleep in a callout;
	 * however, we do need to record this expiration.
	 */
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);
	knote_release(kn);

	lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kn->kn_hook = NULL;
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

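/*
 * Example (userland, illustrative): the 'data' field of an EVFILT_TIMER
 * kevent is a period in milliseconds, as converted by filt_timerreset():
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// fires every ~500ms
 *
 * EV_CLEAR is forced by filt_timerattach(), so kn_data accumulates the
 * number of expirations between scans.
 */
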
static int
filt_userattach(struct knote *kn)
{
	u_int ffctrl;

	if (kn->kn_sfflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;

	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
	kn->kn_sfflags &= NOTE_FFLAGSMASK;
	switch (ffctrl) {
	case NOTE_FFNOP:
		break;

	case NOTE_FFAND:
		kn->kn_fflags &= kn->kn_sfflags;
		break;

	case NOTE_FFOR:
		kn->kn_fflags |= kn->kn_sfflags;
		break;

	case NOTE_FFCOPY:
		kn->kn_fflags = kn->kn_sfflags;
		break;

	default:
		/* XXX Return error? */
		break;
	}
	/* We just happen to copy this value as well.  Undocumented. */
	kn->kn_data = kn->kn_sdata;

	return (0);
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_fflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_fflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_fflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		/* We just happen to copy this value as well.  Undocumented. */
		kn->kn_data = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/*
			 * Clearing kn->kn_data is fine, since it gets set
			 * every time anyway.  We just shouldn't clear
			 * kn->kn_fflags here, since that would limit the
			 * possible uses of this API.  NOTE_FFAND or
			 * NOTE_FFCOPY should be used for explicitly clearing
			 * kn->kn_fflags.
			 */
			kn->kn_data = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_fflags;
		kev->data = kn->kn_data;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%lu)", type);
		break;
	}
}

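/*
 * Example (userland, illustrative) EVFILT_USER round trip:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		// register
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		// trigger
 *
 * The trigger lands in filt_usertouch(EVENT_REGISTER) above, which sets
 * kn_ptr.hookid and thereby makes filt_user() report the event.
 */
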
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{
	kn->kn_flags |= EV_CLEAR;
	knote_insert(&fs_klist, kn);

	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
	knote_remove(&fs_klist, kn);
}

static int
filt_fs(struct knote *kn, long hint)
{
	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_getpooltoken(kq);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_relpooltoken(kq);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;
	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;
	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}

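/*
 * The copyin/copyout indirection above lets kern_kevent() serve both the
 * kevent() system call and in-kernel consumers such as select/poll, which
 * supply their own copy functions (see the sys_poll() and doselect()
 * references below).
 */
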
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
    struct timespec *tsp_in, int flags)
{
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int gobbled;
	int lres;
	int limit = kq_checkloop;
	int closedcounter;
	struct kevent kev[KQ_NEVENTS];
	struct kevent *kevp;
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	closedcounter = kq->kq_fdp->fd_closedcounter;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return (error);
		if (n == 0)
			break;
		for (i = 0; i < n; ++i)
			kev[i].flags &= ~EV_SYSFLAGS;
		for (i = 0; i < n; ++i) {
			gobbled = n - i;
			error = kqueue_register(kq, &kev[i], &gobbled);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			i += gobbled - 1;
			kevp = &kev[i];
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return (error);
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return (0);

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats, tsp);	/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout, ustimeout = 0;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats, &atx);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				} else {
					timeout = atx.tv_sec > 24 * 60 * 60 ?
					    24 * 60 * 60 * hz :
					    tstohz_high(&atx);
				}
				if (flags & KEVENT_TIMEOUT_PRECISE &&
				    timeout != 0) {
					if (atx.tv_sec == 0 &&
					    atx.tv_nsec < kq_sleep_threshold) {
						DELAY(atx.tv_nsec / 1000);
						error = EWOULDBLOCK;
						break;
					} else if (atx.tv_sec < 2000) {
						ustimeout = atx.tv_sec *
						    1000000 + atx.tv_nsec/1000;
					} else {
						ustimeout = 2000000000;
					}
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				if ((flags & KEVENT_TIMEOUT_PRECISE) &&
				    ustimeout != 0) {
					error = precise_sleep(kq, PCATCH,
					    "kqread", ustimeout);
				} else {
					error = tsleep(kq, PCATCH, "kqread",
					    timeout);
				}

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker, closedcounter);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 user.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}

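/*
 * NOTE: The marker knote inserted at the tail of kq_knpend above bounds
 *	 the scan: kqueue_scan() stops when it reaches our marker, so events
 *	 queued after the scan started, and markers belonging to concurrent
 *	 scanners, are not consumed twice.
 */
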
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}
	fp = holdfp(td, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		error = EBADF;
		goto done;
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
	    kevent_copyin, kevent_copyout, tsp, 0);
done:
	dropfp(td, uap->fd, fp);

	return (error);
}

/*
 * Efficiently load multiple file pointers.  This significantly reduces
 * threaded overhead.  When doing simple polling we can depend on the
 * per-thread (fd,fp) cache.  With more descriptors, we batch.
 */
static void
floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
	    struct file **fp, int climit)
{
	struct filterops *fops;
	int tdcache;

	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
		tdcache = 1;
	} else {
		tdcache = 0;
		spin_lock_shared(&fdp->fd_spin);
	}

	while (climit) {
		*fp = NULL;
		if (kev->filter < 0 &&
		    kev->filter + EVFILT_SYSCOUNT >= 0) {
			fops = sysfilt_ops[~kev->filter];
			if (fops->f_flags & FILTEROP_ISFD) {
				if (tdcache) {
					*fp = holdfp(td, kev->ident, -1);
				} else {
					*fp = holdfp_fdp_locked(fdp,
								kev->ident, -1);
				}
			}
		}
		--climit;
		++fp;
		++kev;
	}
	if (tdcache == 0)
		spin_unlock_shared(&fdp->fd_spin);
}

/*
 * Register up to *countp kev's.  Always registers at least 1.
 *
 * The number registered is returned in *countp.
 *
 * If an error occurs or a kev is flagged EV_RECEIPT, it is
 * processed and included in *countp, and processing then
 * terminates.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp[KQ_NEVENTS];
	struct knote *kn = NULL;
	struct thread *td = curthread;
	int error = 0;
	int count = 0;
	int climit;
	int closedcounter;
	struct knote_cache_list *cache_list;

	climit = *countp;
	if (climit > KQ_NEVENTS)
		climit = KQ_NEVENTS;
	closedcounter = fdp->fd_closedcounter;
	floadkevfps(td, fdp, kev, fp, climit);

	lwkt_getpooltoken(kq);

	/*
	 * To avoid races, only one thread can register events on this
	 * kqueue at a time.
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

loop:
	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0) {
			error = EINVAL;
			goto done;
		}
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		error = EINVAL;
		goto done;
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (fp[count] == NULL) {
			error = EBADF;
			goto done;
		}
	}

	/*
	 * Refill the local per-cpu knote cache if it is empty, so the
	 * allocation below cannot block at an awkward time.
	 */
	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
	}

	/*
	 * Locate the list (fd or hash bucket) holding any existing knote
	 * for this (kq, filter, ident) tuple.
	 */
	kn = NULL;
	if (fp[count] != NULL) {
		list = &fp[count]->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	} else {
		list = NULL;
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
				    kn_link);
				cache_list->knote_cache_cnt--;
			}
			kn->kn_fp = fp[count];
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp[count] = NULL;	/* safety */

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(curthread, fdp, kev->ident, kn->kn_fp,
					  closedcounter)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote and terminate the batch; the
		 * deleted kev is counted in *countp below.
		 */
		knote_detach_and_drop(kn);
		++count;
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

	/*
	 * Loop control.  We stop on errors (above), and also stop after
	 * processing EV_RECEIPT, so the caller can process it.
	 */
	++count;
	if (kev->flags & EV_RECEIPT) {
		error = 0;
		goto done;
	}
	++kev;
	if (count < climit) {
		if (fp[count-1])		/* drop unprocessed fp */
			fdrop(fp[count-1]);
		goto loop;
	}

done:
	if (td != NULL) {		/* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);

	/*
	 * A kev that errored out is still counted in *countp so the
	 * caller can locate and report it (see the header comment).
	 */
	if (error)
		++count;
	*countp = count;

	/*
	 * Drop unprocessed file pointers
	 */
	if (count && fp[count-1])
		fdrop(fp[count-1]);
	while (count < climit) {
		if (fp[count])
			fdrop(fp[count]);
		++count;
	}
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker, int closedcounter)
{
	struct knote *kn, local_marker;
	thread_t td = curthread;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp, closedcounter)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
							   KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}

/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	lwkt_getpooltoken(kq);
	error = 0;

	switch (com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_relpooltoken(kq);
	return (error);
}

static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		u_int sleep_cnt = kq->kq_sleep_cnt;

		kq->kq_sleep_cnt = 0;
		if (sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		return;
	}
	kfree(kn, M_KQUEUE);
}

struct sleepinfo {
	void	*ident;
	int	timedout;
};

static void
precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
{
	struct sleepinfo *si;

	si = info->data;
	si->timedout = 1;
	wakeup(si->ident);
}

static int
precise_sleep(void *ident, int flags, const char *wmesg, int us)
{
	struct systimer info;
	struct sleepinfo si = {
		.ident = ident,
		.timedout = 0,
	};
	int r;

	tsleep_interlock(ident, flags);
	systimer_init_oneshot(&info, precise_sleep_intr, &si, us);
	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
	systimer_del(&info);
	if (si.timedout)
		r = EWOULDBLOCK;

	return (r);
}

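/*
 * precise_sleep() pairs tsleep_interlock() with a one-shot systimer so the
 * wakeup granularity is microseconds rather than scheduler ticks; the
 * KEVENT_TIMEOUT_PRECISE path in kern_kevent() uses it when a sub-tick
 * timeout is requested.
 */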