/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};
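
/*
 * NOTE: pchanges counts how many changelist entries have been consumed
 * so far, letting kevent_copyin() pick up where the previous chunked
 * copyin left off.
 */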

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct timespec *tsp, int *errorp);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
};
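
/*
 * System filters are identified by negative numbers and mapped to a
 * 0-based index into the table above with a one's complement:
 * kqueue_register() below does fops = sysfilt_ops[~kev->filter], so
 * EVFILT_READ (-1) selects slot 0, EVFILT_WRITE (-2) slot 1, and so on.
 */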

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	get_mplock();
	if (kn->kn_filter != EVFILT_READ) {
		rel_mplock();
		return (1);
	}

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	lwkt_gettoken(&proc_token);
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (EACCES);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&proc_token);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  this might modify another process. */
	p = kn->kn_ptr.p_proc;
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will soon be
	 * gone.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
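
/*
 * Example (userland sketch, not part of this file): the NOTE_TRACK
 * handling above is what services a registration such as
 *
 *	struct kevent kev;
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * where forks of child_pid are reported as NOTE_CHILD events carrying
 * the parent's pid in kev.data.
 */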

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
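
	/*
	 * e.g. kn_sdata == 2500 (2.5 seconds) splits into tv_sec = 2 and
	 * tv_usec = 500000 before tvtohz_high() converts it to ticks.
	 */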
	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
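
/*
 * Example (userland sketch, not part of this file): a periodic 500ms
 * timer serviced by the filter above:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kn_data counts expirations since the event was last read; EV_ONESHOT
 * in kev.flags would make the timer fire only once.
 */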

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_fdp = fdp;
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;
	struct klist *list;
	int hv;

	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		kn->kn_fop->f_detach(kn);
		if (kn->kn_fop->f_isfd) {
			list = &kn->kn_fp->f_klist;
			SLIST_REMOVE(list, kn, knote, kn_link);
			fdrop(kn->kn_fp);
			kn->kn_fp = NULL;
		} else {
			hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
			list = &kq->kq_knhash[hv];
			SLIST_REMOVE(list, kn, knote, kn_link);
		}
		TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
		if (kn->kn_status & KN_QUEUED)
			knote_dequeue(kn);
		knote_free(kn);
	}

	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
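
/*
 * Example (userland sketch, not part of this file): the syscall above
 * backs the kqueue(2) library call:
 *
 *	int kq = kqueue();
 *	if (kq < 0)
 *		err(1, "kqueue");
 *
 * The returned descriptor is then passed to kevent(2) and is closed
 * like any other file descriptor.
 */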

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;
	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;
	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}

int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec ats;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	struct kevent kev[KQ_NEVENTS];

	tsp = tsp_in;
	*res = 0;

	/*
	 * Apply the changelist in KQ_NEVENTS-sized chunks.
	 */
	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return (error);
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);
			if (error) {
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				kevent_copyoutfn(uap, kevp, 1, res);
				nevents--;
				nerrors++;
			}
		}
	}
	if (nerrors)
		return (0);

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Collect as many events as we can.  The timeout on successive
	 * loops is disabled (kqueue_scan() becomes non-blocking).
	 *
	 * The loop stops if an error occurs or all events have been
	 * scanned.  The copyoutfn function does not have to increment
	 * (*res) in order for the loop to continue.
	 */
	total = 0;
	error = 0;
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;
		i = kqueue_scan(kq, kev, n, tsp, &error);
		if (i == 0)
			break;
		error = kevent_copyoutfn(uap, kev, i, res);
		total += i;
		if (error || i != n)
			break;

		/*
		 * successive loops are non-blocking only if (*res)
		 * is non-zero, so force a zero timeout in that case.
		 */
		if (*res) {
			ats.tv_sec = 0;
			ats.tv_nsec = 0;
			tsp = &ats;
		}
	}
	return (error);
}

int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);
	return (error);
}
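
/*
 * Example (userland sketch, not part of this file): a typical call that
 * reaches sys_kevent(), registering one read filter and waiting up to
 * one second for events:
 *
 *	struct kevent chg, ev;
 *	struct timespec to = { 1, 0 };
 *
 *	EV_SET(&chg, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &chg, 1, &ev, 1, &to);
 *
 * sock_fd and kq are assumed to be an open socket and a kqueue
 * descriptor, respectively.
 */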

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);

		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter)
					break;
			}
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}

done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Scan the kqueue, blocking if necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->ts_secs/nsecs are both
 * 0 we do not block at all.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct timespec *tsp, int *errorp)
{
	struct knote *kn, marker;
	int total;

	total = 0;
again:
	if (kq->kq_count == 0) {
		if (tsp == NULL) {
			kq->kq_state |= KQ_SLEEP;
			*errorp = tsleep(kq, PCATCH, "kqread", 0);
		} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
			*errorp = EWOULDBLOCK;
		} else {
			struct timespec ats;
			struct timespec atx = *tsp;
			int timeout;

			nanouptime(&ats);
			timespecsub(&atx, &ats);
			if (atx.tv_sec < 0) {
				*errorp = EWOULDBLOCK;
			} else {
				timeout = atx.tv_sec > 24 * 60 * 60 ?
				    24 * 60 * 60 * hz : tstohz_high(&atx);
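				/*
				 * The 24-hour clamp keeps the
				 * seconds-to-ticks conversion well away
				 * from 32-bit int overflow (presumably
				 * why the bound was chosen); longer
				 * timeouts simply wake up and loop via
				 * the 'again' label.
				 */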
				kq->kq_state |= KQ_SLEEP;
				*errorp = tsleep(kq, PCATCH, "kqread", timeout);
			}
		}
		if (*errorp == 0)
			goto again;
		/* don't restart after signals... */
		if (*errorp == ERESTART)
			*errorp = EINTR;
		else if (*errorp == EWOULDBLOCK)
			*errorp = 0;
		return (total);
	}

	/*
	 * Collect events.  Continuous mode events may get recycled
	 * past the marker so we stop when we hit it unless no events
	 * have been collected.
	 */
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_knpend);
		if (kn == &marker)
			break;
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp++ = kn->kn_kevent;
		++total;
		--count;

		/*
		 * Post-event action on the note
		 */
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			kn->kn_fop->f_detach(kn);
			knote_drop(kn);
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	return (total);
}

/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	error = 0;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
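
/*
 * Example (userland sketch, not part of this file): arming SIGIO
 * delivery through the ioctls above:
 *
 *	int on = 1;
 *	pid_t pid = getpid();
 *
 *	ioctl(kq, FIOSETOWN, &pid);
 *	ioctl(kq, FIOASYNC, &on);
 *
 * Once set, knote_enqueue() posts SIGIO when the first event lands on
 * an empty queue.
 */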

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;

	get_mplock();
	crit_enter();
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	crit_exit();
	rel_mplock();
	return (revents);
}

static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	get_mplock();
	kqueue_terminate(kq);
	fp->f_data = NULL;
	funsetown(kq->kq_sigio);
	rel_mplock();

	kfree(kq, M_KQUEUE);
	return (0);
}
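
/*
 * Wake up anything waiting for events on this kqueue: a thread sleeping
 * in kqueue_scan(), a select/poll waiter recorded via kqueue_poll(),
 * and any kqueue stacked on top of this one through kqueue_kqfilter().
 */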
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
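
/*
 * Event sources call this through the KNOTE() macro when their state
 * changes; e.g. a driver or subsystem that owns a klist typically does
 * KNOTE(&sel->si_note, 0) from its wakeup path (the sel/si_note naming
 * here follows the selinfo convention, not a requirement of this API).
 */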

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn);
			goto restart;
		}
	}
}

static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_isfd) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
	kn->kn_status = 0;
}

/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp);
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	crit_enter();
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	crit_exit();

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	crit_enter();

	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	crit_exit();
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}