kernel - Provide descriptions for lwkt.* and debug.* sysctls
[dragonfly.git] / sys / kern / kern_event.c
index 40c42ab..1aefd31 100644 (file)
  * Global token for kqueue subsystem
  */
 struct lwkt_token kq_token = LWKT_TOKEN_UP_INITIALIZER(kq_token);
+SYSCTL_INT(_lwkt, OID_AUTO, kq_mpsafe,
+    CTLFLAG_RW, &kq_token.t_flags, 0,
+    "Require MP lock for kq_token");
+SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
+    CTLFLAG_RW, &kq_token.t_collisions, 0,
+    "Collision counter of kq_token");
 
 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
 
@@ -330,6 +336,12 @@ filt_proc(struct knote *kn, long hint)
        return (kn->kn_fflags != 0);
 }
 
+/*
+ * The callout interlocks with callout_stop() (or should), so the
+ * knote should still be a valid structure.  However, the timeout
+ * can race a deletion, so if KN_DELETING is set we just don't touch
+ * the knote.
+ */
 static void
 filt_timerexpire(void *knx)
 {
@@ -338,16 +350,20 @@ filt_timerexpire(void *knx)
        struct timeval tv;
        int tticks;
 
-       kn->kn_data++;
-       KNOTE_ACTIVATE(kn);
+       lwkt_gettoken(&kq_token);
+       if ((kn->kn_status & KN_DELETING) == 0) {
+               kn->kn_data++;
+               KNOTE_ACTIVATE(kn);
 
-       if ((kn->kn_flags & EV_ONESHOT) == 0) {
-               tv.tv_sec = kn->kn_sdata / 1000;
-               tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
-               tticks = tvtohz_high(&tv);
-               calloutp = (struct callout *)kn->kn_hook;
-               callout_reset(calloutp, tticks, filt_timerexpire, kn);
+               if ((kn->kn_flags & EV_ONESHOT) == 0) {
+                       tv.tv_sec = kn->kn_sdata / 1000;
+                       tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
+                       tticks = tvtohz_high(&tv);
+                       calloutp = (struct callout *)kn->kn_hook;
+                       callout_reset(calloutp, tticks, filt_timerexpire, kn);
+               }
        }
+       lwkt_reltoken(&kq_token);
 }
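
The rearm path above converts kn_sdata, the user-supplied period in
milliseconds, into a timeval and then into ticks via tvtohz_high().  A
minimal sketch of that same conversion, factored into a hypothetical helper
(not part of this commit):

    /*
     * Hypothetical helper: convert a period in milliseconds (as stored in
     * kn_sdata) into ticks, mirroring the math in filt_timerexpire().
     */
    static int
    kq_ms_to_ticks(long ms)
    {
        struct timeval tv;

        tv.tv_sec = ms / 1000;
        tv.tv_usec = (ms % 1000) * 1000;
        return (tvtohz_high(&tv));
    }
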
 
 /*
@@ -396,6 +412,55 @@ filt_timer(struct knote *kn, long hint)
        return (kn->kn_data != 0);
 }
 
+/*
+ * Acquire a knote, return non-zero on success, 0 on failure.
+ *
+ * If we cannot acquire the knote we sleep and return 0.  The knote
+ * may be stale on return in this case and the caller must restart
+ * whatever loop they are in.
+ */
+static __inline
+int
+knote_acquire(struct knote *kn)
+{
+       if (kn->kn_status & KN_PROCESSING) {
+               kn->kn_status |= KN_WAITING | KN_REPROCESS;
+               tsleep(kn, 0, "kqepts", hz);
+               /* knote may be stale now */
+               return(0);
+       }
+       kn->kn_status |= KN_PROCESSING;
+       return(1);
+}
+
+/*
+ * Release an acquired knote, clearing KN_PROCESSING and handling any
+ * KN_REPROCESS events.
+ *
+ * Non-zero is returned if the knote is destroyed.
+ */
+static __inline
+int
+knote_release(struct knote *kn)
+{
+       while (kn->kn_status & KN_REPROCESS) {
+               kn->kn_status &= ~KN_REPROCESS;
+               if (kn->kn_status & KN_WAITING) {
+                       kn->kn_status &= ~KN_WAITING;
+                       wakeup(kn);
+               }
+               if (kn->kn_status & KN_DELETING) {
+                       knote_detach_and_drop(kn);
+                       return(1);
+                       /* NOT REACHED */
+               }
+               if (filter_event(kn, 0))
+                       KNOTE_ACTIVATE(kn);
+       }
+       kn->kn_status &= ~KN_PROCESSING;
+       return(0);
+}
+
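
Together these helpers form the idiom used throughout the rest of this
patch: walk a list, try to acquire each knote of interest, and restart the
walk whenever acquisition fails, because the knote may have been torn down
while we slept.  A minimal sketch of the idiom (the match predicate is a
placeholder, not code from this commit):

    /*
     * Sketch of the acquire/restart idiom.  Lists hanging off a struct
     * file chain through kn_link, as in knote_fdclose() below.
     */
    static void
    example_scan(struct klist *list)
    {
        struct knote *kn;

    again:
        SLIST_FOREACH(kn, list, kn_link) {
            if (!matches(kn))                /* placeholder predicate */
                continue;
            if (knote_acquire(kn) == 0)      /* slept; kn may be stale */
                goto again;
            /* ... operate on the knote; this may block ... */
            if (knote_release(kn))           /* non-zero: kn was torn down */
                goto again;
            break;
        }
    }
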
 /*
  * Initialize a kqueue.
  *
@@ -416,6 +481,9 @@ kqueue_init(struct kqueue *kq, struct filedesc *fdp)
 /*
  * Terminate a kqueue.  Freeing the actual kq itself is left up to the
  * caller (it might be embedded in a lwp so we don't do it here).
+ *
+ * The kq's knlist must be completely eradicated, so we block on any
+ * processing races.
  */
 void
 kqueue_terminate(struct kqueue *kq)
@@ -423,9 +491,10 @@ kqueue_terminate(struct kqueue *kq)
        struct knote *kn;
 
        lwkt_gettoken(&kq_token);
-       while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL)
-               knote_detach_and_drop(kn);
-
+       while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
+               if (knote_acquire(kn))
+                       knote_detach_and_drop(kn);
+       }
        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
@@ -597,6 +666,7 @@ kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
+       marker.kn_status = KN_PROCESSING;
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
@@ -749,10 +819,13 @@ kqueue_register(struct kqueue *kq, struct kevent *kev)
                        return (EBADF);
                }
 
+again1:
                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
+                               if (knote_acquire(kn) == 0)
+                                       goto again1;
                                break;
                        }
                }
@@ -762,14 +835,22 @@ kqueue_register(struct kqueue *kq, struct kevent *kev)
                        
                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
+again2:
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
-                                   kn->kn_filter == kev->filter)
+                                   kn->kn_filter == kev->filter) {
+                                       if (knote_acquire(kn) == 0)
+                                               goto again2;
                                        break;
+                               }
                        }
                }
        }
 
+       /*
+        * NOTE: At this point if kn is non-NULL we will have acquired
+        *       it and set KN_PROCESSING.
+        */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
@@ -802,27 +883,27 @@ kqueue_register(struct kqueue *kq, struct kevent *kev)
                        kn->kn_kevent = *kev;
 
                        /*
-                        * Interlock against creation/deletion races due
-                        * to f_attach() blocking.  knote_attach() will set
-                        * KN_CREATING.
+                        * KN_PROCESSING prevents the knote from getting
+                        * ripped out from under us while we are trying
+                        * to attach it, in case the attach blocks.
                         */
+                       kn->kn_status = KN_PROCESSING;
                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
-                               kn->kn_status |= KN_DELETING;
+                               kn->kn_status |= KN_DELETING | KN_REPROCESS;
                                knote_drop(kn);
                                goto done;
                        }
-                       kn->kn_status &= ~KN_CREATING;
 
                        /*
-                        * Interlock against close races which remove our
-                        * knotes.  We do not want to end up with a knote
-                        * on a closed descriptor.
+                        * Interlock against close races which either tried
+                        * to remove our knote while we were blocked or missed
+                        * it entirely prior to our attachment.  We do not
+                        * want to end up with a knote on a closed descriptor.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
-                           (error = checkfdclosed(fdp, kev->ident, kn->kn_fp)) != 0) {
-                               knote_detach_and_drop(kn);
-                               goto done;
+                           checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
+                               kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        }
                } else {
                        /*
@@ -830,30 +911,56 @@ kqueue_register(struct kqueue *kq, struct kevent *kev)
                         * initial EV_ADD, but doing so will not reset any 
                        * filters which have already been triggered.
                         */
+                       KKASSERT(kn->kn_status & KN_PROCESSING);
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }
 
-               if (filter_event(kn, 0))
-                       KNOTE_ACTIVATE(kn);
+               /*
+                * Execute the filter event to immediately activate the
+                * knote if necessary.  If reprocessing events are pending
+                * due to blocking above we do not run the filter here
+                * but instead let knote_release() do it.  Otherwise we
+                * might run the filter on a deleted event.
+                */
+               if ((kn->kn_status & KN_REPROCESS) == 0) {
+                       if (filter_event(kn, 0))
+                               KNOTE_ACTIVATE(kn);
+               }
        } else if (kev->flags & EV_DELETE) {
+               /*
+                * Delete the existing knote
+                */
                knote_detach_and_drop(kn);
                goto done;
        }
 
+       /*
+        * Disablement does not deactivate a knote here.
+        */
        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }
 
+       /*
+        * Re-enablement may have to immediately enqueue an active knote.
+        */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
-                   ((kn->kn_status & KN_QUEUED) == 0))
+                   ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
+               }
        }
 
+       /*
+        * Handle any required reprocessing
+        */
+       knote_release(kn);
+       /* kn may be invalid now */
+
 done:
        lwkt_reltoken(&kq_token);
        if (fp != NULL)
@@ -916,6 +1023,7 @@ kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
 
         total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
+       local_marker.kn_status = KN_PROCESSING;
 
        /*
         * Collect events.
@@ -935,83 +1043,85 @@ kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                        continue;
                }
 
+               /*
+                * We can't skip a knote undergoing processing, otherwise
+                * we risk not returning it when the user process expects
+                * it to be returned.  Sleep and retry.
+                */
+               if (knote_acquire(kn) == 0)
+                       continue;
+
                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
-                *           event from being KNOTE()d again while we
-                *           potentially block in the filter function.
-                *
-                *           This protects the knote from everything except
-                *           getting dropped.
+                *           event from being KNOTE_ACTIVATE()d while
+                *           the queue state is in limbo, in case we
+                *           block.
                 *
-                * WARNING!  KN_PROCESSING is meant to handle any cases
-                *           that leaving KN_QUEUED set does not.
+                * WARNING!  We must set KN_PROCESSING to avoid races
+                *           against deletion or another thread's
+                *           processing.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;
-               kn->kn_status |= KN_PROCESSING;
 
                /*
-                * Even though close/dup2 will clean out pending knotes this
-                * code is MPSAFE and it is possible to race a close inbetween
-                * the removal of its descriptor and the clearing out of the
-                * knote(s).
+                * We have to deal with an extremely important race against
+                * file descriptor close()s here.  The file descriptor can
+                * be closed out from under us (the close path is MPSAFE),
+                * leaving a small window of opportunity between the close
+                * and the call to knote_fdclose().
                 *
-                * In this case we must ensure that the knote is not queued
-                * to knpend or we risk an infinite kernel loop calling
-                * kscan, because the select/poll code will not be able to
-                * delete the event.
+                * If we hit that window here while doselect or dopoll is
+                * trying to delete a spurious event, they will not be able
+                * to match up the event against a knote and will go haywire.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
-                       kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
-                                          KN_PROCESSING);
-                       continue;
+                       kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }
 
-               /*
-                * If disabled we ensure the event is not queued but leave
-                * its active bit set.  On re-enablement the event may be
-                * immediately triggered.
-                */
                if (kn->kn_status & KN_DISABLED) {
-                       kn->kn_status &= ~(KN_QUEUED | KN_PROCESSING);
-                       continue;
-               }
-
-               /*
-                * If not running in one-shot mode and the event is no
-                * longer present we ensure it is removed from the queue and
-                * ignore it.
-                */
-               if ((kn->kn_flags & EV_ONESHOT) == 0 &&
-                   filter_event(kn, 0) == 0) {
-                       kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
-                                          KN_PROCESSING);
-                       continue;
+                       /*
+                        * If disabled we ensure the event is not queued
+                        * but leave its active bit set.  On re-enablement
+                        * the event may be immediately triggered.
+                        */
+                       kn->kn_status &= ~KN_QUEUED;
+               } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
+                          (kn->kn_status & KN_DELETING) == 0 &&
+                          filter_event(kn, 0) == 0) {
+                       /*
+                        * If not running in one-shot mode and the event
+                        * is no longer present we ensure it is removed
+                        * from the queue and ignore it.
+                        */
+                       kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
+               } else {
+                       /*
+                        * Post the event
+                        */
+                       *kevp++ = kn->kn_kevent;
+                       ++total;
+                       --count;
+
+                       if (kn->kn_flags & EV_ONESHOT) {
+                               kn->kn_status &= ~KN_QUEUED;
+                               kn->kn_status |= KN_DELETING | KN_REPROCESS;
+                       } else if (kn->kn_flags & EV_CLEAR) {
+                               kn->kn_data = 0;
+                               kn->kn_fflags = 0;
+                               kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
+                       } else {
+                               TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
+                               kq->kq_count++;
+                       }
                }
 
-               *kevp++ = kn->kn_kevent;
-               ++total;
-               --count;
-
                /*
-                * Post-event action on the note
+                * Handle any post-processing states
                 */
-               if (kn->kn_flags & EV_ONESHOT) {
-                       kn->kn_status &= ~(KN_QUEUED | KN_PROCESSING);
-                       knote_detach_and_drop(kn);
-               } else if (kn->kn_flags & EV_CLEAR) {
-                       kn->kn_data = 0;
-                       kn->kn_fflags = 0;
-                       kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
-                                          KN_PROCESSING);
-               } else {
-                       TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
-                       kq->kq_count++;
-                       kn->kn_status &= ~KN_PROCESSING;
-               }
+               knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
 
@@ -1138,19 +1248,11 @@ filter_attach(struct knote *kn)
  *
  * Calls filterops f_detach function, acquiring mplock if filter is not
  * marked as FILTEROP_MPSAFE.
- *
- * This can race due to the MP lock and/or locks acquired by f_detach,
- * so we interlock with KN_DELETING.  It is also possible to race
- * a create for the same reason if userland tries to delete the knote
- * before the create is complete.
  */
 static void
 knote_detach_and_drop(struct knote *kn)
 {
-       if (kn->kn_status & (KN_CREATING | KN_DELETING))
-               return;
-       kn->kn_status |= KN_DELETING;
-
+       kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
@@ -1173,22 +1275,23 @@ filter_event(struct knote *kn, long hint)
 {
        int ret;
 
-       if (kn->kn_status & (KN_CREATING | KN_DELETING))
-               return(0);
-
-       if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
-               get_mplock();
+       if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
-               rel_mplock();
        } else {
+               get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
+               rel_mplock();
        }
-
        return (ret);
 }
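
Whether the MP lock is taken around f_event (and f_attach/f_detach) is
driven entirely by FILTEROP_MPSAFE in the filter's f_flags.  A hypothetical
MPSAFE, fd-backed filter would be declared along these lines (all names
illustrative, not from this commit):

    static struct filterops mydev_filtops = {
        .f_flags  = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach = mydev_kqattach,   /* hypothetical driver hooks */
        .f_detach = mydev_kqdetach,
        .f_event  = mydev_kqevent,    /* runs without the MP lock */
    };
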
 
 /*
- * walk down a list of knotes, activating them if their event has triggered.
+ * Walk down a list of knotes, activating them if their event has triggered.
+ *
+ * If we encounter any knotes which are undergoing processing we just mark
+ * them for reprocessing and do not try to [re]activate them.  However,
+ * if a hint is being passed we have to wait, and that makes things a bit
+ * sticky.
  */
 void
 knote(struct klist *list, long hint)
@@ -1196,41 +1299,83 @@ knote(struct klist *list, long hint)
        struct knote *kn;
 
        lwkt_gettoken(&kq_token);
+restart:
        SLIST_FOREACH(kn, list, kn_next) {
-               if (filter_event(kn, hint))
-                       KNOTE_ACTIVATE(kn);
+               if (kn->kn_status & KN_PROCESSING) {
+                       /*
+                        * Someone else is processing the knote, ask the
+                        * other thread to reprocess it and don't mess
+                        * with it otherwise.
+                        */
+                       if (hint == 0) {
+                               kn->kn_status |= KN_REPROCESS;
+                               continue;
+                       }
+
+                       /*
+                        * If the hint is non-zero we have to wait or risk
+                        * losing the state the caller is trying to update.
+                        *
+                        * XXX This is a real problem, certain process
+                        *     and signal filters will bump kn_data for
+                        *     already-processed notes more than once if
+                        *     we restart the list scan.  FIXME.
+                        */
+                       kn->kn_status |= KN_WAITING | KN_REPROCESS;
+                       tsleep(kn, 0, "knotec", hz);
+                       goto restart;
+               }
+
+               /*
+                * Become the reprocessing master ourselves.
+                *
+                * If hint is non-zero, running the event is mandatory
+                * when not deleting, so do it whether reprocessing is
+                * set or not.
+                */
+               kn->kn_status |= KN_PROCESSING;
+               if ((kn->kn_status & KN_DELETING) == 0) {
+                       if (filter_event(kn, hint))
+                               KNOTE_ACTIVATE(kn);
+               }
+               if (knote_release(kn))
+                       goto restart;
        }
        lwkt_reltoken(&kq_token);
 }
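
For context, knote() is what event sources call (usually via the KNOTE()
macro) when their state changes, so the processing interlock above is what
every driver-side wakeup now goes through.  A hypothetical notification
path might look like this (names illustrative, not from this commit):

    /* Hypothetical driver: wake kqueue readers when data arrives. */
    static void
    mydev_rxintr(struct mydev_softc *sc)
    {
        sc->rx_ready = 1;
        /* hint 0: busy knotes are simply flagged KN_REPROCESS */
        KNOTE(&sc->kq_info.ki_note, 0);
    }
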
 
 /*
- * insert knote at head of klist
+ * Insert knote at head of klist.
  *
- * Requires: kq_token
+ * This function may only be called via a filter function, and thus
+ * kq_token should already be held and the knote marked for processing.
  */
 void
 knote_insert(struct klist *klist, struct knote *kn)
 {
-       lwkt_gettoken(&kq_token);
+       KKASSERT(kn->kn_status & KN_PROCESSING);
+       ASSERT_LWKT_TOKEN_HELD(&kq_token);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
-       lwkt_reltoken(&kq_token);
 }
 
 /*
- * remove knote from a klist
+ * Remove knote from a klist
  *
- * Requires: kq_token
+ * This function may only be called via a filter function, and thus
+ * kq_token should already be held and the knote marked for processing.
  */
 void
 knote_remove(struct klist *klist, struct knote *kn)
 {
-       lwkt_gettoken(&kq_token);
+       KKASSERT(kn->kn_status & KN_PROCESSING);
+       ASSERT_LWKT_TOKEN_HELD(&kq_token);
        SLIST_REMOVE(klist, kn, knote, kn_next);
-       lwkt_reltoken(&kq_token);
 }
 
 /*
- * remove all knotes from a specified klist
+ * Remove all knotes from a specified klist
+ *
+ * Only called from aio.
  */
 void
 knote_empty(struct klist *list)
@@ -1238,13 +1383,35 @@ knote_empty(struct klist *list)
        struct knote *kn;
 
        lwkt_gettoken(&kq_token);
-       while ((kn = SLIST_FIRST(list)) != NULL)
-               knote_detach_and_drop(kn);
+       while ((kn = SLIST_FIRST(list)) != NULL) {
+               if (knote_acquire(kn))
+                       knote_detach_and_drop(kn);
+       }
+       lwkt_reltoken(&kq_token);
+}
+
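+/*
+ * Move all knotes from the src kqinfo's list to the dst kqinfo's list,
+ * rewriting each knote's filter ops and hook along the way.  Each knote
+ * is acquired first so the move interlocks with any processing races.
+ */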
+void
+knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
+                   struct filterops *ops, void *hook)
+{
+       struct knote *kn;
+
+       lwkt_gettoken(&kq_token);
+       while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
+               if (knote_acquire(kn)) {
+                       knote_remove(&src->ki_note, kn);
+                       kn->kn_fop = ops;
+                       kn->kn_hook = hook;
+                       knote_insert(&dst->ki_note, kn);
+                       knote_release(kn);
+                       /* kn may be invalid now */
+               }
+       }
        lwkt_reltoken(&kq_token);
 }
 
 /*
- * remove all knotes referencing a specified fd
+ * Remove all knotes referencing a specified fd
  */
 void
 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
@@ -1255,13 +1422,19 @@ knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
 restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
-                       knote_detach_and_drop(kn);
+                       if (knote_acquire(kn))
+                               knote_detach_and_drop(kn);
                        goto restart;
                }
        }
        lwkt_reltoken(&kq_token);
 }
 
+/*
+ * Low level attach function.
+ *
+ * The knote should already be marked for processing.
+ */
 static void
 knote_attach(struct knote *kn)
 {
@@ -1279,9 +1452,13 @@ knote_attach(struct knote *kn)
        }
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
-       kn->kn_status = KN_CREATING;
 }
 
+/*
+ * Low level drop function.
+ *
+ * The knote should already be marked for processing.
+ */
 static void
 knote_drop(struct knote *kn)
 {
@@ -1306,13 +1483,17 @@ knote_drop(struct knote *kn)
        knote_free(kn);
 }
 
+/*
+ * Low level enqueue function.
+ *
+ * The knote should already be marked for processing.
+ */
 static void
 knote_enqueue(struct knote *kn)
 {
        struct kqueue *kq = kn->kn_kq;
 
        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
-
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;
@@ -1326,14 +1507,17 @@ knote_enqueue(struct knote *kn)
        kqueue_wakeup(kq);
 }
 
+/*
+ * Low level dequeue function.
+ *
+ * The knote should already be marked for processing.
+ */
 static void
 knote_dequeue(struct knote *kn)
 {
        struct kqueue *kq = kn->kn_kq;
 
        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
-       KKASSERT((kn->kn_status & KN_PROCESSING) == 0);
-
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;