kernel - Refactor smp collision statistics (2)
author Matthew Dillon <dillon@apollo.backplane.com>
Thu, 5 Oct 2017 16:09:27 +0000 (09:09 -0700)
committer Matthew Dillon <dillon@apollo.backplane.com>
Mon, 16 Oct 2017 18:30:22 +0000 (11:30 -0700)
* Refactor indefinite_info mechanics.  Instead of tracking indefinite
  loops on a per-thread basis for tokens, track them on a scheduler
  basis.  The scheduler records the overhead while it is live-looping
  on tokens, but the moment it finds a thread it can actually schedule,
  it stops (and restarts the next time it is entered), even if some of
  the other threads still have unresolved tokens.

  This gives us a fairer representation of how many cpu cycles are
  actually being wasted waiting for tokens.
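
  As a rough sketch of the new flow (mirroring the lwkt_switch() hunk
  further down; the surrounding scheduler loop and declarations are
  elided, so this fragment is illustrative rather than a standalone
  build):

        /*
         * A candidate thread's tokens could not be acquired: start the
         * per-cpu indefinite record on the first failure of this
         * scheduler pass (type 0 means "not currently recording").
         */
        ++ntd->td_contended;
        if (gd->gd_indefinite.type == 0)
                indefinite_init(&gd->gd_indefinite, NULL, 0, 't');

        /*
         * ... and the moment a schedulable thread is found (the
         * "havethread" path), close the record so the wasted cycles
         * are folded into the collision statistics.
         */
        if (gd->gd_indefinite.type)
                indefinite_done(&gd->gd_indefinite);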

* Go back to using a local indefinite_info in the lockmgr*(), mutex*(),
  and spinlock code.
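
  For example, the contested shared-spinlock path now keeps the record
  on the stack (condensed from the kern_spinlock.c hunk below; the
  actual acquisition retry logic is elided):

        indefinite_info_t info;

        indefinite_init(&info, ident, 0, 's');
        for (;;) {
                /* ... try to acquire the contested spinlock ... */
                if (indefinite_check(&info))
                        break;
        }
        indefinite_done(&info);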

* Refactor lockmgr() by implementing an __inline frontend that
  interprets the directive.  Since the directive is usually a
  constant, the inline effectively optimizes the switch() away.

  Use LK_NOCOLLSTATS to create a clean recursion that wraps the
  blocking case with the indefinite*() API.
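
  The blocking path of each lockmgr_*() backend follows the same shape
  (condensed from the kern_lock.c hunks below): on the first pass
  LK_NOCOLLSTATS is not yet set, so the function wraps a recursive
  call to itself with the indefinite*() API and lets the recursion do
  the actual blocking:

        if ((extflags & LK_NOCOLLSTATS) == 0) {
                indefinite_info_t info;

                flags |= LK_NOCOLLSTATS;        /* collect stats only once */
                indefinite_init(&info, lkp->lk_wmesg, 1, 'l');
                error = lockmgr_shared(lkp, flags);
                indefinite_done(&info);
                break;
        }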

sys/kern/kern_lock.c
sys/kern/kern_mutex.c
sys/kern/kern_spinlock.c
sys/kern/lwkt_thread.c
sys/kern/lwkt_token.c
sys/sys/globaldata.h
sys/sys/indefinite2.h
sys/sys/lock.h
sys/sys/thread.h

index f95acb3..cae043a 100644
@@ -79,39 +79,19 @@ SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
 #define COUNT(td, x)
 #endif
 
+static int lockmgr_waitupgrade(struct lock *lkp, u_int flags);
+
 /*
- * Set, change, or release a lock.
+ * Helper, assert basic conditions
  */
-int
-#ifndef        DEBUG_LOCKS
-lockmgr(struct lock *lkp, u_int flags)
-#else
-debuglockmgr(struct lock *lkp, u_int flags,
-            const char *name, const char *file, int line)
-#endif
+static __inline void
+_lockmgr_assert(struct lock *lkp, u_int flags)
 {
-       thread_t td;
-       thread_t otd;
-       int error;
-       int extflags;
-       int count;
-       int pflags;
-       int wflags;
-       int timo;
-       int info_init;
-#ifdef DEBUG_LOCKS
-       int i;
-#endif
-
-       error = 0;
-       info_init = 0;
-
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
-
 #ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
@@ -129,18 +109,33 @@ debuglockmgr(struct lock *lkp, u_int flags,
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
 #endif
+}
+
+/*
+ * Acquire a shared lock
+ */
+int
+lockmgr_shared(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t td;
+       int count;
+       int error;
+       int pflags;
+       int wflags;
+       int timo;
 
+       _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
+       error = 0;
 
-again:
-       count = lkp->lk_count;
-       cpu_ccfence();
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       switch (flags & LK_TYPE_MASK) {
-       case LK_SHARED:
                /*
-                * Shared lock critical path case
+                * Normal case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
@@ -148,7 +143,7 @@ again:
                                COUNT(td, 1);
                                break;
                        }
-                       goto again;
+                       continue;
                }
 
                /*
@@ -199,19 +194,22 @@ again:
                                error = EBUSY;
                                break;
                        }
+
+                       if ((extflags & LK_NOCOLLSTATS) == 0) {
+                               indefinite_info_t info;
+
+                               flags |= LK_NOCOLLSTATS;
+                               indefinite_init(&info, lkp->lk_wmesg, 1, 'l');
+                               error = lockmgr_shared(lkp, flags);
+                               indefinite_done(&info);
+                               break;
+                       }
+
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_SHREQ)) {
-                               goto again;
-                       }
-
-                       if (info_init == 0 &&
-                           (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
-                               indefinite_init(&td->td_indefinite,
-                                               lkp->lk_wmesg, 1, 'l');
-                               info_init = 1;
+                               continue;
                        }
-
                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
@@ -220,7 +218,7 @@ again:
                                error = ENOLCK;
                                break;
                        }
-                       goto again;
+                       continue;
                }
 
                /*
@@ -230,9 +228,34 @@ again:
                        COUNT(td, 1);
                        break;
                }
-               goto again;
+               /* retry */
+       }
+       return error;
+}
+
+/*
+ * Acquire an exclusive lock
+ */
+int
+lockmgr_exclusive(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t td;
+       int count;
+       int error;
+       int pflags;
+       int timo;
+
+       _lockmgr_assert(lkp, flags);
+       extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+       td = curthread;
+
+       error = 0;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
@@ -243,7 +266,7 @@ again:
                                COUNT(td, 1);
                                break;
                        }
-                       goto again;
+                       continue;
                }
 
                /*
@@ -277,6 +300,16 @@ again:
                        }
                }
 
+               if ((extflags & LK_NOCOLLSTATS) == 0) {
+                       indefinite_info_t info;
+
+                       flags |= LK_NOCOLLSTATS;
+                       indefinite_init(&info, lkp->lk_wmesg, 1, 'L');
+                       error = lockmgr_exclusive(lkp, flags);
+                       indefinite_done(&info);
+                       break;
+               }
+
                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
@@ -289,14 +322,7 @@ again:
                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
-                       goto again;
-               }
-
-               if (info_init == 0 &&
-                   (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
-                       indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
-                                       1, 'L');
-                       info_init = 1;
+                       continue;
                }
 
                error = tsleep(lkp, pflags | PINTERLOCKED,
@@ -307,10 +333,29 @@ again:
                        error = ENOLCK;
                        break;
                }
-               indefinite_check(&td->td_indefinite);
-               goto again;
+               /* retry */
+       }
+       return error;
+}
+
+/*
+ * Downgrade an exclusive lock to shared
+ */
+int
+lockmgr_downgrade(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t otd;
+       thread_t td;
+       int count;
+
+       extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+       td = curthread;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
@@ -322,16 +367,6 @@ again:
                        panic("lockmgr: not holding exclusive lock");
                }
 
-#ifdef DEBUG_LOCKS
-               for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
-                       if (td->td_lockmgr_stack[i] == lkp &&
-                           td->td_lockmgr_stack_id[i] > 0
-                       ) {
-                               td->td_lockmgr_stack_id[i]--;
-                               break;
-                       }
-               }
-#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
@@ -344,9 +379,36 @@ again:
                        break;
                }
                lkp->lk_lockholder = otd;
-               goto again;
+               /* retry */
+       }
+       return 0;
+}
+
+/*
+ * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE then guarantee
+ * that no other exclusive requester can get in front of us and fail
+ * immediately if another upgrade is pending.
+ */
+int
+lockmgr_upgrade(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t td;
+       int count;
+       int error;
+       int pflags;
+       int wflags;
+       int timo;
+
+       _lockmgr_assert(lkp, flags);
+       extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+       td = curthread;
+       error = 0;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
@@ -355,14 +417,15 @@ again:
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
-               if (count & LKC_UPREQ) {
-                       flags = LK_RELEASE;
-                       error = EBUSY;
-                       goto again;
+               if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE) {
+                       if (count & LKC_UPREQ) {
+                               lockmgr_release(lkp, LK_RELEASE);
+                               error = EBUSY;
+                               break;
+                       }
                }
                /* fall through into normal upgrade */
 
-       case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
@@ -383,7 +446,7 @@ again:
                                lkp->lk_lockholder = td;
                                break;
                        }
-                       goto again;
+                       continue;
                }
 
                /*
@@ -408,9 +471,9 @@ again:
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
-                       flags = LK_RELEASE;
+                       lockmgr_release(lkp, LK_RELEASE);
                        error = EBUSY;
-                       goto again;
+                       break;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
@@ -419,6 +482,16 @@ again:
                        }
                }
 
+               if ((extflags & LK_NOCOLLSTATS) == 0) {
+                       indefinite_info_t info;
+
+                       flags |= LK_NOCOLLSTATS;
+                       indefinite_init(&info, lkp->lk_wmesg, 1, 'U');
+                       error = lockmgr_upgrade(lkp, flags);
+                       indefinite_done(&info);
+                       break;
+               }
+
                /*
                 * Release the shared lock and request the upgrade.
                 */
@@ -446,13 +519,6 @@ again:
                        wflags |= (count - 1);
                }
 
-               if (info_init == 0 &&
-                   (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
-                       indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
-                                       1, 'U');
-                       info_init = 1;
-               }
-
                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);
 
@@ -482,14 +548,37 @@ again:
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
-                               flags = LK_EXCLUSIVE;   /* someone else */
+                               error = lockmgr_exclusive(lkp, flags);
                        else
-                               flags = LK_WAITUPGRADE; /* we own the bit */
+                               error = lockmgr_waitupgrade(lkp, flags);
+                       break;
                }
-               indefinite_check(&td->td_indefinite);
-               goto again;
+               /* retry */
+       }
+       return error;
+}
+
+/*
+ * (internal helper)
+ */
+static int
+lockmgr_waitupgrade(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t td;
+       int count;
+       int error;
+       int pflags;
+       int timo;
+
+       extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+       td = curthread;
+       error = 0;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
@@ -529,10 +618,29 @@ again:
                        }
                        /* retry */
                }
-               indefinite_check(&td->td_indefinite);
-               goto again;
+               /* retry */
+       }
+       return error;
+}
+
+/*
+ * Release a held lock
+ */
+int
+lockmgr_release(struct lock *lkp, u_int flags)
+{
+       uint32_t extflags;
+       thread_t otd;
+       thread_t td;
+       int count;
+
+       extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+       td = curthread;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
@@ -574,7 +682,7 @@ again:
                                           ~(LKC_EXCL | LKC_EXREQ |
                                             LKC_SHREQ| LKC_CANCEL))) {
                                        lkp->lk_lockholder = otd;
-                                       goto again;
+                                       continue;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
@@ -593,7 +701,7 @@ again:
                                                (count & ~LKC_UPREQ) |
                                                LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
-                                       goto again;
+                                       continue;
                                }
                                wakeup(lkp);
                                /* success */
@@ -601,7 +709,7 @@ again:
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
-                                       goto again;
+                                       continue;
                                }
                                /* success */
                        }
@@ -618,7 +726,7 @@ again:
                                              (count - 1) &
                                               ~(LKC_EXREQ | LKC_SHREQ |
                                                 LKC_CANCEL))) {
-                                       goto again;
+                                       continue;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
@@ -636,7 +744,7 @@ again:
                                              (count & ~(LKC_UPREQ |
                                                         LKC_CANCEL)) |
                                              LKC_EXCL | LKC_UPGRANT)) {
-                                       goto again;
+                                       continue;
                                }
                                wakeup(lkp);
                        } else {
@@ -646,45 +754,59 @@ again:
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
-                                       goto again;
+                                       continue;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;
+       }
+       return 0;
+}
+
+/*
+ * Start canceling blocked requesters or later requesters.
+ * Only blocked requesters using CANCELABLE can be canceled.
+ *
+ * This is intended to then allow other requesters (usually the
+ * caller) to obtain a non-cancelable lock.
+ *
+ * Don't waste time issuing a wakeup if nobody is pending.
+ */
+int
+lockmgr_cancel_beg(struct lock *lkp, u_int flags)
+{
+       int count;
+
+       for (;;) {
+               count = lkp->lk_count;
+               cpu_ccfence();
 
-       case LK_CANCEL_BEG:
-               /*
-                * Start canceling blocked requestors or later requestors.
-                * requestors must use CANCELABLE.  Don't waste time issuing
-                * a wakeup if nobody is pending.
-                */
                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */
                KKASSERT((count & LKC_MASK) != 0);      /* issue w/lock held */
                if (!atomic_cmpset_int(&lkp->lk_count,
                                       count, count | LKC_CANCEL)) {
-                       goto again;
+                       continue;
                }
                if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;
-
-       case LK_CANCEL_END:
-               atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
-               break;
-
-       default:
-               panic("lockmgr: unknown locktype request %d",
-                   flags & LK_TYPE_MASK);
-               /* NOTREACHED */
        }
+       return 0;
+}
 
-       if (info_init)
-               indefinite_done(&td->td_indefinite);
+/*
+ * End our cancel request (typically after we have acquired
+ * the lock ourselves).
+ */
+int
+lockmgr_cancel_end(struct lock *lkp, u_int flags)
+{
+       atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
 
-       return (error);
+       return 0;
 }
 
 /*
@@ -699,6 +821,7 @@ undo_upreq(struct lock *lkp)
        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
+
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
index 60cd51b..6729cff 100644
@@ -938,13 +938,11 @@ mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
 int
 mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
 {
-       thread_t td = curthread;
+       indefinite_info_t info;
        int error;
 
-       if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0) {
-               indefinite_init(&td->td_indefinite, mtx->mtx_ident, 1,
+       indefinite_init(&info, mtx->mtx_ident, 1,
                        ((link->state & MTX_LINK_LINKED_SH) ? 'm' : 'M'));
-       }
 
        /*
         * Sleep.  Handle false wakeups, interruptions, etc.
@@ -963,7 +961,7 @@ mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
                                break;
                }
                if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
-                       indefinite_check(&td->td_indefinite);
+                       indefinite_check(&info);
        }
 
        /*
@@ -1016,7 +1014,7 @@ mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
        link->state = MTX_LINK_IDLE;
 
        if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
-               indefinite_done(&td->td_indefinite);
+               indefinite_done(&info);
 
        return error;
 }
index 8df1474..1875467 100644
@@ -153,7 +153,7 @@ spin_trylock_contested(struct spinlock *spin)
 void
 _spin_lock_contested(struct spinlock *spin, const char *ident, int value)
 {
-       thread_t td = curthread;
+       indefinite_info_t info;
 
        /*
         * WARNING! Caller has already incremented the lock.  We must
@@ -168,7 +168,7 @@ _spin_lock_contested(struct spinlock *spin, const char *ident, int value)
                if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
                        return;
        }
-       indefinite_init(&td->td_indefinite, ident, 0, 'S');
+       indefinite_init(&info, ident, 0, 'S');
 
        /*
         * Transfer our exclusive request to the high bits and clear the
@@ -229,10 +229,10 @@ _spin_lock_contested(struct spinlock *spin, const char *ident, int value)
                        --ovalue;
                }
 
-               if (indefinite_check(&td->td_indefinite))
+               if (indefinite_check(&info))
                        break;
        }
-       indefinite_done(&td->td_indefinite);
+       indefinite_done(&info);
 }
 
 /*
@@ -245,9 +245,9 @@ _spin_lock_contested(struct spinlock *spin, const char *ident, int value)
 void
 _spin_lock_shared_contested(struct spinlock *spin, const char *ident)
 {
-       thread_t td = curthread;
+       indefinite_info_t info;
 
-       indefinite_init(&td->td_indefinite, ident, 0, 's');
+       indefinite_init(&info, ident, 0, 's');
 
        /*
         * Undo the inline's increment.
@@ -290,10 +290,10 @@ _spin_lock_shared_contested(struct spinlock *spin, const char *ident)
                                              ovalue + 1))
                                break;
                }
-               if (indefinite_check(&td->td_indefinite))
+               if (indefinite_check(&info))
                        break;
        }
-       indefinite_done(&td->td_indefinite);
+       indefinite_done(&info);
 }
 
 /*
index e6e4d62..1b695f3 100644
@@ -704,6 +704,8 @@ lwkt_switch(void)
                goto havethread;
            }
            ++ntd->td_contended;        /* overflow ok */
+           if (gd->gd_indefinite.type == 0)
+               indefinite_init(&gd->gd_indefinite, NULL, 0, 't');
 #ifdef LOOPMASK
            if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                    kprintf("lwkt_switch: excessive contended %d "
@@ -769,8 +771,8 @@ havethread:
     /*
      * If we were busy waiting record final disposition
      */
-    if (ntd->td_indefinite.type)
-           indefinite_done(&ntd->td_indefinite);
+    if (gd->gd_indefinite.type)
+           indefinite_done(&gd->gd_indefinite);
 
 havethread_preempted:
     /*
index 7083045..35b35a7 100644
 #include <machine/cpu.h>
 #include <sys/lock.h>
 #include <sys/spinlock.h>
-#include <sys/indefinite.h>
 
 #include <sys/thread2.h>
 #include <sys/spinlock2.h>
 #include <sys/mplock2.h>
-#include <sys/indefinite2.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
@@ -482,13 +480,10 @@ lwkt_getalltokens(thread_t td, int spinning)
                         */
                        KASSERT(tok->t_desc,
                                ("token %p is not initialized", tok));
-
-                       if (td->td_indefinite.type == 0) {
-                               indefinite_init(&td->td_indefinite,
-                                               tok->t_desc, 1, 't');
-                       } else {
-                               indefinite_check(&td->td_indefinite);
-                       }
+                       td->td_gd->gd_cnt.v_lock_name[0] = 't';
+                       strncpy(td->td_gd->gd_cnt.v_lock_name + 1,
+                               tok->t_desc,
+                               sizeof(td->td_gd->gd_cnt.v_lock_name) - 2);
                        if (lwkt_sched_debug > 0) {
                                --lwkt_sched_debug;
                                kprintf("toka %p %s %s\n",
@@ -597,12 +592,10 @@ _lwkt_getalltokens_sorted(thread_t td)
                         * Otherwise we failed to acquire all the tokens.
                         * Release whatever we did get.
                         */
-                       if (td->td_indefinite.type == 0) {
-                               indefinite_init(&td->td_indefinite,
-                                               tok->t_desc, 1, 't');
-                       } else {
-                               indefinite_check(&td->td_indefinite);
-                       }
+                       td->td_gd->gd_cnt.v_lock_name[0] = 't';
+                       strncpy(td->td_gd->gd_cnt.v_lock_name + 1,
+                               tok->t_desc,
+                               sizeof(td->td_gd->gd_cnt.v_lock_name) - 2);
                        if (lwkt_sched_debug > 0) {
                                --lwkt_sched_debug;
                                kprintf("tokb %p %s %s\n",
index 6882220..72d3dca 100644
@@ -87,6 +87,9 @@
 #ifndef _SYS_CALLOUT_H_
 #include <sys/callout.h>
 #endif
+#ifndef _SYS_INDEFINITE_H_
+#include <sys/indefinite.h>
+#endif
 
 /*
  * This structure maps out the global data that needs to be kept on a
@@ -187,6 +190,7 @@ struct globaldata {
        struct vmstats  gd_vmstats_adj;         /* pcpu adj for vmstats */
        struct callout  gd_loadav_callout;      /* loadavg calc */
        struct callout  gd_schedcpu_callout;    /* scheduler/stats */
+       indefinite_info_t gd_indefinite;        /* scheduler cpu-bound */
        uint32_t        gd_loadav_nrunnable;    /* pcpu lwps nrunnable */
        uint32_t        gd_reserved32[1];
        void            *gd_preserved[4];       /* future fields */
index 9d2f77a..0cdecf1 100644
 static __inline void
 indefinite_init(indefinite_info_t *info, const char *ident, int now, char type)
 {
+       info->ident = ident;
+       info->secs = 0;
+       info->count = 0;
+
        if (tsc_frequency) {
-               info->base = rdtsc();
-               info->ident = ident;
-               info->secs = 0;
-               info->count = 0;
                info->type = type;
-
-               if (now) {
-                       mycpu->gd_cnt.v_lock_name[0] = info->type;
-                       strncpy(mycpu->gd_cnt.v_lock_name + 1, info->ident,
-                               sizeof(mycpu->gd_cnt.v_lock_name) - 2);
-               }
+               info->base = rdtsc();
+       } else {
+               info->type = 0;
+               info->base = 0;
+       }
+       if (now && info->ident) {
+               mycpu->gd_cnt.v_lock_name[0] = info->type;
+               strncpy(mycpu->gd_cnt.v_lock_name + 1, info->ident,
+                       sizeof(mycpu->gd_cnt.v_lock_name) - 2);
        }
 }
 
@@ -90,7 +93,7 @@ indefinite_check(indefinite_info_t *info)
         * Ignore minor one-second interval error accumulation in
         * favor of ensuring that info->base is fully synchronized.
         */
-       if (info->secs == 0 && delta > tsc_oneus_approx) {
+       if (info->secs == 0 && delta > tsc_oneus_approx && info->ident) {
                mycpu->gd_cnt.v_lock_name[0] = info->type;
                strncpy(mycpu->gd_cnt.v_lock_name + 1, info->ident,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
@@ -162,8 +165,6 @@ indefinite_done(indefinite_info_t *info)
        if (info->type) {
                delta = rdtsc() - info->base;
                delta = delta * 1000000U / tsc_frequency;
-               if (lock_test_mode && delta > 1000)
-                       kprintf("TEST %s (%lu)\n", info->ident, delta);
                mycpu->gd_cnt.v_lock_colls += delta;
                info->type = 0;
        }
index 42c34d8..d5fe5e9 100644
@@ -63,11 +63,6 @@ struct lock {
        int     lk_timo;                /* maximum sleep time (for tsleep) */
        const char *lk_wmesg;           /* resource sleeping (for tsleep) */
        struct thread *lk_lockholder;   /* thread of excl lock holder */
-#ifdef DEBUG_LOCKS
-       const char *lk_filename;
-       const char *lk_lockername;
-       int     lk_lineno;
-#endif
 };
 
 /*
@@ -229,18 +224,13 @@ void      lockinit (struct lock *, const char *wmesg, int timo, int flags);
 void   lockreinit (struct lock *, const char *wmesg, int timo, int flags);
 void   lockuninit(struct lock *);
 void   lock_sysinit(struct lock_args *);
-#ifdef DEBUG_LOCKS
-int    debuglockmgr (struct lock *, u_int flags,
-                       const char *,
-                       const char *,
-                       int);
-#define lockmgr(lockp, flags) \
-       debuglockmgr((lockp), (flags), "lockmgr", __FILE__, __LINE__)
-#else
-int    lockmgr (struct lock *, u_int flags);
-#endif
-void   lockmgr_setexclusive_interlocked(struct lock *);
-void   lockmgr_clrexclusive_interlocked(struct lock *);
+int    lockmgr_shared (struct lock *, u_int flags);
+int    lockmgr_exclusive (struct lock *, u_int flags);
+int    lockmgr_downgrade (struct lock *, u_int flags);
+int    lockmgr_upgrade (struct lock *, u_int flags);
+int    lockmgr_release (struct lock *, u_int flags);
+int    lockmgr_cancel_beg (struct lock *, u_int flags);
+int    lockmgr_cancel_end (struct lock *, u_int flags);
 void   lockmgr_kernproc (struct lock *);
 void   lockmgr_printinfo (struct lock *);
 int    lockstatus (struct lock *, struct thread *);
@@ -255,10 +245,41 @@ int       lockcountnb (struct lock *);
                (flags)                                                 \
        };                                                              \
        SYSINIT(name##_lock_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,   \
-           lock_sysinit, &name##_args);                                        \
-       SYSUNINIT(name##_lock_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,       \
+           lock_sysinit, &name##_args);                                \
+       SYSUNINIT(name##_lock_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, \
            lockuninit, (lock))
 
+/*
+ * Most lockmgr() calls pass a constant flags parameter which
+ * we can optimize-out with an inline.
+ */
+static __inline
+int
+lockmgr(struct lock *lkp, u_int flags)
+{
+       switch(flags & LK_TYPE_MASK) {
+       case LK_SHARED:
+               return lockmgr_shared(lkp, flags);
+       case LK_EXCLUSIVE:
+               return lockmgr_exclusive(lkp, flags);
+       case LK_DOWNGRADE:
+               return lockmgr_downgrade(lkp, flags);
+       case LK_EXCLUPGRADE:
+       case LK_UPGRADE:
+               return lockmgr_upgrade(lkp, flags);
+       case LK_RELEASE:
+               return lockmgr_release(lkp, flags);
+       case LK_CANCEL_BEG:
+               return lockmgr_cancel_beg(lkp, flags);
+       case LK_CANCEL_END:
+               return lockmgr_cancel_end(lkp, flags);
+       default:
+               panic("lockmgr: unknown locktype request %d",
+                     flags & LK_TYPE_MASK);
+               return EINVAL;  /* NOT REACHED */
+       }
+}
+
 #endif /* _KERNEL */
 #endif /* _KERNEL || _KERNEL_STRUCTURES */
 #endif /* _SYS_LOCK_H_ */
index b4819ce..0523731 100644
@@ -35,9 +35,6 @@
 #ifndef _SYS_IOSCHED_H_
 #include <sys/iosched.h>
 #endif
-#ifndef _SYS_INDEFINITE_H_
-#include <sys/indefinite.h>
-#endif
 #include <machine/thread.h>
 
 struct globaldata;
@@ -275,7 +272,6 @@ struct thread {
     int                td_in_crit_report;      
 #endif
     struct md_thread td_mach;
-    indefinite_info_t  td_indefinite;
 #ifdef DEBUG_LOCKS
 #define SPINLOCK_DEBUG_ARRAY_SIZE      32
    int         td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];