kernel - add usched_dfly algorithm, set as default for now
author    Matthew Dillon <dillon@apollo.backplane.com>
Tue, 18 Sep 2012 06:17:51 +0000 (23:17 -0700)
committer Matthew Dillon <dillon@apollo.backplane.com>
Tue, 18 Sep 2012 06:22:05 +0000 (23:22 -0700)
* Fork usched_bsd4 for continued development.

* Rewrite the bsd4 scheduler to use per-cpu spinlocks and queues.
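
  Each cpu now owns its run queues and guards them with its own spinlock
  instead of contending on the single global bsd4_spin.  The new per-cpu
  structure from the usched_dfly.c diff below, reproduced with annotations
  (the SMP #ifdef around cpunode is dropped for brevity):

	struct usched_dfly_pcpu {
		struct spinlock spin;		/* guards this cpu's queues only */
		struct thread	helper_thread;	/* per-cpu scheduler helper */
		short		rrcount;	/* round-robin tick count */
		short		upri;		/* upri of designated user lwp */
		int		uload;		/* per-cpu load factor */
		struct lwp	*uschedcp;	/* currently designated user lwp */
		struct rq	queues[NQS];	/* normal run queues */
		struct rq	rtqueues[NQS];	/* realtime run queues */
		struct rq	idqueues[NQS];	/* idle-class run queues */
		u_int32_t	queuebits;	/* non-empty queue bitmasks */
		u_int32_t	rtqueuebits;
		u_int32_t	idqueuebits;
		int		runqcount;	/* now per-cpu, not global */
		int		cpuid;
		cpumask_t	cpumask;
		cpu_node_t	*cpunode;	/* position in the topology tree */
	};

  Because each cpu locks only its own structure, setrunqueue and chooseproc
  operations on different cpus no longer serialize on one global spinlock.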

* Reformulate the cpu selection algorithm using the topology info.
  We now iterate the topology top-down instead of bottom-up to
  calculate the best cpu node to schedule a thread on.

  Implements both pushing a thread to a remote cpu's queue and pulling
  one from a remote queue.
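
  Only the prototypes of dfly_choose_best_queue() and dfly_choose_worst_queue()
  appear in this excerpt, so the following is an illustrative sketch of the
  top-down walk rather than the committed code: start at root_cpu_node,
  descend at each level into the child whose cpus carry the smallest
  aggregate uload, and finish with the least loaded usable cpu in the chosen
  leaf.  dfly_choose_worst_queue() is the mirror image used when an idle cpu
  wants to pull work from the most loaded queue.

	/*
	 * Illustrative sketch only -- not the committed
	 * dfly_choose_best_queue().
	 */
	static dfly_pcpu_t
	choose_best_queue_sketch(struct lwp *lp)
	{
		cpu_node_t *node = root_cpu_node;
		cpumask_t usable;
		cpumask_t mask;
		int cpuid;
		int n;

		usable = lp->lwp_cpumask & smp_active_mask &
			 usched_global_cpumask;
		if (node == NULL || usable == 0)
			return (&dfly_pcpu[lp->lwp_qcpu]);

		/* descend toward a leaf, taking the least loaded child each time */
		while (node->child_no != 0) {
			cpu_node_t *best = NULL;
			int best_load = 0;

			for (n = 0; n < node->child_no; n++) {
				cpu_node_t *child = &node->child_node[n];
				int load = 0;

				mask = child->members & usable;
				if (mask == 0)
					continue;	/* no usable cpu below */
				while (mask) {
					cpuid = BSFCPUMASK(mask);
					load += dfly_pcpu[cpuid].uload;
					mask &= ~CPUMASK(cpuid);
				}
				if (best == NULL || load < best_load) {
					best = child;
					best_load = load;
				}
			}
			if (best == NULL)
				break;
			node = best;
		}

		/* pick the least loaded usable cpu among the leaf's members */
		mask = node->members & usable;
		if (mask == 0)
			return (&dfly_pcpu[lp->lwp_qcpu]);
		cpuid = BSFCPUMASK(mask);
		mask &= ~CPUMASK(cpuid);
		while (mask) {
			n = BSFCPUMASK(mask);
			if (dfly_pcpu[n].uload < dfly_pcpu[cpuid].uload)
				cpuid = n;
			mask &= ~CPUMASK(n);
		}
		return (&dfly_pcpu[cpuid]);
	}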

* Track a load factor on a per-cpu basis.
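
  The hook points are visible in the kern_synch.c hunk below: uload_update()
  is called as a thread blocks in tsleep() and again after it resumes.  The
  committed dfly_uload_update() body lies outside this excerpt, so the
  following is only a minimal sketch of the bookkeeping it implies; the
  fixed weight and the locking shown are assumptions:

	/* Hypothetical helper -- illustrates the per-cpu load bookkeeping only. */
	static void
	uload_adjust_sketch(struct lwp *lp, int weight)
	{
		dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];

		spin_lock(&dd->spin);
		dd->uload += weight;	/* positive when runnable, negative when asleep */
		spin_unlock(&dd->spin);
	}

  dfly_choose_best_queue() and dfly_choose_worst_queue() can then compare
  these per-cpu uload values when deciding where to push a newly runnable
  thread or which remote queue to pull from.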

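With usched_dfly registered and made the default, the previous scheduler
remains selectable at boot through the tunable that usched_init() parses
into defsched.  The tunable name used below, kern.user_scheduler, is read
outside this excerpt, so treat it as an assumption here:

	# /boot/loader.conf -- revert to the previous scheduler if needed
	kern.user_scheduler="bsd4"
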
sys/conf/files
sys/kern/kern_exit.c
sys/kern/kern_synch.c
sys/kern/kern_usched.c
sys/kern/subr_cpu_topology.c
sys/kern/usched_bsd4.c
sys/kern/usched_dfly.c [copied from sys/kern/usched_bsd4.c with 50% similarity]
sys/kern/usched_dummy.c
sys/sys/cpu_topology.h
sys/sys/proc.h
sys/sys/usched.h

index a2f789b..838848f 100644
@@ -871,6 +871,7 @@ kern/kern_subr.c    standard
 kern/kern_iosched.c    standard
 kern/kern_usched.c     standard
 kern/usched_bsd4.c     standard
+kern/usched_dfly.c     standard
 kern/usched_dummy.c    standard
 kern/kern_umtx.c       standard
 kern/kern_mutex.c      standard
index aae085c..7f98afb 100644
@@ -696,6 +696,12 @@ lwp_exit(int masterexit)
        lwkt_reltoken(&p->p_token);
        if (dowake)
                wakeup(&p->p_nthreads);
+
+       /*
+        * Tell the userland scheduler that we are going away
+        */
+       p->p_usched->heuristic_exiting(lp, p);
+
        cpu_lwp_exit();
 }
 
@@ -939,7 +945,6 @@ loop:
 
                        /* Take care of our return values. */
                        *res = p->p_pid;
-                       p->p_usched->heuristic_exiting(td->td_lwp, p);
 
                        if (status)
                                *status = p->p_xstat;
@@ -1048,7 +1053,6 @@ loop:
                        p->p_flags |= P_WAITED;
 
                        *res = p->p_pid;
-                       p->p_usched->heuristic_exiting(td->td_lwp, p);
                        if (status)
                                *status = W_STOPCODE(p->p_xstat);
                        /* Zero rusage so we get something consistent. */
@@ -1074,7 +1078,6 @@ loop:
                        }
 
                        *res = p->p_pid;
-                       p->p_usched->heuristic_exiting(td->td_lwp, p);
                        p->p_flags &= ~P_CONTINUED;
 
                        if (status)
index 4378085..a67a76e 100644
@@ -602,7 +602,9 @@ tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
                if (lp->lwp_stat != LSSTOP)
                        lp->lwp_stat = LSSLEEP;
                lp->lwp_ru.ru_nvcsw++;
+               p->p_usched->uload_update(lp);
                lwkt_switch();
+               p->p_usched->uload_update(lp);
 
                /*
                 * And when we are woken up, put us back in LSRUN.  If we
index 4064187..a651874 100644
@@ -66,11 +66,14 @@ usched_init(void)
         * Add various userland schedulers to the system.
         */
        usched_ctl(&usched_bsd4, USCH_ADD);
+       usched_ctl(&usched_dfly, USCH_ADD);
        usched_ctl(&usched_dummy, USCH_ADD);
        if (defsched == NULL )
-               return(&usched_bsd4);
+               return(&usched_dfly);
        if (strcmp(defsched, "bsd4") == 0)
                return(&usched_bsd4);
+       if (strcmp(defsched, "dfly") == 0)
+               return(&usched_dfly);
        kprintf("WARNING: Running dummy userland scheduler\n");
        return(&usched_dummy);
 }
@@ -202,6 +205,7 @@ sys_usched_set(struct usched_set_args *uap)
                if (item && item != p->p_usched) {
                        /* XXX lwp */
                        p->p_usched->release_curproc(ONLY_LWP_IN_PROC(p));
+                       p->p_usched->heuristic_exiting(ONLY_LWP_IN_PROC(p), p);
                        p->p_usched = item;
                } else if (item == NULL) {
                        error = EINVAL;
index 3efd419..186f1c8 100644
@@ -69,6 +69,7 @@ static char cpu_topology_members[8*MAXCPU];
 static per_cpu_sysctl_info_t pcpu_sysctl[MAXCPU];
 
 int cpu_topology_levels_number = 1;
+cpu_node_t *root_cpu_node;
 
 /* Get the next valid apicid starting
  * from current apicid (curr_apicid
@@ -120,9 +121,10 @@ build_topology_tree(int *children_no_per_level,
 
        node->child_node = *last_free_node;
        (*last_free_node) += node->child_no;
+       if (node->parent_node == NULL)
+               root_cpu_node = node;
        
        for (i = 0; i < node->child_no; i++) {
-
                node->child_node[i].parent_node = node;
 
                build_topology_tree(children_no_per_level,
index 4408443..085176c 100644
@@ -1,6 +1,11 @@
 /*
- * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
- * All rights reserved.
+ * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
+ * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Matthew Dillon <dillon@backplane.com>,
+ * by Mihai Carabas <mihai.carabas@gmail.com>
+ * and many others.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -94,17 +99,18 @@ static void bsd4_recalculate_estcpu(struct lwp *lp);
 static void bsd4_resetpriority(struct lwp *lp);
 static void bsd4_forking(struct lwp *plp, struct lwp *lp);
 static void bsd4_exiting(struct lwp *lp, struct proc *);
+static void bsd4_uload_update(struct lwp *lp);
 static void bsd4_yield(struct lwp *lp);
 
 #ifdef SMP
-static void need_user_resched_remote(void *dummy);
-static int batchy_looser_pri_test(struct lwp* lp);
-static struct lwp *chooseproc_locked_cache_coherent(struct lwp *chklp);
+static void bsd4_need_user_resched_remote(void *dummy);
+static int bsd4_batchy_looser_pri_test(struct lwp* lp);
+static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp);
 #endif
-static struct lwp *chooseproc_locked(struct lwp *chklp);
+static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp);
 static void bsd4_remrunqueue_locked(struct lwp *lp);
 static void bsd4_setrunqueue_locked(struct lwp *lp);
-static void kick_helper(struct lwp *lp);
+static void bsd4_kick_helper(struct lwp *lp);
 
 struct usched usched_bsd4 = {
        { NULL },
@@ -119,6 +125,7 @@ struct usched usched_bsd4 = {
        bsd4_resetpriority,
        bsd4_forking,
        bsd4_exiting,
+       bsd4_uload_update,
        NULL,                   /* setcpumask not supported */
        bsd4_yield
 };
@@ -167,36 +174,19 @@ static struct sysctl_oid *usched_bsd4_sysctl_tree;
 
 /* Debug info exposed through debug.* sysctl */
 
-SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
-    "Number of run queues");
-#ifdef INVARIANTS
-static int usched_nonoptimal;
-SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
-        &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
-static int usched_optimal;
-SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
-        &usched_optimal, 0, "acquire_curproc() was optimal");
-#endif
+SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD,
+          &bsd4_runqcount, 0,
+          "Number of run queues");
 
 static int usched_bsd4_debug = -1;
-SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_bsd4_debug, 0,
-    "Print debug information for this pid");
-static int usched_bsd4_pid_debug = -1;
-SYSCTL_INT(_debug, OID_AUTO, pid_debug, CTLFLAG_RW, &usched_bsd4_pid_debug, 0,
-    "Print KTR debug information for this pid");
-
-#ifdef SMP
-static int remote_resched_nonaffinity;
-static int remote_resched_affinity;
-static int choose_affinity;
-SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
-        &remote_resched_nonaffinity, 0, "Number of remote rescheds");
-SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
-        &remote_resched_affinity, 0, "Number of remote rescheds");
-SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
-        &choose_affinity, 0, "chooseproc() was smart");
-#endif
+SYSCTL_INT(_debug, OID_AUTO, bsd4_scdebug, CTLFLAG_RW,
+          &usched_bsd4_debug, 0,
+          "Print debug information for this pid");
 
+static int usched_bsd4_pid_debug = -1;
+SYSCTL_INT(_debug, OID_AUTO, bsd4_pid_debug, CTLFLAG_RW,
+          &usched_bsd4_pid_debug, 0,
+          "Print KTR debug information for this pid");
 
 /* Tunning usched_bsd4 - configurable through kern.usched_bsd4.* */
 #ifdef SMP
@@ -313,7 +303,7 @@ KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
  * Initialize the run queues at boot time.
  */
 static void
-rqinit(void *dummy)
+bsd4_rqinit(void *dummy)
 {
        int i;
 
@@ -325,7 +315,7 @@ rqinit(void *dummy)
        }
        atomic_clear_cpumask(&bsd4_curprocmask, 1);
 }
-SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
+SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, bsd4_rqinit, NULL)
 
 /*
  * BSD4_ACQUIRE_CURPROC
@@ -542,10 +532,10 @@ bsd4_select_curproc(globaldata_t gd)
        spin_lock(&bsd4_spin);
 #ifdef SMP
        if(usched_bsd4_cache_coherent)
-               nlp = chooseproc_locked_cache_coherent(dd->uschedcp);
+               nlp = bsd4_chooseproc_locked_cache_coherent(dd->uschedcp);
        else
 #endif
-               nlp = chooseproc_locked(dd->uschedcp);
+               nlp = bsd4_chooseproc_locked(dd->uschedcp);
 
        if (nlp) {
 
@@ -588,7 +578,7 @@ bsd4_select_curproc(globaldata_t gd)
  * relative to the other processes running in the system
  */
 static int
-batchy_looser_pri_test(struct lwp* lp)
+bsd4_batchy_looser_pri_test(struct lwp* lp)
 {
        cpumask_t mask;
        bsd4_pcpu_t other_dd;
@@ -710,7 +700,7 @@ bsd4_setrunqueue(struct lwp *lp)
         */
        spin_lock(&bsd4_spin);
        bsd4_setrunqueue_locked(lp);
-       lp->lwp_setrunqueue_ticks = sched_ticks;
+       lp->lwp_rebal_ticks = sched_ticks;
 
 #ifdef SMP
        /*
@@ -909,7 +899,7 @@ found:
                atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
                spin_unlock(&bsd4_spin);
                if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
-                       lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
+                       lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
                else
                        wakeup(&dd->helper_thread);
        }
@@ -1232,7 +1222,8 @@ bsd4_resetpriority(struct lwp *lp)
                                atomic_clear_cpumask(&bsd4_rdyprocmask,
                                                     CPUMASK(reschedcpu));
                                lwkt_send_ipiq(lp->lwp_thread->td_gd,
-                                              need_user_resched_remote, NULL);
+                                              bsd4_need_user_resched_remote,
+                                              NULL);
                        }
 #else
                        spin_unlock(&bsd4_spin);
@@ -1304,15 +1295,19 @@ bsd4_forking(struct lwp *plp, struct lwp *lp)
 }
 
 /*
- * Called when a parent waits for a child.
- *
- * MPSAFE
+ * Called when a lwp is being removed from this scheduler, typically
+ * during lwp_exit().
  */
 static void
 bsd4_exiting(struct lwp *lp, struct proc *child_proc)
 {
 }
 
+static void
+bsd4_uload_update(struct lwp *lp)
+{
+}
+
 /*
  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
  * it selects a user process and returns it.  If chklp is non-NULL and chklp
@@ -1327,7 +1322,7 @@ bsd4_exiting(struct lwp *lp, struct proc *child_proc)
  */
 static
 struct lwp *
-chooseproc_locked(struct lwp *chklp)
+bsd4_chooseproc_locked(struct lwp *chklp)
 {
        struct lwp *lp;
        struct rq *q;
@@ -1401,7 +1396,6 @@ again:
            (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
        ) {
                if (chklp->lwp_thread->td_gd == mycpu) {
-                       ++choose_affinity;
                        lp = chklp;
                }
        }
@@ -1434,7 +1428,7 @@ again:
  */
 static
 struct lwp *
-chooseproc_locked_cache_coherent(struct lwp *chklp)
+bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp)
 {
        struct lwp *lp;
        struct rq *q;
@@ -1491,7 +1485,7 @@ again:
                /*
                 * No more left and we didn't reach the checks limit.
                 */
-               kick_helper(min_level_lwp);
+               bsd4_kick_helper(min_level_lwp);
                return NULL;
        }
        lp = TAILQ_FIRST(q);
@@ -1504,9 +1498,9 @@ again:
        while (checks < usched_bsd4_queue_checks) {
                if ((lp->lwp_cpumask & cpumask) == 0 ||
                    ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
-                     (lp->lwp_setrunqueue_ticks == sched_ticks ||
-                      lp->lwp_setrunqueue_ticks == (int)(sched_ticks - 1)) &&
-                     batchy_looser_pri_test(lp))) {
+                     (lp->lwp_rebal_ticks == sched_ticks ||
+                      lp->lwp_rebal_ticks == (int)(sched_ticks - 1)) &&
+                     bsd4_batchy_looser_pri_test(lp))) {
 
                        KTR_COND_LOG(usched_chooseproc_cc_not_good,
                            lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
@@ -1526,14 +1520,14 @@ again:
                        if (level < min_level ||
                            (level == min_level && min_level_lwp &&
                             lp->lwp_priority < min_level_lwp->lwp_priority)) {
-                               kick_helper(min_level_lwp);
+                               bsd4_kick_helper(min_level_lwp);
                                min_level_lwp = lp;
                                min_level = level;
                                min_q = q;
                                min_which = which;
                                min_pri = pri;
                        } else {
-                               kick_helper(lp);
+                               bsd4_kick_helper(lp);
                        }
                        lp = TAILQ_NEXT(lp, lwp_procq);
                        if (lp == NULL) {
@@ -1574,7 +1568,7 @@ found:
         */
        if (chklp) {
                if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
-                       kick_helper(lp);
+                       bsd4_kick_helper(lp);
                        return(NULL);
                }
        }
@@ -1603,7 +1597,7 @@ found:
  */
 static
 void
-kick_helper(struct lwp *lp)
+bsd4_kick_helper(struct lwp *lp)
 {
        globaldata_t gd;
        bsd4_pcpu_t dd;
@@ -1619,7 +1613,7 @@ kick_helper(struct lwp *lp)
        ++usched_bsd4_kicks;
        atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
        if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
-               lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
+               lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
        } else {
                wakeup(&dd->helper_thread);
        }
@@ -1627,7 +1621,7 @@ kick_helper(struct lwp *lp)
 
 static
 void
-need_user_resched_remote(void *dummy)
+bsd4_need_user_resched_remote(void *dummy)
 {
        globaldata_t gd = mycpu;
        bsd4_pcpu_t  dd = &bsd4_pcpu[gd->gd_cpuid];
@@ -1805,7 +1799,7 @@ sched_thread(void *dummy)
                 * No thread is currently scheduled.
                 */
                KKASSERT(dd->uschedcp == NULL);
-               if ((nlp = chooseproc_locked(NULL)) != NULL) {
+               if ((nlp = bsd4_chooseproc_locked(NULL)) != NULL) {
                        KTR_COND_LOG(usched_sched_thread_no_process,
                            nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
                            gd->gd_cpuid,
@@ -1825,7 +1819,7 @@ sched_thread(void *dummy)
                        spin_unlock(&bsd4_spin);
                }
        } else if (bsd4_runqcount) {
-               if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
+               if ((nlp = bsd4_chooseproc_locked(dd->uschedcp)) != NULL) {
                        KTR_COND_LOG(usched_sched_thread_process,
                            nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
                            gd->gd_cpuid,
similarity index 50%
copy from sys/kern/usched_bsd4.c
copy to sys/kern/usched_dfly.c
index 4408443..6325175 100644
@@ -1,29 +1,39 @@
 /*
- * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
- * All rights reserved.
+ * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
+ * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Matthew Dillon <dillon@backplane.com>,
+ * by Mihai Carabas <mihai.carabas@gmail.com>
+ * and many others.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
+ *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name of The DragonFly Project nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific, prior written permission.
  *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -50,6 +60,8 @@
  * represents four priority levels.
  */
 
+int dfly_rebalanced;
+
 #define MAXPRI                 128
 #define PRIMASK                        (MAXPRI - 1)
 #define PRIBASE_REALTIME       0
@@ -64,7 +76,6 @@
 
 /*
  * NICEPPQ     - number of nice units per priority queue
- *
  * ESTCPUPPQ   - number of estcpu units per priority queue
  * ESTCPUMAX   - number of estcpu units
  */
 
 TAILQ_HEAD(rq, lwp);
 
-#define lwp_priority   lwp_usdata.bsd4.priority
-#define lwp_rqindex    lwp_usdata.bsd4.rqindex
-#define lwp_estcpu     lwp_usdata.bsd4.estcpu
-#define lwp_batch      lwp_usdata.bsd4.batch
-#define lwp_rqtype     lwp_usdata.bsd4.rqtype
-
-static void bsd4_acquire_curproc(struct lwp *lp);
-static void bsd4_release_curproc(struct lwp *lp);
-static void bsd4_select_curproc(globaldata_t gd);
-static void bsd4_setrunqueue(struct lwp *lp);
-static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
-                               sysclock_t cpstamp);
-static void bsd4_recalculate_estcpu(struct lwp *lp);
-static void bsd4_resetpriority(struct lwp *lp);
-static void bsd4_forking(struct lwp *plp, struct lwp *lp);
-static void bsd4_exiting(struct lwp *lp, struct proc *);
-static void bsd4_yield(struct lwp *lp);
-
-#ifdef SMP
-static void need_user_resched_remote(void *dummy);
-static int batchy_looser_pri_test(struct lwp* lp);
-static struct lwp *chooseproc_locked_cache_coherent(struct lwp *chklp);
-#endif
-static struct lwp *chooseproc_locked(struct lwp *chklp);
-static void bsd4_remrunqueue_locked(struct lwp *lp);
-static void bsd4_setrunqueue_locked(struct lwp *lp);
-static void kick_helper(struct lwp *lp);
+#define lwp_priority   lwp_usdata.dfly.priority
+#define lwp_rqindex    lwp_usdata.dfly.rqindex
+#define lwp_estcpu     lwp_usdata.dfly.estcpu
+#define lwp_batch      lwp_usdata.dfly.batch
+#define lwp_rqtype     lwp_usdata.dfly.rqtype
+#define lwp_qcpu       lwp_usdata.dfly.qcpu
 
-struct usched usched_bsd4 = {
-       { NULL },
-       "bsd4", "Original DragonFly Scheduler",
-       NULL,                   /* default registration */
-       NULL,                   /* default deregistration */
-       bsd4_acquire_curproc,
-       bsd4_release_curproc,
-       bsd4_setrunqueue,
-       bsd4_schedulerclock,
-       bsd4_recalculate_estcpu,
-       bsd4_resetpriority,
-       bsd4_forking,
-       bsd4_exiting,
-       NULL,                   /* setcpumask not supported */
-       bsd4_yield
-};
-
-struct usched_bsd4_pcpu {
+struct usched_dfly_pcpu {
+       struct spinlock spin;
        struct thread   helper_thread;
        short           rrcount;
        short           upri;
+       int             uload;
        struct lwp      *uschedcp;
-       struct lwp      *old_uschedcp;
+       struct rq       queues[NQS];
+       struct rq       rtqueues[NQS];
+       struct rq       idqueues[NQS];
+       u_int32_t       queuebits;
+       u_int32_t       rtqueuebits;
+       u_int32_t       idqueuebits;
+       int             runqcount;
+       int             cpuid;
+       cpumask_t       cpumask;
 #ifdef SMP
        cpu_node_t      *cpunode;
 #endif
 };
 
-typedef struct usched_bsd4_pcpu        *bsd4_pcpu_t;
+typedef struct usched_dfly_pcpu        *dfly_pcpu_t;
+
+static void dfly_acquire_curproc(struct lwp *lp);
+static void dfly_release_curproc(struct lwp *lp);
+static void dfly_select_curproc(globaldata_t gd);
+static void dfly_setrunqueue(struct lwp *lp);
+static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
+                               sysclock_t cpstamp);
+static void dfly_recalculate_estcpu(struct lwp *lp);
+static void dfly_resetpriority(struct lwp *lp);
+static void dfly_forking(struct lwp *plp, struct lwp *lp);
+static void dfly_exiting(struct lwp *lp, struct proc *);
+static void dfly_uload_update(struct lwp *lp);
+static void dfly_yield(struct lwp *lp);
+#ifdef SMP
+static dfly_pcpu_t dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp);
+static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
+static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
+#endif
+
+#ifdef SMP
+static void dfly_need_user_resched_remote(void *dummy);
+#endif
+static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
+                                       int isremote);
+static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
+static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
+
+struct usched usched_dfly = {
+       { NULL },
+       "dfly", "Original DragonFly Scheduler",
+       NULL,                   /* default registration */
+       NULL,                   /* default deregistration */
+       dfly_acquire_curproc,
+       dfly_release_curproc,
+       dfly_setrunqueue,
+       dfly_schedulerclock,
+       dfly_recalculate_estcpu,
+       dfly_resetpriority,
+       dfly_forking,
+       dfly_exiting,
+       dfly_uload_update,
+       NULL,                   /* setcpumask not supported */
+       dfly_yield
+};
 
 /*
  * We have NQS (32) run queues per scheduling class.  For the normal
@@ -148,187 +175,146 @@ typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
  * the state of all 32 queues and then a ffs() to find the first busy
  * queue.
  */
-static struct rq bsd4_queues[NQS];
-static struct rq bsd4_rtqueues[NQS];
-static struct rq bsd4_idqueues[NQS];
-static u_int32_t bsd4_queuebits;
-static u_int32_t bsd4_rtqueuebits;
-static u_int32_t bsd4_idqueuebits;
-static cpumask_t bsd4_curprocmask = -1;        /* currently running a user process */
-static cpumask_t bsd4_rdyprocmask;     /* ready to accept a user process */
-static int      bsd4_runqcount;
+static cpumask_t dfly_curprocmask = -1;        /* currently running a user process */
+static cpumask_t dfly_rdyprocmask;     /* ready to accept a user process */
 #ifdef SMP
-static volatile int bsd4_scancpu;
+static volatile int dfly_scancpu;
+/*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
 #endif
-static struct spinlock bsd4_spin;
-static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
-static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
-static struct sysctl_oid *usched_bsd4_sysctl_tree;
+static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
+static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
+static struct sysctl_oid *usched_dfly_sysctl_tree;
 
 /* Debug info exposed through debug.* sysctl */
 
-SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
-    "Number of run queues");
-#ifdef INVARIANTS
-static int usched_nonoptimal;
-SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
-        &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
-static int usched_optimal;
-SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
-        &usched_optimal, 0, "acquire_curproc() was optimal");
-#endif
+static int usched_dfly_debug = -1;
+SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
+          &usched_dfly_debug, 0,
+          "Print debug information for this pid");
 
-static int usched_bsd4_debug = -1;
-SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_bsd4_debug, 0,
-    "Print debug information for this pid");
-static int usched_bsd4_pid_debug = -1;
-SYSCTL_INT(_debug, OID_AUTO, pid_debug, CTLFLAG_RW, &usched_bsd4_pid_debug, 0,
-    "Print KTR debug information for this pid");
+static int usched_dfly_pid_debug = -1;
+SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
+          &usched_dfly_pid_debug, 0,
+          "Print KTR debug information for this pid");
 
-#ifdef SMP
-static int remote_resched_nonaffinity;
-static int remote_resched_affinity;
-static int choose_affinity;
-SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
-        &remote_resched_nonaffinity, 0, "Number of remote rescheds");
-SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
-        &remote_resched_affinity, 0, "Number of remote rescheds");
-SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
-        &choose_affinity, 0, "chooseproc() was smart");
-#endif
+static int usched_dfly_chooser = 0;
+SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
+          &usched_dfly_chooser, 0,
+          "Print KTR debug information for this pid");
 
-
-/* Tunning usched_bsd4 - configurable through kern.usched_bsd4.* */
+/* Tunning usched_dfly - configurable through kern.usched_dfly.* */
 #ifdef SMP
-static int usched_bsd4_smt = 0;
-static int usched_bsd4_cache_coherent = 0;
-static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
-static int usched_bsd4_queue_checks = 5;
-static int usched_bsd4_stick_to_level = 0;
+static int usched_dfly_smt = 0;
+static int usched_dfly_cache_coherent = 0;
+static int usched_dfly_upri_affinity = 16; /* 32 queues - half-way */
+static int usched_dfly_queue_checks = 5;
+static int usched_dfly_stick_to_level = 0;
 #endif
-static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
-static int usched_bsd4_decay = 8;
-static int usched_bsd4_batch_time = 10;
-static long usched_bsd4_kicks;
+static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
+static int usched_dfly_decay = 8;
+static int usched_dfly_batch_time = 10;
+static long usched_dfly_kicks;
 
 /* KTR debug printings */
 
 KTR_INFO_MASTER(usched);
 
-#if !defined(KTR_USCHED_BSD4)
-#define        KTR_USCHED_BSD4 KTR_ALL
+#if !defined(KTR_USCHED_DFLY)
+#define        KTR_USCHED_DFLY KTR_ALL
 #endif
 
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
-    "USCHED_BSD4(bsd4_acquire_curproc in user_reseched_wanted "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
+    "USCHED_DFLY(dfly_acquire_curproc in user_reseched_wanted "
     "after release: pid %d, cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
-    "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
+    "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
     "curr_cpuid %d)",
     pid_t pid, int cpuid, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
-    "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
-    "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
+    "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
+    "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
-    "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
+    "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
     "cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, int curr);
 
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
-    "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
+    "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
     "cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, int curr);
 
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
-    "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
+    "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
 
 #ifdef SMP
-KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
-    "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
+    "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
     "cpuid %d, verify_mask %lu)",
     pid_t pid, int cpuid, cpumask_t mask);
-KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
-    "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
+    "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
     "cpuid %d, verify_mask %lu)",
     pid_t pid, int cpuid, cpumask_t mask);
 
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
-    "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
+    "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
     "mask %lu, curr_cpuid %d)",
     pid_t pid, int cpuid, cpumask_t mask, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
-    "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
+    "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
     "cpuid %d, mask %lu, curr_cpuid %d)",
     pid_t pid, int cpuid, cpumask_t mask, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
-    "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
+    "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
     "cpuid %d, mask %lu, curr_cpuid %d)",
     pid_t pid, int cpuid, cpumask_t mask, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
-    "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
+    "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
     "mask %lu, found_cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
-    "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
+    "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
     "try_cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, int try_cpuid, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
-    "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
+KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
+    "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
     "mask %lu, found_cpuid %d, curr_cpuid %d)",
     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
 #endif
 
-KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0,
-    "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
+KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
+    "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
     pid_t pid, int old_cpuid, int curr);
 #ifdef SMP
-KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
-    "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
+KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
+    "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
     pid_t pid, int old_cpuid, int curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
-    "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
+KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
+    "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
     "sibling_mask %lu, curr_cpumask %lu)",
     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
-KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
-    "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
+KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
+    "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
     "sibling_mask %lu, curr_cpumask: %lu)",
     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
 
-KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
-    "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
+KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
+    "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
     int id, pid_t pid, int cpuid);
-KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
-    "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
+KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
+    "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
     int id, pid_t pid, int cpuid);
-KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
-    "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
+KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
+    "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
     int id, cpumask_t tmpmask);
 #endif
 
 /*
- * Initialize the run queues at boot time.
- */
-static void
-rqinit(void *dummy)
-{
-       int i;
-
-       spin_init(&bsd4_spin);
-       for (i = 0; i < NQS; i++) {
-               TAILQ_INIT(&bsd4_queues[i]);
-               TAILQ_INIT(&bsd4_rtqueues[i]);
-               TAILQ_INIT(&bsd4_idqueues[i]);
-       }
-       atomic_clear_cpumask(&bsd4_curprocmask, 1);
-}
-SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
-
-/*
- * BSD4_ACQUIRE_CURPROC
+ * DFLY_ACQUIRE_CURPROC
  *
  * This function is called when the kernel intends to return to userland.
  * It is responsible for making the thread the current designated userland
@@ -340,18 +326,13 @@ SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
  * occur, this function is called only under very controlled circumstances.
- *
- * MPSAFE
  */
 static void
-bsd4_acquire_curproc(struct lwp *lp)
+dfly_acquire_curproc(struct lwp *lp)
 {
        globaldata_t gd;
-       bsd4_pcpu_t dd;
+       dfly_pcpu_t dd;
        thread_t td;
-#if 0
-       struct lwp *olp;
-#endif
 
        /*
         * Make sure we aren't sitting on a tsleep queue.
@@ -360,7 +341,7 @@ bsd4_acquire_curproc(struct lwp *lp)
        crit_enter_quick(td);
        if (td->td_flags & TDF_TSLEEPQ)
                tsleep_remove(td);
-       bsd4_recalculate_estcpu(lp);
+       dfly_recalculate_estcpu(lp);
 
        /*
         * If a reschedule was requested give another thread the
@@ -368,26 +349,14 @@ bsd4_acquire_curproc(struct lwp *lp)
         */
        if (user_resched_wanted()) {
                clear_user_resched();
-               bsd4_release_curproc(lp);
-
-               KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
-                   lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                   lp->lwp_proc->p_pid,
-                   lp->lwp_thread->td_gd->gd_cpuid,
-                   mycpu->gd_cpuid);
+               dfly_release_curproc(lp);
        }
 
        /*
         * Loop until we are the current user thread
         */
        gd = mycpu;
-       dd = &bsd4_pcpu[gd->gd_cpuid];
-
-       KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-           lp->lwp_proc->p_pid,
-           lp->lwp_thread->td_gd->gd_cpuid,
-           gd->gd_cpuid);
+       dd = &dfly_pcpu[gd->gd_cpuid];
 
        do {
                /*
@@ -412,55 +381,40 @@ bsd4_acquire_curproc(struct lwp *lp)
                        /*
                         * We can trivially become the current lwp.
                         */
-                       atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
+                       atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
-               } else if (dd->upri > lp->lwp_priority) {
+                       KKASSERT(lp->lwp_qcpu == dd->cpuid);
+               } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
                        /*
                         * We can steal the current cpu's lwp designation
                         * away simply by replacing it.  The other thread
-                        * will stall when it tries to return to userland.
+                        * will stall when it tries to return to userland,
+                        * possibly rescheduling elsewhere when it calls
+                        * setrunqueue.
                         */
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
-                       /*
-                       lwkt_deschedule(olp->lwp_thread);
-                       bsd4_setrunqueue(olp);
-                       */
+                       KKASSERT(lp->lwp_qcpu == dd->cpuid);
                } else {
                        /*
                         * We cannot become the current lwp, place the lp
-                        * on the bsd4 run-queue and deschedule ourselves.
+                        * on the run-queue of this or another cpu and
+                        * deschedule ourselves.
                         *
                         * When we are reactivated we will have another
                         * chance.
                         */
                        lwkt_deschedule(lp->lwp_thread);
-
-                       bsd4_setrunqueue(lp);
-
-                       KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           lp->lwp_thread->td_gd->gd_cpuid,
-                           dd->uschedcp->lwp_proc->p_pid,
-                           gd->gd_cpuid);
-
-
-                       lwkt_switch();
+                       dfly_setrunqueue(lp);
 
                        /*
                         * Reload after a switch or setrunqueue/switch possibly
                         * moved us to another cpu.
                         */
+                       lwkt_switch();
                        gd = mycpu;
-                       dd = &bsd4_pcpu[gd->gd_cpuid];
-
-                       KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           lp->lwp_thread->td_gd->gd_cpuid,
-                           gd->gd_cpuid);
+                       dd = &dfly_pcpu[gd->gd_cpuid];
                }
        } while (dd->uschedcp != lp);
 
@@ -469,7 +423,7 @@ bsd4_acquire_curproc(struct lwp *lp)
 }
 
 /*
- * BSD4_RELEASE_CURPROC
+ * DFLY_RELEASE_CURPROC
  *
  * This routine detaches the current thread from the userland scheduler,
  * usually because the thread needs to run or block in the kernel (at
@@ -479,177 +433,93 @@ bsd4_acquire_curproc(struct lwp *lp)
  * make the current thread.
  *
  * NOTE: This implementation differs from the dummy example in that
- * bsd4_select_curproc() is able to select the current process, whereas
+ * dfly_select_curproc() is able to select the current process, whereas
  * dummy_select_curproc() is not able to select the current process.
  * This means we have to NULL out uschedcp.
  *
  * Additionally, note that we may already be on a run queue if releasing
- * via the lwkt_switch() in bsd4_setrunqueue().
- *
- * MPSAFE
+ * via the lwkt_switch() in dfly_setrunqueue().
  */
 
 static void
-bsd4_release_curproc(struct lwp *lp)
+dfly_release_curproc(struct lwp *lp)
 {
        globaldata_t gd = mycpu;
-       bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
+       dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
 
        if (dd->uschedcp == lp) {
                crit_enter();
                KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
 
-               KTR_COND_LOG(usched_bsd4_release_curproc,
-                   lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                   lp->lwp_proc->p_pid,
-                   lp->lwp_thread->td_gd->gd_cpuid,
-                   gd->gd_cpuid);
-
                dd->uschedcp = NULL;    /* don't let lp be selected */
                dd->upri = PRIBASE_NULL;
-               atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
-               dd->old_uschedcp = lp;  /* used only for KTR debug prints */
-               bsd4_select_curproc(gd);
+               atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
+               dfly_select_curproc(gd);
                crit_exit();
        }
 }
 
 /*
- * BSD4_SELECT_CURPROC
+ * DFLY_SELECT_CURPROC
  *
  * Select a new current process for this cpu and clear any pending user
  * reschedule request.  The cpu currently has no current process.
  *
  * This routine is also responsible for equal-priority round-robining,
- * typically triggered from bsd4_schedulerclock().  In our dummy example
+ * typically triggered from dfly_schedulerclock().  In our dummy example
  * all the 'user' threads are LWKT scheduled all at once and we just
  * call lwkt_switch().
  *
  * The calling process is not on the queue and cannot be selected.
- *
- * MPSAFE
  */
 static
 void
-bsd4_select_curproc(globaldata_t gd)
+dfly_select_curproc(globaldata_t gd)
 {
-       bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
+       dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
        struct lwp *nlp;
        int cpuid = gd->gd_cpuid;
 
        crit_enter_gd(gd);
 
-       spin_lock(&bsd4_spin);
-#ifdef SMP
-       if(usched_bsd4_cache_coherent)
-               nlp = chooseproc_locked_cache_coherent(dd->uschedcp);
-       else
-#endif
-               nlp = chooseproc_locked(dd->uschedcp);
+       /*spin_lock(&dfly_spin);*/
+       spin_lock(&dd->spin);
+       nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
 
        if (nlp) {
-
-               KTR_COND_LOG(usched_bsd4_select_curproc,
-                   nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                   nlp->lwp_proc->p_pid,
-                   nlp->lwp_thread->td_gd->gd_cpuid,
-                   dd->old_uschedcp->lwp_proc->p_pid,
-                   dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
-                   gd->gd_cpuid);
-
-               atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
+               atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
                dd->upri = nlp->lwp_priority;
                dd->uschedcp = nlp;
                dd->rrcount = 0;                /* reset round robin */
-               spin_unlock(&bsd4_spin);
+               spin_unlock(&dd->spin);
+               /*spin_unlock(&dfly_spin);*/
 #ifdef SMP
                lwkt_acquire(nlp->lwp_thread);
 #endif
                lwkt_schedule(nlp->lwp_thread);
        } else {
-               spin_unlock(&bsd4_spin);
-       }
-
-#if 0
-       } else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
-               atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
-               spin_unlock(&bsd4_spin);
-               lwkt_schedule(&dd->helper_thread);
-       } else {
-               spin_unlock(&bsd4_spin);
+               spin_unlock(&dd->spin);
+               /*spin_unlock(&dfly_spin);*/
        }
-#endif
        crit_exit_gd(gd);
 }
-#ifdef SMP
-
-/*
- * batchy_looser_pri_test() - determine if a process is batchy or not
- * relative to the other processes running in the system
- */
-static int
-batchy_looser_pri_test(struct lwp* lp)
-{
-       cpumask_t mask;
-       bsd4_pcpu_t other_dd;
-       int cpu;
-
-       /* Current running processes */
-       mask = bsd4_curprocmask & smp_active_mask
-           & usched_global_cpumask;
-
-       while(mask) {
-               cpu = BSFCPUMASK(mask);
-               other_dd = &bsd4_pcpu[cpu];
-               if (other_dd->upri - lp->lwp_priority > usched_bsd4_upri_affinity * PPQ) {
-
-                       KTR_COND_LOG(usched_batchy_test_false,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           lp->lwp_thread->td_gd->gd_cpuid,
-                           (unsigned long)mask);
-
-                       return 0;
-               }
-               mask &= ~CPUMASK(cpu);
-       }
-
-       KTR_COND_LOG(usched_batchy_test_true,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-           lp->lwp_proc->p_pid,
-           lp->lwp_thread->td_gd->gd_cpuid,
-           (unsigned long)mask);
-
-       return 1;
-}
 
-#endif
 /*
- *
- * BSD4_SETRUNQUEUE
- *
  * Place the specified lwp on the user scheduler's run queue.  This routine
  * must be called with the thread descheduled.  The lwp must be runnable.
+ * It must not be possible for anyone else to explicitly schedule this thread.
  *
  * The thread may be the current thread as a special case.
- *
- * MPSAFE
  */
 static void
-bsd4_setrunqueue(struct lwp *lp)
+dfly_setrunqueue(struct lwp *lp)
 {
-       globaldata_t gd;
-       bsd4_pcpu_t dd;
-#ifdef SMP
+       globaldata_t rgd;
+       dfly_pcpu_t rdd;
        int cpuid;
-       cpumask_t mask;
-       cpumask_t tmpmask;
-#endif
 
        /*
-        * First validate the process state relative to the current cpu.
-        * We don't need the spinlock for this, just a critical section.
-        * We are in control of the process.
+        * First validate the process LWKT state.
         */
        crit_enter();
        KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
@@ -659,17 +529,18 @@ bsd4_setrunqueue(struct lwp *lp)
        KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
 
        /*
-        * Note: gd and dd are relative to the target thread's last cpu,
-        * NOT our current cpu.
+        * NOTE: gd and dd are relative to the target thread's last cpu,
+        *       NOT our current cpu.
         */
-       gd = lp->lwp_thread->td_gd;
-       dd = &bsd4_pcpu[gd->gd_cpuid];
+       rgd = globaldata_find(lp->lwp_qcpu);
+       rdd = &dfly_pcpu[lp->lwp_qcpu];
+       cpuid = rdd->cpuid;
 
        /*
         * This process is not supposed to be scheduled anywhere or assigned
         * as the current process anywhere.  Assert the condition.
         */
-       KKASSERT(dd->uschedcp != lp);
+       KKASSERT(rdd->uschedcp != lp);
 
 #ifndef SMP
        /*
@@ -678,17 +549,24 @@ bsd4_setrunqueue(struct lwp *lp)
         *
         * This is really only an issue when bootstrapping init since
         * the caller in all other cases will be a user process, and
-        * even if released (dd->uschedcp == NULL), that process will
+        * even if released (rdd->uschedcp == NULL), that process will
         * kickstart the scheduler when it returns to user mode from
         * the kernel.
+        *
+        * NOTE: On SMP we can't just set some other cpu's uschedcp.
         */
-       if (dd->uschedcp == NULL) {
-               atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
-               dd->uschedcp = lp;
-               dd->upri = lp->lwp_priority;
-               lwkt_schedule(lp->lwp_thread);
-               crit_exit();
-               return;
+       if (rdd->uschedcp == NULL) {
+               spin_lock(&rdd->spin);
+               if (rdd->uschedcp == NULL) {
+                       atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
+                       rdd->uschedcp = lp;
+                       rdd->upri = lp->lwp_priority;
+                       spin_unlock(&rdd->spin);
+                       lwkt_schedule(lp->lwp_thread);
+                       crit_exit();
+                       return;
+               }
+               spin_unlock(&rdd->spin);
        }
 #endif
 
@@ -702,223 +580,63 @@ bsd4_setrunqueue(struct lwp *lp)
                lwkt_giveaway(lp->lwp_thread);
 #endif
 
-       /*
-        * We lose control of lp the moment we release the spinlock after
-        * having placed lp on the queue.  i.e. another cpu could pick it
-        * up and it could exit, or its priority could be further adjusted,
-        * or something like that.
-        */
-       spin_lock(&bsd4_spin);
-       bsd4_setrunqueue_locked(lp);
-       lp->lwp_setrunqueue_ticks = sched_ticks;
-
 #ifdef SMP
        /*
-        * Kick the scheduler helper on one of the other cpu's
-        * and request a reschedule if appropriate.
+        * Ok, we have to setrunqueue some target cpu and request a reschedule
+        * if necessary.
         *
-        * NOTE: We check all cpus whos rdyprocmask is set.  First we
-        *       look for cpus without designated lps, then we look for
-        *       cpus with designated lps with a worse priority than our
-        *       process.
-        */
-       ++bsd4_scancpu;
-
-       if (usched_bsd4_smt) {
-
-               /*
-                * SMT heuristic - Try to schedule on a free physical core.
-                * If no physical core found than choose the one that has
-                * an interactive thread.
-                */
-
-               int best_cpuid = -1;
-               int min_prio = MAXPRI * MAXPRI;
-               int sibling;
-
-               cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
-               mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
-                   smp_active_mask & usched_global_cpumask;
-
-               KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
-                   lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                   lp->lwp_proc->p_pid,
-                   lp->lwp_thread->td_gd->gd_cpuid,
-                   (unsigned long)mask,
-                   mycpu->gd_cpuid);
-
-               while (mask) {
-                       tmpmask = ~(CPUMASK(cpuid) - 1);
-                       if (mask & tmpmask)
-                               cpuid = BSFCPUMASK(mask & tmpmask);
-                       else
-                               cpuid = BSFCPUMASK(mask);
-                       gd = globaldata_find(cpuid);
-                       dd = &bsd4_pcpu[cpuid];
-
-                       if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
-                               if (dd->cpunode->parent_node->members & ~dd->cpunode->members & mask) {
-
-                                       KTR_COND_LOG(usched_bsd4_setrunqueue_found,
-                                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                                           lp->lwp_proc->p_pid,
-                                           lp->lwp_thread->td_gd->gd_cpuid,
-                                           (unsigned long)mask,
-                                           cpuid,
-                                           mycpu->gd_cpuid);
-
-                                       goto found;
-                               } else {
-                                       sibling = BSFCPUMASK(dd->cpunode->parent_node->members &
-                                           ~dd->cpunode->members);
-                                       if (min_prio > bsd4_pcpu[sibling].upri) {
-                                               min_prio = bsd4_pcpu[sibling].upri;
-                                               best_cpuid = cpuid;
-                                       }
-                               }
-                       }
-                       mask &= ~CPUMASK(cpuid);
-               }
-
-               if (best_cpuid != -1) {
-                       cpuid = best_cpuid;
-                       gd = globaldata_find(cpuid);
-                       dd = &bsd4_pcpu[cpuid];
-
-                       KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           lp->lwp_thread->td_gd->gd_cpuid,
-                           (unsigned long)mask,
-                           cpuid,
-                           mycpu->gd_cpuid);
-
-                       goto found;
-               }
-       } else {
-               /* Fallback to the original heuristic */
-               cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
-               mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
-                      smp_active_mask & usched_global_cpumask;
-
-               KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
-                   lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                   lp->lwp_proc->p_pid,
-                   lp->lwp_thread->td_gd->gd_cpuid,
-                   (unsigned long)mask,
-                   mycpu->gd_cpuid);
-
-               while (mask) {
-                       tmpmask = ~(CPUMASK(cpuid) - 1);
-                       if (mask & tmpmask)
-                               cpuid = BSFCPUMASK(mask & tmpmask);
-                       else
-                               cpuid = BSFCPUMASK(mask);
-                       gd = globaldata_find(cpuid);
-                       dd = &bsd4_pcpu[cpuid];
-
-                       if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
-
-                               KTR_COND_LOG(usched_bsd4_setrunqueue_found,
-                                   lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                                   lp->lwp_proc->p_pid,
-                                   lp->lwp_thread->td_gd->gd_cpuid,
-                                   (unsigned long)mask,
-                                   cpuid,
-                                   mycpu->gd_cpuid);
-
-                               goto found;
-                       }
-                       mask &= ~CPUMASK(cpuid);
-               }
-       }
-
-       /*
-        * Then cpus which might have a currently running lp
+        * We have to choose the best target cpu.  It might not be the current
+        * target even if the current cpu has no running user thread (for
+        * example, because the current cpu might be a hyperthread and its
+        * sibling has a thread assigned).
         */
-       mask = bsd4_curprocmask & bsd4_rdyprocmask &
-              lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
-
-       KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-           lp->lwp_proc->p_pid,
-           lp->lwp_thread->td_gd->gd_cpuid,
-           (unsigned long)mask,
-           mycpu->gd_cpuid);
-
-       while (mask) {
-               tmpmask = ~(CPUMASK(cpuid) - 1);
-               if (mask & tmpmask)
-                       cpuid = BSFCPUMASK(mask & tmpmask);
-               else
-                       cpuid = BSFCPUMASK(mask);
-               gd = globaldata_find(cpuid);
-               dd = &bsd4_pcpu[cpuid];
-
-               if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
-
-                       KTR_COND_LOG(usched_bsd4_setrunqueue_found,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           lp->lwp_thread->td_gd->gd_cpuid,
-                           (unsigned long)mask,
-                           cpuid,
-                           mycpu->gd_cpuid);
-
-                       goto found;
-               }
-               mask &= ~CPUMASK(cpuid);
-       }
+       /*spin_lock(&dfly_spin);*/
+       rdd = dfly_choose_best_queue(rdd, lp);
+       rgd = globaldata_find(rdd->cpuid);
 
        /*
-        * If we cannot find a suitable cpu we reload from bsd4_scancpu
-        * and round-robin.  Other cpus will pickup as they release their
-        * current lwps or become ready.
-        *
-        * Avoid a degenerate system lockup case if usched_global_cpumask
-        * is set to 0 or otherwise does not cover lwp_cpumask.
+        * We lose control of lp the moment we release the spinlock after
+        * having placed lp on the queue.  For example, another cpu could
+        * pick it up and it could exit, or its priority could be further
+        * adjusted while it is queued.
         *
-        * We only kick the target helper thread in this case, we do not
-        * set the user resched flag because
+        * WARNING! rdd can point to a foreign cpu!
         */
-       cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
-       if ((CPUMASK(cpuid) & usched_global_cpumask) == 0) {
-               cpuid = 0;
-       }
-       gd = globaldata_find(cpuid);
-       dd = &bsd4_pcpu[cpuid];
-
-       KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-           lp->lwp_proc->p_pid,
-           lp->lwp_thread->td_gd->gd_cpuid,
-           cpuid,
-           mycpu->gd_cpuid);
-
-found:
-       if (gd == mycpu) {
-               spin_unlock(&bsd4_spin);
-               if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
-                       if (dd->uschedcp == NULL) {
-                               wakeup_mycpu(&dd->helper_thread);
+       spin_lock(&rdd->spin);
+       dfly_setrunqueue_locked(rdd, lp);
+       /*spin_unlock(&dfly_spin);*/
+
+       if (rgd == mycpu) {
+               if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
+                       spin_unlock(&rdd->spin);
+                       if (rdd->uschedcp == NULL) {
+                               wakeup_mycpu(&rdd->helper_thread); /* XXX */
+                               need_user_resched();
                        } else {
                                need_user_resched();
                        }
+               } else {
+                       spin_unlock(&rdd->spin);
                }
        } else {
-               atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
-               spin_unlock(&bsd4_spin);
-               if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
-                       lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
-               else
-                       wakeup(&dd->helper_thread);
+               atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
+               if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
+                       spin_unlock(&rdd->spin);
+                       lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
+                                      NULL);
+               } else {
+                       spin_unlock(&rdd->spin);
+                       wakeup(&rdd->helper_thread);
+               }
        }
 #else
        /*
         * Request a reschedule if appropriate.
         */
-       spin_unlock(&bsd4_spin);
-       if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
+       spin_lock(&rdd->spin);
+       dfly_setrunqueue_locked(rdd, lp);
+       spin_unlock(&rdd->spin);
+       if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                need_user_resched();
        }
 #endif
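
The push path above reduces to one decision once the lwp has been queued on rdd:
preempt the local cpu, IPI the remote cpu, or just wake the remote helper thread.
The following standalone sketch is not part of the commit; it only models that
decision, and the PPQMASK value and names are illustrative assumptions.

#include <stdio.h>

#define PPQMASK 3                      /* assumed illustrative value */

enum action { NONE, LOCAL_RESCHED, LOCAL_HELPER, REMOTE_IPI, REMOTE_HELPER };

/*
 * Mirror the (upri & ~PPQMASK) > (lwp_priority & ~PPQMASK) tests above:
 * lower numeric priority is better, so the test is true when the queued
 * lwp is more urgent than whatever the target cpu is running.
 */
static enum action
notify(int target_is_self, int target_upri, int lp_priority, int has_uschedcp)
{
        int preempts = (target_upri & ~PPQMASK) > (lp_priority & ~PPQMASK);

        if (target_is_self) {
                if (!preempts)
                        return NONE;
                return has_uschedcp ? LOCAL_RESCHED : LOCAL_HELPER;
        }
        return preempts ? REMOTE_IPI : REMOTE_HELPER;
}

int
main(void)
{
        printf("%d\n", notify(0, 100, 40, 1));  /* REMOTE_IPI */
        printf("%d\n", notify(0, 40, 100, 1));  /* REMOTE_HELPER */
        printf("%d\n", notify(1, 100, 40, 0));  /* LOCAL_HELPER */
        return 0;
}
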
@@ -929,21 +647,19 @@ found:
  * This routine is called from a systimer IPI.  It MUST be MP-safe and
  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
  * each cpu.
- *
- * MPSAFE
  */
 static
 void
-bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
+dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
 {
        globaldata_t gd = mycpu;
-       bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
+       dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
 
        /*
         * Do we need to round-robin?  We round-robin 10 times a second.
         * This should only occur for cpu-bound batch processes.
         */
-       if (++dd->rrcount >= usched_bsd4_rrinterval) {
+       if (++dd->rrcount >= usched_dfly_rrinterval) {
                dd->rrcount = 0;
                need_user_resched();
        }
@@ -959,14 +675,7 @@ bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
         */
        KKASSERT(gd->gd_spinlocks_wr == 0);
 
-       bsd4_resetpriority(lp);
-#if 0
-       /*
-       * if we can't call bsd4_resetpriority for some reason we must call
-        * need user_resched().
-        */
-       need_user_resched();
-#endif
+       dfly_resetpriority(lp);
 }
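
As a quick sanity check of the "round-robin 10 times a second" comment above,
here is a standalone model; it assumes a scheduler clock of ESTCPUFREQ ticks
per second and a default rrinterval of one tenth of that, both illustrative
values rather than the kernel's exact settings.

#include <stdio.h>

#define ESTCPUFREQ 50                  /* assumed illustrative value */

int
main(void)
{
        int rrinterval = ESTCPUFREQ / 10;      /* assumed default */
        int rrcount = 0;
        int resched = 0;
        int tick;

        for (tick = 0; tick < ESTCPUFREQ; ++tick) {    /* one second */
                if (++rrcount >= rrinterval) {
                        rrcount = 0;
                        ++resched;                     /* need_user_resched() */
                }
        }
        printf("round-robins per second: %d\n", resched);     /* 10 */
        return 0;
}
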
 
 /*
@@ -983,14 +692,13 @@ bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
  * up in the same tick.  That is, a system doing thousands of context
  * switches per second will still only do serious estcpu calculations
  * ESTCPUFREQ times per second.
- *
- * MPSAFE
  */
 static
 void
-bsd4_recalculate_estcpu(struct lwp *lp)
+dfly_recalculate_estcpu(struct lwp *lp)
 {
        globaldata_t gd = mycpu;
+       dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
        sysclock_t cpbase;
        sysclock_t ttlticks;
        int estcpu;
@@ -1010,7 +718,7 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                 * Too much time has passed, do a coarse correction.
                 */
                lp->lwp_estcpu = lp->lwp_estcpu >> 1;
-               bsd4_resetpriority(lp);
+               dfly_resetpriority(lp);
                lp->lwp_cpbase = cpbase;
                lp->lwp_cpticks = 0;
                lp->lwp_batch -= ESTCPUFREQ;
@@ -1039,18 +747,18 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                /*
                 * Calculate the percentage of one cpu used factoring in ncpus
                 * and the load and adjust estcpu.  Handle degenerate cases
-                * by adding 1 to bsd4_runqcount.
+                * by adding 1 to runqcount.
                 *
                 * estcpu is scaled by ESTCPUMAX.
                 *
-                * bsd4_runqcount is the excess number of user processes
+                * runqcount is the excess number of user processes
                 * that cannot be immediately scheduled to cpus.  We want
                 * to count these as running to avoid range compression
                 * in the base calculation (which is the actual percentage
                 * of one cpu used).
                 */
                estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
-                        (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
+                        (dd->runqcount + ncpus) / (ncpus * ttlticks);
 
                /*
                 * If estcpu is > 50% we become more batch-like
@@ -1068,7 +776,7 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                                lp->lwp_batch = 0;
                }
 
-               if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
+               if (usched_dfly_debug == lp->lwp_proc->p_pid) {
                        kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
                                lp->lwp_proc->p_pid, lp,
                                estcpu, lp->lwp_estcpu,
@@ -1083,7 +791,7 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                 * can cause a cpu hog to eat too much cpu before the
                 * scheduler decides to downgrade it.
                 *
-                * NOTE: p_nice is accounted for in bsd4_resetpriority(),
+                * NOTE: p_nice is accounted for in dfly_resetpriority(),
                 *       and not here, but we must still ensure that a
                 *       cpu-bound nice -20 process does not completely
                 *       override a cpu-bound nice +20 process.
@@ -1091,7 +799,7 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                 * NOTE: We must use ESTCPULIM() here to deal with any
                 *       overshoot.
                 */
-               decay_factor = usched_bsd4_decay;
+               decay_factor = usched_dfly_decay;
                if (decay_factor < 1)
                        decay_factor = 1;
                if (decay_factor > 1024)
@@ -1101,9 +809,9 @@ bsd4_recalculate_estcpu(struct lwp *lp)
                        (lp->lwp_estcpu * decay_factor + estcpu) /
                        (decay_factor + 1));
 
-               if (usched_bsd4_debug == lp->lwp_proc->p_pid)
+               if (usched_dfly_debug == lp->lwp_proc->p_pid)
                        kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
-               bsd4_resetpriority(lp);
+               dfly_resetpriority(lp);
                lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
                lp->lwp_cpticks = 0;
        }
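
To make the estcpu arithmetic above concrete, here is a standalone worked
example (the constants are illustrative placeholders, not the kernel's): a
thread that accumulated 25 cpticks over a 50-tick window on a 4-cpu system
with 2 excess runnable processes, blended against its previous estcpu.

#include <stdio.h>

#define ESTCPUMAX 127                  /* assumed illustrative scale */
#define ESTCPULIM(v) ((v) > ESTCPUMAX ? ESTCPUMAX : (v))

int
main(void)
{
        int cpticks = 25, ttlticks = 50;       /* used half the window */
        int runqcount = 2, ncpus = 4;          /* 2 excess runnable lwps */
        int old_estcpu = 40, decay_factor = 8;

        int estcpu = (cpticks * ESTCPUMAX) * (runqcount + ncpus) /
                     (ncpus * ttlticks);
        int new_estcpu = ESTCPULIM((old_estcpu * decay_factor + estcpu) /
                                   (decay_factor + 1));

        printf("instantaneous estcpu %d, blended estcpu %d\n",
               estcpu, new_estcpu);            /* 95 and 46 */
        return 0;
}
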
@@ -1119,25 +827,36 @@ bsd4_recalculate_estcpu(struct lwp *lp)
  * This routine is called by fork1() for initial setup with the process
  * of the run queue, and also may be called normally with the process on or
  * off the run queue.
- *
- * MPSAFE
  */
 static void
-bsd4_resetpriority(struct lwp *lp)
+dfly_resetpriority(struct lwp *lp)
 {
-       bsd4_pcpu_t dd;
+       dfly_pcpu_t rdd;
        int newpriority;
        u_short newrqtype;
-       int reschedcpu;
+       int rcpu;
        int checkpri;
        int estcpu;
 
+       crit_enter();
+
        /*
-        * Calculate the new priority and queue type
+        * Lock the scheduler that (lp) belongs to.  This can be on a different
+        * cpu.  Handle races.  This loop breaks out with the appropriate
+        * rdd locked.
         */
-       crit_enter();
-       spin_lock(&bsd4_spin);
+       for (;;) {
+               rcpu = lp->lwp_qcpu;
+               rdd = &dfly_pcpu[rcpu];
+               spin_lock(&rdd->spin);
+               if (rcpu == lp->lwp_qcpu)
+                       break;
+               spin_unlock(&rdd->spin);
+       }
 
+       /*
+        * Calculate the new priority and queue type
+        */
        newrqtype = lp->lwp_rtprio.type;
 
        switch(newrqtype) {
@@ -1180,25 +899,45 @@ bsd4_resetpriority(struct lwp *lp)
         * The newpriority incorporates the queue type so do a simple masked
         * check to determine if the process has moved to another queue.  If
         * it has, and it is currently on a run queue, then move it.
+        *
+        * Since uload is ~PPQMASK masked, no modifications are necessary if
+        * we end up in the same run queue.
         */
        if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
-               lp->lwp_priority = newpriority;
+               int delta_uload;
+
+               /*
+                * uload can change, calculate the adjustment to reduce
+                * edge cases since choosers scan the cpu topology without
+                * locks.
+                */
+               if (lp->lwp_mpflags & LWP_MP_ULOAD) {
+                       delta_uload =
+                               -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
+                               ((newpriority & ~PPQMASK) & PRIMASK);
+                       atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
+                                      delta_uload);
+               }
                if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
-                       bsd4_remrunqueue_locked(lp);
+                       dfly_remrunqueue_locked(rdd, lp);
+                       lp->lwp_priority = newpriority;
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
-                       bsd4_setrunqueue_locked(lp);
+                       dfly_setrunqueue_locked(rdd, lp);
                        checkpri = 1;
                } else {
+                       lp->lwp_priority = newpriority;
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
                        checkpri = 0;
                }
-               reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
        } else {
+               /*
+                * In the same PPQ, uload cannot change.
+                */
                lp->lwp_priority = newpriority;
-               reschedcpu = -1;
                checkpri = 1;
+               rcpu = -1;
        }
 
        /*
@@ -1218,41 +957,38 @@ bsd4_resetpriority(struct lwp *lp)
         * process, possibly higher (less desireable), so ignore the upri
         * check which will fail in that case.
         */
-       if (reschedcpu >= 0) {
-               dd = &bsd4_pcpu[reschedcpu];
-               if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
+       if (rcpu >= 0) {
+               if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
                    (checkpri == 0 ||
-                    (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
+                    (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
 #ifdef SMP
-                       if (reschedcpu == mycpu->gd_cpuid) {
-                               spin_unlock(&bsd4_spin);
+                       if (rcpu == mycpu->gd_cpuid) {
+                               spin_unlock(&rdd->spin);
                                need_user_resched();
                        } else {
-                               spin_unlock(&bsd4_spin);
-                               atomic_clear_cpumask(&bsd4_rdyprocmask,
-                                                    CPUMASK(reschedcpu));
-                               lwkt_send_ipiq(lp->lwp_thread->td_gd,
-                                              need_user_resched_remote, NULL);
+                               atomic_clear_cpumask(&dfly_rdyprocmask,
+                                                    CPUMASK(rcpu));
+                               spin_unlock(&rdd->spin);
+                               lwkt_send_ipiq(globaldata_find(rcpu),
+                                              dfly_need_user_resched_remote,
+                                              NULL);
                        }
 #else
-                       spin_unlock(&bsd4_spin);
+                       spin_unlock(&rdd->spin);
                        need_user_resched();
 #endif
                } else {
-                       spin_unlock(&bsd4_spin);
+                       spin_unlock(&rdd->spin);
                }
        } else {
-               spin_unlock(&bsd4_spin);
+               spin_unlock(&rdd->spin);
        }
        crit_exit();
 }
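
The delta_uload computation above can be read as: each lwp contributes a weight
derived from its priority (numerically lower priority is better, but higher
values count as more load), and a priority change simply moves the difference.
A minimal standalone sketch, with the PPQMASK/PRIMASK values assumed for
illustration:

#include <stdio.h>

#define PPQMASK 3                      /* assumed illustrative values */
#define PRIMASK 127

/* Weight an lwp contributes to its cpu's uload. */
static int
uload_weight(int prio)
{
        return (prio & ~PPQMASK) & PRIMASK;
}

int
main(void)
{
        int old_prio = 72, new_prio = 96;      /* numerically lower is better */
        int delta_uload = -uload_weight(old_prio) + uload_weight(new_prio);

        printf("uload adjustment: %+d\n", delta_uload);       /* +24 */
        return 0;
}
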
 
-/*
- * MPSAFE
- */
 static
 void
-bsd4_yield(struct lwp *lp)
+dfly_yield(struct lwp *lp)
 {
 #if 0
        /* FUTURE (or something similar) */
@@ -1276,11 +1012,9 @@ bsd4_yield(struct lwp *lp)
  * detection heuristic for both parallel forking and sequential execs.
  *
  * XXX lwp should be "spawning" instead of "forking"
- *
- * MPSAFE
  */
 static void
-bsd4_forking(struct lwp *plp, struct lwp *lp)
+dfly_forking(struct lwp *plp, struct lwp *lp)
 {
        /*
         * Put the child 4 queue slots (out of 32) higher than the parent
@@ -1304,13 +1038,39 @@ bsd4_forking(struct lwp *plp, struct lwp *lp)
 }
 
 /*
- * Called when a parent waits for a child.
- *
- * MPSAFE
+ * Called when a lwp is being removed from this scheduler, typically
+ * during lwp_exit().
  */
 static void
-bsd4_exiting(struct lwp *lp, struct proc *child_proc)
+dfly_exiting(struct lwp *lp, struct proc *child_proc)
+{
+       dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
+
+       if (lp->lwp_mpflags & LWP_MP_ULOAD) {
+               atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+               atomic_add_int(&dd->uload,
+                              -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
+       }
+}
+
+static void
+dfly_uload_update(struct lwp *lp)
 {
+       dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
+
+       if (lp->lwp_thread->td_flags & TDF_RUNQ) {
+               if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
+                       atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+                       atomic_add_int(&dd->uload,
+                                  ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
+               }
+       } else {
+               if (lp->lwp_mpflags & LWP_MP_ULOAD) {
+                       atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+                       atomic_add_int(&dd->uload,
+                                  -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
+               }
+       }
 }
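
dfly_uload_update() relies on the LWP_MP_ULOAD flag to make the accounting
idempotent: an lwp's weight is added to its qcpu's uload exactly once and
removed exactly once.  A toy userspace model of that invariant (types, values
and names are illustrative, not the kernel's):

#include <stdio.h>

#define PPQMASK      3                 /* assumed illustrative values */
#define PRIMASK      127
#define LWP_MP_ULOAD 0x01

struct toy_lwp {
        int priority;
        int mpflags;
        int qcpu;
};

static int uload[4];                   /* one aggregate counter per toy cpu */

/* Add or remove the lwp's weight exactly once, guarded by the flag. */
static void
account(struct toy_lwp *lp, int on)
{
        int w = (lp->priority & ~PPQMASK) & PRIMASK;

        if (on && (lp->mpflags & LWP_MP_ULOAD) == 0) {
                lp->mpflags |= LWP_MP_ULOAD;
                uload[lp->qcpu] += w;
        } else if (!on && (lp->mpflags & LWP_MP_ULOAD)) {
                lp->mpflags &= ~LWP_MP_ULOAD;
                uload[lp->qcpu] -= w;
        }
}

int
main(void)
{
        struct toy_lwp a = { 72, 0, 1 };
        struct toy_lwp b = { 96, 0, 1 };

        account(&a, 1);
        account(&b, 1);
        account(&a, 1);                        /* double add is ignored */
        printf("cpu1 uload = %d\n", uload[1]); /* 168 */
        account(&a, 0);
        printf("cpu1 uload = %d\n", uload[1]); /* 96 */
        return 0;
}
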
 
 /*
@@ -1322,13 +1082,17 @@ bsd4_exiting(struct lwp *lp, struct proc *child_proc)
  * Until we fix the RUNQ code the chklp test has to be strict or we may
  * bounce between processes trying to acquire the current process designation.
  *
- * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
- *         left intact through the entire routine.
+ * Must be called with dd->spin held exclusively.  The spinlock is
+ * left intact through the entire routine.
+ *
+ * If chklp is NULL this function will dive other cpus' queues looking
+ * for work if the current queue is empty.
  */
 static
 struct lwp *
-chooseproc_locked(struct lwp *chklp)
+dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
 {
+       dfly_pcpu_t xdd;
        struct lwp *lp;
        struct rq *q;
        u_int32_t *which, *which2;
@@ -1336,48 +1100,69 @@ chooseproc_locked(struct lwp *chklp)
        u_int32_t rtqbits;
        u_int32_t tsqbits;
        u_int32_t idqbits;
-       cpumask_t cpumask;
-
-       rtqbits = bsd4_rtqueuebits;
-       tsqbits = bsd4_queuebits;
-       idqbits = bsd4_idqueuebits;
-       cpumask = mycpu->gd_cpumask;
+       /*usched_dfly_queue_checks*/
 
+       rtqbits = dd->rtqueuebits;
+       tsqbits = dd->queuebits;
+       idqbits = dd->idqueuebits;
 
-#ifdef SMP
-again:
-#endif
        if (rtqbits) {
                pri = bsfl(rtqbits);
-               q = &bsd4_rtqueues[pri];
-               which = &bsd4_rtqueuebits;
+               q = &dd->rtqueues[pri];
+               which = &dd->rtqueuebits;
                which2 = &rtqbits;
        } else if (tsqbits) {
                pri = bsfl(tsqbits);
-               q = &bsd4_queues[pri];
-               which = &bsd4_queuebits;
+               q = &dd->queues[pri];
+               which = &dd->queuebits;
                which2 = &tsqbits;
        } else if (idqbits) {
                pri = bsfl(idqbits);
-               q = &bsd4_idqueues[pri];
-               which = &bsd4_idqueuebits;
+               q = &dd->idqueues[pri];
+               which = &dd->idqueuebits;
                which2 = &idqbits;
+       } else
+#ifdef SMP
+       if (isremote) {
+               /*
+                * Disallow remote->remote recursion
+                */
+               return (NULL);
        } else {
+               /*
+                * Pull a runnable thread from a remote run queue.  We have
+                * to adjust qcpu and uload manually because the lp we return
+                * might be assigned directly to uschedcp (setrunqueue might
+                * not be called).
+                */
+               xdd = dfly_choose_worst_queue(dd);
+               if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
+                       lp = dfly_chooseproc_locked(xdd, NULL, 1);
+                       if (lp) {
+                               if (lp->lwp_mpflags & LWP_MP_ULOAD) {
+                                       atomic_add_int(&xdd->uload,
+                                           -((lp->lwp_priority & ~PPQMASK) &
+                                             PRIMASK));
+                               }
+                               lp->lwp_qcpu = dd->cpuid;
+                               atomic_add_int(&dd->uload,
+                                   ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
+                               atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+                       }
+                       spin_unlock(&xdd->spin);
+               } else {
+                       lp = NULL;
+               }
+               return (lp);
+       }
+#else
+       {
                return NULL;
        }
+#endif
        lp = TAILQ_FIRST(q);
        KASSERT(lp, ("chooseproc: no lwp on busy queue"));
 
-#ifdef SMP
-       while ((lp->lwp_cpumask & cpumask) == 0) {
-               lp = TAILQ_NEXT(lp, lwp_procq);
-               if (lp == NULL) {
-                       *which2 &= ~(1 << pri);
-                       goto again;
-               }
-       }
-#endif
-
        /*
         * If the passed lwp <chklp> is reasonably close to the selected
         * lwp <lp>, return NULL (indicating that <chklp> should be kept).
@@ -1390,31 +1175,14 @@ again:
                        return(NULL);
        }
 
-#ifdef SMP
-       /*
-        * If the chosen lwp does not reside on this cpu spend a few
-        * cycles looking for a better candidate at the same priority level.
-        * This is a fallback check, setrunqueue() tries to wakeup the
-        * correct cpu and is our front-line affinity.
-        */
-       if (lp->lwp_thread->td_gd != mycpu &&
-           (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
-       ) {
-               if (chklp->lwp_thread->td_gd == mycpu) {
-                       ++choose_affinity;
-                       lp = chklp;
-               }
-       }
-#endif
-
        KTR_COND_LOG(usched_chooseproc,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
+           lp->lwp_proc->p_pid == usched_dfly_pid_debug,
            lp->lwp_proc->p_pid,
            lp->lwp_thread->td_gd->gd_cpuid,
            mycpu->gd_cpuid);
 
        TAILQ_REMOVE(q, lp, lwp_procq);
-       --bsd4_runqcount;
+       --dd->runqcount;
        if (TAILQ_EMPTY(q))
                *which &= ~(1 << pri);
        KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
@@ -1424,213 +1192,298 @@ again:
 }
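
The remote-pull branch above is effectively opportunistic work stealing:
because the caller already holds its own per-cpu spinlock, only a
spin_trylock() is attempted on the victim so two cpus stealing from each other
cannot deadlock, and the stolen lwp's uload weight migrates with it.  A
deliberately simplified userspace model (not the kernel API):

#include <stdio.h>
#include <stdbool.h>

struct toy_pcpu {
        bool locked;                   /* stands in for the per-cpu spinlock */
        int  uload;                    /* aggregate weight of queued lwps */
        int  runqcount;
};

/* spin_trylock() analogue: fail instead of waiting. */
static bool
try_lock(struct toy_pcpu *p)
{
        if (p->locked)
                return false;
        p->locked = true;
        return true;
}

/* Move one lwp of weight w from victim to self, mirroring the qcpu/uload
 * fixups done in the remote-pull branch above. */
static bool
steal_one(struct toy_pcpu *self, struct toy_pcpu *victim, int w)
{
        if (victim == NULL || victim == self || !try_lock(victim))
                return false;
        if (victim->runqcount == 0) {
                victim->locked = false;
                return false;
        }
        victim->runqcount--;
        victim->uload -= w;
        self->runqcount++;
        self->uload += w;
        victim->locked = false;
        return true;
}

int
main(void)
{
        struct toy_pcpu self = { false, 0, 0 };
        struct toy_pcpu victim = { false, 200, 3 };
        bool ok = steal_one(&self, &victim, 72);

        printf("stolen=%d self.uload=%d victim.uload=%d\n",
               ok, self.uload, victim.uload);
        return 0;
}
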
 
 #ifdef SMP
+
 /*
- * chooseproc() - with a cache coherence heuristic. Try to pull a process that
- * has its home on the current CPU> If the process doesn't have its home here
- * and is a batchy one (see batcy_looser_pri_test), we can wait for a
- * sched_tick, may be its home will become free and pull it in. Anyway,
- * we can't wait more than one tick. If that tick expired, we pull in that
- * process, no matter what.
+ * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
+ *
+ * Choose a cpu node to schedule lp on, hopefully near its current node.
+ * The current node is passed in as (dd), though it can also be obtained
+ * from lp->lwp_qcpu.  The caller will dfly_setrunqueue() lp on the queue
+ * we return.
+ *
+ * When the topology is known, choose a cpu whose group has, in aggregate,
+ * the lowest weighted load.
  */
 static
-struct lwp *
-chooseproc_locked_cache_coherent(struct lwp *chklp)
+dfly_pcpu_t
+dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp)
 {
-       struct lwp *lp;
-       struct rq *q;
-       u_int32_t *which, *which2;
-       u_int32_t pri;
-       u_int32_t checks;
-       u_int32_t rtqbits;
-       u_int32_t tsqbits;
-       u_int32_t idqbits;
-       cpumask_t cpumask;
-
-       struct lwp * min_level_lwp = NULL;
-       struct rq *min_q = NULL;
-       cpumask_t siblings;
-       cpu_node_t* cpunode = NULL;
-       u_int32_t min_level = MAXCPU;   /* number of levels < MAXCPU */
-       u_int32_t *min_which = NULL;
-       u_int32_t min_pri = 0;
-       u_int32_t level = 0;
-
-       rtqbits = bsd4_rtqueuebits;
-       tsqbits = bsd4_queuebits;
-       idqbits = bsd4_idqueuebits;
-       cpumask = mycpu->gd_cpumask;
-
-       /* Get the mask coresponding to the sysctl configured level */
-       cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
-       level = usched_bsd4_stick_to_level;
-       while (level) {
-               cpunode = cpunode->parent_node;
-               level--;
-       }
-       /* The cpus which can ellect a process */
-       siblings = cpunode->members;
-       checks = 0;
+       cpumask_t mask;
+       cpu_node_t *cpup;
+       cpu_node_t *cpun;
+       cpu_node_t *cpub;
+       dfly_pcpu_t rdd;
+       int cpuid;
+       int n;
+       int load;
+       int lowest_load;
+       int level;
 
-again:
-       if (rtqbits) {
-               pri = bsfl(rtqbits);
-               q = &bsd4_rtqueues[pri];
-               which = &bsd4_rtqueuebits;
-               which2 = &rtqbits;
-       } else if (tsqbits) {
-               pri = bsfl(tsqbits);
-               q = &bsd4_queues[pri];
-               which = &bsd4_queuebits;
-               which2 = &tsqbits;
-       } else if (idqbits) {
-               pri = bsfl(idqbits);
-               q = &bsd4_idqueues[pri];
-               which = &bsd4_idqueuebits;
-               which2 = &idqbits;
-       } else {
-               /*
-                * No more left and we didn't reach the checks limit.
-                */
-               kick_helper(min_level_lwp);
-               return NULL;
-       }
-       lp = TAILQ_FIRST(q);
-       KASSERT(lp, ("chooseproc: no lwp on busy queue"));
+       /*
+        * When the topology is unknown choose a random cpu that is hopefully
+        * idle.
+        */
+       if (dd->cpunode == NULL)
+               return (dfly_choose_queue_simple(dd, lp));
 
        /*
-        * Limit the number of checks/queue to a configurable value to
-        * minimize the contention (we are in a locked region
+        * When the topology is known, choose a cpu whose group has, in
+        * aggregate, the lowest weighted load.
         */
-       while (checks < usched_bsd4_queue_checks) {
-               if ((lp->lwp_cpumask & cpumask) == 0 ||
-                   ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
-                     (lp->lwp_setrunqueue_ticks == sched_ticks ||
-                      lp->lwp_setrunqueue_ticks == (int)(sched_ticks - 1)) &&
-                     batchy_looser_pri_test(lp))) {
-
-                       KTR_COND_LOG(usched_chooseproc_cc_not_good,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
-                           (unsigned long)siblings,
-                           (unsigned long)cpumask);
-
-                       cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
-                       level = 0;
-                       while (cpunode) {
-                               if (cpunode->members & cpumask)
-                                       break;
-                               cpunode = cpunode->parent_node;
-                               level++;
-                       }
-                       if (level < min_level ||
-                           (level == min_level && min_level_lwp &&
-                            lp->lwp_priority < min_level_lwp->lwp_priority)) {
-                               kick_helper(min_level_lwp);
-                               min_level_lwp = lp;
-                               min_level = level;
-                               min_q = q;
-                               min_which = which;
-                               min_pri = pri;
-                       } else {
-                               kick_helper(lp);
-                       }
-                       lp = TAILQ_NEXT(lp, lwp_procq);
-                       if (lp == NULL) {
-                               *which2 &= ~(1 << pri);
-                               goto again;
+       cpup = root_cpu_node;
+       rdd = dd;
+       level = cpu_topology_levels_number;
+
+       while (cpup) {
+               /*
+                * Degenerate case super-root
+                */
+               if (cpup->child_node && cpup->child_no == 1) {
+                       cpup = cpup->child_node;
+                       --level;
+                       continue;
+               }
+
+               /*
+                * Terminal cpunode
+                */
+               if (cpup->child_node == NULL) {
+                       rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
+                       break;
+               }
+
+               cpub = NULL;
+               lowest_load = 0x7FFFFFFF;
+
+               for (n = 0; n < cpup->child_no; ++n) {
+                       /*
+                        * Accumulate load information for all cpus
+                        * which are members of this node.
+                        */
+                       cpun = &cpup->child_node[n];
+                       mask = cpun->members & usched_global_cpumask &
+                              smp_active_mask & lp->lwp_cpumask;
+                       if (mask == 0)
+                               continue;
+                       load = 0;
+                       while (mask) {
+                               cpuid = BSFCPUMASK(mask);
+                               load += dfly_pcpu[cpuid].uload;
+                               mask &= ~CPUMASK(cpuid);
                        }
-               } else {
-                       KTR_COND_LOG(usched_chooseproc_cc_elected,
-                           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-                           lp->lwp_proc->p_pid,
-                           (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
-                           (unsigned long)siblings,
-                           (unsigned long)cpumask);
 
-                       goto found;
+                       /*
+                        * Give a slight advantage to nearby cpus.
+                        */
+                       if (cpun->members & dd->cpumask)
+                               load -= PPQ * level;
+
+                       /*
+                        * Calculate the best load
+                        */
+                       if (cpub == NULL || lowest_load > load ||
+                           (lowest_load == load &&
+                            (cpun->members & dd->cpumask))
+                       ) {
+                               lowest_load = load;
+                               cpub = cpun;
+                       }
                }
-               ++checks;
+               cpup = cpub;
+               --level;
        }
+       if (usched_dfly_chooser)
+               kprintf("lp %02d->%02d %s\n",
+                       lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
+       return (rdd);
+}
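
dfly_choose_best_queue() walks the topology top-down, summing the uload of each
child group and crediting the group that contains the pushing cpu with
PPQ * level, then descending into the lightest group.  A hand-rolled two-level
example; the PPQ value and the 2x2 topology are assumptions for illustration
only:

#include <stdio.h>

#define PPQ 4                          /* assumed illustrative value */

/*
 * Two packages of two cpus each; uload[] is the per-cpu queued load.
 * The pushing cpu is cpu 0, so groups containing cpu 0 get a small
 * "nearby" credit of PPQ * level, mirroring the traversal above.
 */
int
main(void)
{
        int uload[4] = { 90, 10, 30, 40 };
        int level = 2;

        /* Level 2: package {0,1} (nearby) vs package {2,3}. */
        int pkg0 = uload[0] + uload[1] - PPQ * level;  /* 92 */
        int pkg1 = uload[2] + uload[3];                /* 70 */
        int base = (pkg0 <= pkg1) ? 0 : 2;             /* descend into {2,3} */
        --level;

        /* Level 1: individual cpus; the nearby credit only applies if the
         * chosen package still contains cpu 0 (it does not here). */
        int c0 = uload[base + 0] - ((base == 0) ? PPQ * level : 0);
        int c1 = uload[base + 1];

        printf("push target: cpu %d\n", base + (c0 <= c1 ? 0 : 1));  /* cpu 2 */
        return 0;
}
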
+
+/*
+ * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
+ *
+ * Choose the worst queue close to dd's cpu node with a non-empty runq.
+ *
+ * This is used by the thread chooser when the current cpu's queues are
+ * empty to steal a thread from another cpu's queue.  We want to offload
+ * the most heavily-loaded queue.
+ */
+static
+dfly_pcpu_t
+dfly_choose_worst_queue(dfly_pcpu_t dd)
+{
+       cpumask_t mask;
+       cpu_node_t *cpup;
+       cpu_node_t *cpun;
+       cpu_node_t *cpub;
+       dfly_pcpu_t rdd;
+       int cpuid;
+       int n;
+       int load;
+       int highest_load;
+       int uloadok;
+       int level;
 
        /*
-        * Checks exhausted, we tried to defer too many threads, so schedule
-        * the best of the worst.
+        * When the topology is unknown we have no load information to go
+        * on, so there is nothing to steal from.
         */
-       lp = min_level_lwp;
-       q = min_q;
-       which = min_which;
-       pri = min_pri;
-       KASSERT(lp, ("chooseproc: at least the first lp was good"));
-
-found:
+       if (dd->cpunode == NULL) {
+               return (NULL);
+       }
 
        /*
-        * If the passed lwp <chklp> is reasonably close to the selected
-        * lwp <lp>, return NULL (indicating that <chklp> should be kept).
-        *
-        * Note that we must error on the side of <chklp> to avoid bouncing
-        * between threads in the acquire code.
+        * When the topology is known, choose a cpu whose group has, in
+        * aggregate, the highest weighted load.
         */
-       if (chklp) {
-               if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
-                       kick_helper(lp);
-                       return(NULL);
+       cpup = root_cpu_node;
+       rdd = dd;
+       level = cpu_topology_levels_number;
+       while (cpup) {
+               /*
+                * Degenerate case super-root
+                */
+               if (cpup->child_node && cpup->child_no == 1) {
+                       cpup = cpup->child_node;
+                       --level;
+                       continue;
                }
-       }
 
-       KTR_COND_LOG(usched_chooseproc_cc,
-           lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
-           lp->lwp_proc->p_pid,
-           lp->lwp_thread->td_gd->gd_cpuid,
-           mycpu->gd_cpuid);
+               /*
+                * Terminal cpunode
+                */
+               if (cpup->child_node == NULL) {
+                       rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
+                       break;
+               }
 
-       TAILQ_REMOVE(q, lp, lwp_procq);
-       --bsd4_runqcount;
-       if (TAILQ_EMPTY(q))
-               *which &= ~(1 << pri);
-       KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
-       atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
+               cpub = NULL;
+               highest_load = 0;
 
-       return lp;
+               for (n = 0; n < cpup->child_no; ++n) {
+                       /*
+                        * Accumulate load information for all cpus
+                        * which are members of this node.
+                        */
+                       cpun = &cpup->child_node[n];
+                       mask = cpun->members & usched_global_cpumask &
+                              smp_active_mask;
+                       if (mask == 0)
+                               continue;
+                       load = 0;
+                       uloadok = 0;
+                       while (mask) {
+                               cpuid = BSFCPUMASK(mask);
+                               load += dfly_pcpu[cpuid].uload;
+                               if (dfly_pcpu[cpuid].uload)
+                                       uloadok = 1;
+                               if (dfly_pcpu[cpuid].uschedcp) {
+                                       load += (dfly_pcpu[cpuid].upri &
+                                                ~PPQMASK) & PRIMASK;
+                               }
+                               mask &= ~CPUMASK(cpuid);
+                       }
+
+                       /*
+                        * Give a slight advantage to nearby cpus.
+                        */
+                       if (cpun->members & dd->cpumask)
+                               load += PPQ * level;
+
+                       /*
+                        * The best candidate is the one with the worst
+                        * (highest) load.  Prefer candidates that are
+                        * closer to our cpu.
+                        */
+                       if (uloadok &&
+                           (cpub == NULL || highest_load < load ||
+                            (highest_load == load &&
+                             (cpun->members & dd->cpumask)))
+                       ) {
+                               highest_load = load;
+                               cpub = cpun;
+                       }
+               }
+               cpup = cpub;
+               --level;
+       }
+       return (rdd);
 }
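
On the pull side the load metric also charges each cpu for whatever it is
currently running (the weight of its uschedcp) on top of the queued uload, so
the chooser gravitates toward the most oversubscribed group.  A minimal sketch
of just that weighting, with the mask values assumed for illustration:

#include <stdio.h>

#define PPQMASK 3                      /* assumed illustrative values */
#define PRIMASK 127

/* Queued load plus the weight of the currently running user thread. */
static int
steal_load(int uload, int running_upri, int has_uschedcp)
{
        int load = uload;

        if (has_uschedcp)
                load += (running_upri & ~PPQMASK) & PRIMASK;
        return load;
}

int
main(void)
{
        /* Both cpus have 64 units queued, but one is also running a thread. */
        printf("busy cpu: %d, idle cpu: %d\n",
               steal_load(64, 96, 1), steal_load(64, 0, 0));   /* 160 vs 64 */
        return 0;
}
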
 
-/*
- * If we aren't willing to schedule a ready process on our cpu, give it's
- * target cpu a kick rather than wait for the next tick.
- *
- * Called with bsd4_spin held.
- */
 static
-void
-kick_helper(struct lwp *lp)
+dfly_pcpu_t
+dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
 {
-       globaldata_t gd;
-       bsd4_pcpu_t dd;
-
-       if (lp == NULL)
-               return;
-       gd = lp->lwp_thread->td_gd;
-       dd = &bsd4_pcpu[gd->gd_cpuid];
-       if ((smp_active_mask & usched_global_cpumask &
-           bsd4_rdyprocmask & gd->gd_cpumask) == 0) {
-               return;
+       dfly_pcpu_t rdd;
+       cpumask_t tmpmask;
+       cpumask_t mask;
+       int cpuid;
+
+       /*
+        * Fall back to the original heuristic: select a random cpu, first
+        * checking cpus not currently running a user thread.
+        */
+       cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
+       mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
+              smp_active_mask & usched_global_cpumask;
+
+       while (mask) {
+               tmpmask = ~(CPUMASK(cpuid) - 1);
+               if (mask & tmpmask)
+                       cpuid = BSFCPUMASK(mask & tmpmask);
+               else
+                       cpuid = BSFCPUMASK(mask);
+               rdd = &dfly_pcpu[cpuid];
+
+               if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
+                       goto found;
+               mask &= ~CPUMASK(cpuid);
        }
-       ++usched_bsd4_kicks;
-       atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
-       if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
-               lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
-       } else {
-               wakeup(&dd->helper_thread);
+
+       /*
+        * Then cpus which might have a currently running lp
+        */
+       cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
+       mask = dfly_curprocmask & dfly_rdyprocmask &
+              lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
+
+       while (mask) {
+               tmpmask = ~(CPUMASK(cpuid) - 1);
+               if (mask & tmpmask)
+                       cpuid = BSFCPUMASK(mask & tmpmask);
+               else
+                       cpuid = BSFCPUMASK(mask);
+               rdd = &dfly_pcpu[cpuid];
+
+               if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
+                       goto found;
+               mask &= ~CPUMASK(cpuid);
        }
+
+       /*
+        * If we cannot find a suitable cpu we reload from dfly_scancpu
+        * and round-robin.  Other cpus will pick it up as they release
+        * their current lwps or become ready.
+        *
+        * Avoid a degenerate system lockup case if usched_global_cpumask
+        * is set to 0 or otherwise does not cover lwp_cpumask.
+        *
+        * We only kick the target helper thread in this case; we do not
+        * set the user resched flag.
+        */
+       cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
+       if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
+               cpuid = 0;
+       rdd = &dfly_pcpu[cpuid];
+found:
+       return (rdd);
 }
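
dfly_choose_queue_simple() scans the candidate mask starting at a rotor
(dfly_scancpu) using the ~(CPUMASK(cpuid) - 1) trick: first the set bits at or
above the rotor, then wrap around to the lowest set bit.  A userspace model
using ffs(); the 32-bit mask and cpu numbering are illustrative:

#include <stdio.h>
#include <strings.h>                   /* ffs() */

/* Return the first ready cpu at or above the rotor, wrapping if needed. */
static int
scan(unsigned mask, int start)
{
        unsigned tmpmask = ~((1u << start) - 1);       /* bits >= start */

        if (mask & tmpmask)
                return ffs(mask & tmpmask) - 1;
        return ffs(mask) - 1;
}

int
main(void)
{
        unsigned ready = 0x2b;                         /* cpus 0,1,3,5 ready */

        printf("%d %d %d\n", scan(ready, 2), scan(ready, 4), scan(ready, 6));
        /* prints: 3 5 0 */
        return 0;
}
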
 
 static
 void
-need_user_resched_remote(void *dummy)
+dfly_need_user_resched_remote(void *dummy)
 {
        globaldata_t gd = mycpu;
-       bsd4_pcpu_t  dd = &bsd4_pcpu[gd->gd_cpuid];
+       dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
 
        need_user_resched();
 
@@ -1641,7 +1494,7 @@ need_user_resched_remote(void *dummy)
 #endif
 
 /*
- * bsd4_remrunqueue_locked() removes a given process from the run queue
+ * dfly_remrunqueue_locked() removes a given process from the run queue
  * that it is on, clearing the queue busy bit if it becomes empty.
  *
  * Note that user process scheduler is different from the LWKT schedule.
@@ -1649,10 +1502,11 @@ need_user_resched_remote(void *dummy)
  * underneath, and a user process operating in the kernel will often be
  * 'released' from our management.
  *
- * MPSAFE - bsd4_spin must be held exclusively on call
+ * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
+ * to sleep or the lwp is moved to a different runq.
  */
 static void
-bsd4_remrunqueue_locked(struct lwp *lp)
+dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
 {
        struct rq *q;
        u_int32_t *which;
@@ -1660,23 +1514,24 @@ bsd4_remrunqueue_locked(struct lwp *lp)
 
        KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
        atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
-       --bsd4_runqcount;
-       KKASSERT(bsd4_runqcount >= 0);
+       --rdd->runqcount;
+       /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
+       KKASSERT(rdd->runqcount >= 0);
 
        pri = lp->lwp_rqindex;
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
-               q = &bsd4_queues[pri];
-               which = &bsd4_queuebits;
+               q = &rdd->queues[pri];
+               which = &rdd->queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
-               q = &bsd4_rtqueues[pri];
-               which = &bsd4_rtqueuebits;
+               q = &rdd->rtqueues[pri];
+               which = &rdd->rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
-               q = &bsd4_idqueues[pri];
-               which = &bsd4_idqueuebits;
+               q = &rdd->idqueues[pri];
+               which = &rdd->idqueuebits;
                break;
        default:
                panic("remrunqueue: invalid rtprio type");
@@ -1691,42 +1546,61 @@ bsd4_remrunqueue_locked(struct lwp *lp)
 }
 
 /*
- * bsd4_setrunqueue_locked()
+ * dfly_setrunqueue_locked()
  *
  * Add a process whos rqtype and rqindex had previously been calculated
  * onto the appropriate run queue.   Determine if the addition requires
  * a reschedule on a cpu and return the cpuid or -1.
  *
- * NOTE: Lower priorities are better priorities.
+ * NOTE:         Lower priorities are better priorities.
  *
- * MPSAFE - bsd4_spin must be held exclusively on call
+ * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
+ *               sum of the rough lwp_priority for all running and runnable
+ *               processes.  Lower priority processes (higher lwp_priority
+ *               values) actually DO count as more load, not less, because
+ *               these are the programs which require the most care with
+ *               regards to cpu selection.
  */
 static void
-bsd4_setrunqueue_locked(struct lwp *lp)
+dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
 {
        struct rq *q;
        u_int32_t *which;
        int pri;
 
+       if (lp->lwp_qcpu != rdd->cpuid) {
+               if (lp->lwp_mpflags & LWP_MP_ULOAD) {
+                       atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+                       atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
+                                  -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
+               }
+               lp->lwp_qcpu = rdd->cpuid;
+       }
+
        KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
-       ++bsd4_runqcount;
+       ++rdd->runqcount;
+       if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
+               atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
+               atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
+                              (lp->lwp_priority & ~PPQMASK) & PRIMASK);
+       }
 
        pri = lp->lwp_rqindex;
 
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
-               q = &bsd4_queues[pri];
-               which = &bsd4_queuebits;
+               q = &rdd->queues[pri];
+               which = &rdd->queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
-               q = &bsd4_rtqueues[pri];
-               which = &bsd4_rtqueuebits;
+               q = &rdd->rtqueues[pri];
+               which = &rdd->rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
-               q = &bsd4_idqueues[pri];
-               which = &bsd4_idqueuebits;
+               q = &rdd->idqueues[pri];
+               which = &rdd->idqueuebits;
                break;
        default:
                panic("remrunqueue: invalid rtprio type");
@@ -1756,78 +1630,79 @@ bsd4_setrunqueue_locked(struct lwp *lp)
  * We can't use the idle thread for this because we might block.
  * Additionally, doing things this way allows us to HLT idle cpus
  * on MP systems.
- *
- * MPSAFE
  */
 static void
-sched_thread(void *dummy)
+dfly_helper_thread(void *dummy)
 {
     globaldata_t gd;
-    bsd4_pcpu_t  dd;
-    bsd4_pcpu_t  tmpdd;
+    dfly_pcpu_t  dd;
     struct lwp *nlp;
     cpumask_t mask;
     int cpuid;
-#ifdef SMP
-    cpumask_t tmpmask;
-    int tmpid;
-#endif
 
     gd = mycpu;
     cpuid = gd->gd_cpuid;      /* doesn't change */
     mask = gd->gd_cpumask;     /* doesn't change */
-    dd = &bsd4_pcpu[cpuid];
+    dd = &dfly_pcpu[cpuid];
 
     /*
-     * Since we are woken up only when no user processes are scheduled
-     * on a cpu, we can run at an ultra low priority.
+     * Since we only want to be woken up when no user processes
+     * are scheduled on a cpu, run at an ultra low priority.
      */
     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
 
-    tsleep(&dd->helper_thread, 0, "sched_thread_sleep", 0);
+    tsleep(&dd->helper_thread, 0, "schslp", 0);
 
     for (;;) {
        /*
         * We use the LWKT deschedule-interlock trick to avoid racing
-        * bsd4_rdyprocmask.  This means we cannot block through to the
+        * dfly_rdyprocmask.  This means we cannot block through to the
         * manual lwkt_switch() call we make below.
         */
        crit_enter_gd(gd);
        tsleep_interlock(&dd->helper_thread, 0);
-       spin_lock(&bsd4_spin);
-       atomic_set_cpumask(&bsd4_rdyprocmask, mask);
 
+       /*spin_lock(&dfly_spin);*/
+       spin_lock(&dd->spin);
+
+       atomic_set_cpumask(&dfly_rdyprocmask, mask);
        clear_user_resched();   /* This satisfied the reschedule request */
        dd->rrcount = 0;        /* Reset the round-robin counter */
 
-       if ((bsd4_curprocmask & mask) == 0) {
+       if ((dfly_curprocmask & mask) == 0) {
                /*
                 * No thread is currently scheduled.
                 */
                KKASSERT(dd->uschedcp == NULL);
-               if ((nlp = chooseproc_locked(NULL)) != NULL) {
+               if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
                        KTR_COND_LOG(usched_sched_thread_no_process,
-                           nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
+                           nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
                            gd->gd_cpuid,
                            nlp->lwp_proc->p_pid,
                            nlp->lwp_thread->td_gd->gd_cpuid);
 
-                       atomic_set_cpumask(&bsd4_curprocmask, mask);
+                       atomic_set_cpumask(&dfly_curprocmask, mask);
                        dd->upri = nlp->lwp_priority;
                        dd->uschedcp = nlp;
                        dd->rrcount = 0;        /* reset round robin */
-                       spin_unlock(&bsd4_spin);
+                       spin_unlock(&dd->spin);
+                       /*spin_unlock(&dfly_spin);*/
 #ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
 #endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
-                       spin_unlock(&bsd4_spin);
+                       spin_unlock(&dd->spin);
+                       /*spin_unlock(&dfly_spin);*/
                }
-       } else if (bsd4_runqcount) {
-               if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
+       } else if (dd->runqcount) {
+               /*
+                * Possibly find a better process to schedule.
+                */
+               nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
+               if (nlp) {
                        KTR_COND_LOG(usched_sched_thread_process,
-                           nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
+                           nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
                            gd->gd_cpuid,
                            nlp->lwp_proc->p_pid,
                            nlp->lwp_thread->td_gd->gd_cpuid);
@@ -1835,44 +1710,26 @@ sched_thread(void *dummy)
                        dd->upri = nlp->lwp_priority;
                        dd->uschedcp = nlp;
                        dd->rrcount = 0;        /* reset round robin */
-                       spin_unlock(&bsd4_spin);
+                       spin_unlock(&dd->spin);
+                       /*spin_unlock(&dfly_spin);*/
 #ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
 #endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
                        /*
-                        * CHAINING CONDITION TRAIN
-                        *
-                        * We could not deal with the scheduler wakeup
-                        * request on this cpu, locate a ready scheduler
-                        * with no current lp assignment and chain to it.
-                        *
-                        * This ensures that a wakeup race which fails due
-                        * to priority test does not leave other unscheduled
-                        * cpus idle when the runqueue is not empty.
+                        * Leave the thread on our run queue.  Another
+                        * scheduler will try to pull it later.
                         */
-                       tmpmask = ~bsd4_curprocmask &
-                                 bsd4_rdyprocmask & smp_active_mask;
-                       if (tmpmask) {
-                               tmpid = BSFCPUMASK(tmpmask);
-                               tmpdd = &bsd4_pcpu[tmpid];
-                               atomic_clear_cpumask(&bsd4_rdyprocmask,
-                                                    CPUMASK(tmpid));
-                               spin_unlock(&bsd4_spin);
-                               wakeup(&tmpdd->helper_thread);
-                       } else {
-                               spin_unlock(&bsd4_spin);
-                       }
-
-                       KTR_LOG(usched_sched_thread_no_process_found,
-                               gd->gd_cpuid, (unsigned long)tmpmask);
+                       spin_unlock(&dd->spin);
+                       /*spin_unlock(&dfly_spin);*/
                }
        } else {
                /*
                 * The runq is empty.
                 */
-               spin_unlock(&bsd4_spin);
+               spin_unlock(&dd->spin);
+               /*spin_unlock(&dfly_spin);*/
        }
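
The helper loop depends on the tsleep_interlock() ordering: the thread
registers for the wakeup before publishing its rdyprocmask bit and re-checking
the queues, so a wakeup that races in between is recorded rather than lost.
The toy below is a deliberately simplified userspace model of only that
ordering guarantee, not the kernel API:

#include <stdio.h>
#include <stdbool.h>

static bool registered;                /* armed by the interlock */
static bool wakeup_pending;            /* wakeup recorded while armed */

static void
toy_tsleep_interlock(void)
{
        registered = true;
        wakeup_pending = false;
}

static void
toy_wakeup(void)
{
        if (registered)
                wakeup_pending = true;
}

/* Returns immediately if a wakeup raced in after the interlock. */
static const char *
toy_tsleep(void)
{
        registered = false;
        return wakeup_pending ? "woken (no lost wakeup)" : "would sleep";
}

int
main(void)
{
        toy_tsleep_interlock();        /* 1. arm the interlock            */
        /* 2. publish ready bit, re-check the run queues ...              */
        toy_wakeup();                  /* 3. racing wakeup from other cpu */
        printf("%s\n", toy_tsleep());  /* 4. sleep sees the wakeup        */
        return 0;
}
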
 
        /*
@@ -1887,18 +1744,18 @@ sched_thread(void *dummy)
 
 /* sysctl stick_to_level parameter */
 static int
-sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
+sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
 {
        int error, new_val;
 
-       new_val = usched_bsd4_stick_to_level;
+       new_val = usched_dfly_stick_to_level;
 
        error = sysctl_handle_int(oidp, &new_val, 0, req);
         if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
                return (EINVAL);
-       usched_bsd4_stick_to_level = new_val;
+       usched_dfly_stick_to_level = new_val;
        return (0);
 }
 
@@ -1907,9 +1764,10 @@ sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
  * been cleared by rqinit() and we should not mess with it further.
  */
 static void
-sched_thread_cpu_init(void)
+dfly_helper_thread_cpu_init(void)
 {
        int i;
+       int j;
        int cpuid;
        int smt_not_supported = 0;
        int cache_coherent_not_supported = 0;
@@ -1917,20 +1775,29 @@ sched_thread_cpu_init(void)
        if (bootverbose)
                kprintf("Start scheduler helpers on cpus:\n");
 
-       sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
-       usched_bsd4_sysctl_tree =
-               SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
+       sysctl_ctx_init(&usched_dfly_sysctl_ctx);
+       usched_dfly_sysctl_tree =
+               SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
                                SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
-                               "usched_bsd4", CTLFLAG_RD, 0, "");
+                               "usched_dfly", CTLFLAG_RD, 0, "");
 
        for (i = 0; i < ncpus; ++i) {
-               bsd4_pcpu_t dd = &bsd4_pcpu[i];
+               dfly_pcpu_t dd = &dfly_pcpu[i];
                cpumask_t mask = CPUMASK(i);
 
                if ((mask & smp_active_mask) == 0)
                    continue;
 
+               spin_init(&dd->spin);
                dd->cpunode = get_cpu_node_by_cpuid(i);
+               dd->cpuid = i;
+               dd->cpumask = CPUMASK(i);
+               for (j = 0; j < NQS; j++) {
+                       TAILQ_INIT(&dd->queues[j]);
+                       TAILQ_INIT(&dd->rtqueues[j]);
+                       TAILQ_INIT(&dd->idqueues[j]);
+               }
+               atomic_clear_cpumask(&dfly_curprocmask, 1);
 
                if (dd->cpunode == NULL) {
                        smt_not_supported = 1;
@@ -1987,7 +1854,7 @@ sched_thread_cpu_init(void)
                        }
                }
 
-               lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
+               lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
                            0, i, "usched %d", i);
 
                /*
@@ -1995,44 +1862,44 @@ sched_thread_cpu_init(void)
                 * been enabled in rqinit().
                 */
                if (i)
-                   atomic_clear_cpumask(&bsd4_curprocmask, mask);
-               atomic_set_cpumask(&bsd4_rdyprocmask, mask);
+                   atomic_clear_cpumask(&dfly_curprocmask, mask);
+               atomic_set_cpumask(&dfly_rdyprocmask, mask);
                dd->upri = PRIBASE_NULL;
 
        }
 
-       /* usched_bsd4 sysctl configurable parameters */
+       /* usched_dfly sysctl configurable parameters */
 
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "rrinterval", CTLFLAG_RW,
-                      &usched_bsd4_rrinterval, 0, "");
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+                      &usched_dfly_rrinterval, 0, "");
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "decay", CTLFLAG_RW,
-                      &usched_bsd4_decay, 0, "Extra decay when not running");
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+                      &usched_dfly_decay, 0, "Extra decay when not running");
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "batch_time", CTLFLAG_RW,
-                      &usched_bsd4_batch_time, 0, "Min batch counter value");
-       SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+                      &usched_dfly_batch_time, 0, "Min batch counter value");
+       SYSCTL_ADD_LONG(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "kicks", CTLFLAG_RW,
-                      &usched_bsd4_kicks, "Number of kickstarts");
+                      &usched_dfly_kicks, "Number of kickstarts");
 
        /* Add enable/disable option for SMT scheduling if supported */
        if (smt_not_supported) {
-               usched_bsd4_smt = 0;
-               SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
-                                 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               usched_dfly_smt = 0;
+               SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
+                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                                  OID_AUTO, "smt", CTLFLAG_RD,
                                  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
        } else {
-               usched_bsd4_smt = 1;
-               SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                              SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               usched_dfly_smt = 1;
+               SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                              SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                               OID_AUTO, "smt", CTLFLAG_RW,
-                              &usched_bsd4_smt, 0, "Enable SMT scheduling");
+                              &usched_dfly_smt, 0, "Enable SMT scheduling");
        }
 
        /*
@@ -2041,74 +1908,73 @@ sched_thread_cpu_init(void)
         */
        if (cache_coherent_not_supported) {
 #ifdef SMP
-               usched_bsd4_cache_coherent = 0;
-               SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
-                                 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               usched_dfly_cache_coherent = 0;
+               SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
+                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                                  OID_AUTO, "cache_coherent", CTLFLAG_RD,
                                  "NOT SUPPORTED", 0,
                                  "Cache coherence NOT SUPPORTED");
 #endif
        } else {
 #ifdef SMP
-               usched_bsd4_cache_coherent = 1;
-               SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                              SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               usched_dfly_cache_coherent = 1;
+               SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                              SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                               OID_AUTO, "cache_coherent", CTLFLAG_RW,
-                              &usched_bsd4_cache_coherent, 0,
+                              &usched_dfly_cache_coherent, 0,
                               "Enable/Disable cache coherent scheduling");
 #endif
 
-               SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                              SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                              SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                               OID_AUTO, "upri_affinity", CTLFLAG_RW,
-                              &usched_bsd4_upri_affinity, 1,
+                              &usched_dfly_upri_affinity, 1,
                               "Number of PPQs in user priority check");
 
-               SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                              SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                              SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                               OID_AUTO, "queue_checks", CTLFLAG_RW,
-                              &usched_bsd4_queue_checks, 5,
+                              &usched_dfly_queue_checks, 5,
                               "LWPs to check from a queue before giving up");
 
-               SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
-                               SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+               SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
+                               SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                                OID_AUTO, "stick_to_level",
                                CTLTYPE_INT | CTLFLAG_RW,
-                               NULL, sizeof usched_bsd4_stick_to_level,
-                               sysctl_usched_bsd4_stick_to_level, "I",
+                               NULL, sizeof usched_dfly_stick_to_level,
+                               sysctl_usched_dfly_stick_to_level, "I",
                                "Stick a process to this level. See sysctl"
                                "paremter hw.cpu_topology.level_description");
        }
 }
 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
-       sched_thread_cpu_init, NULL)
+       dfly_helper_thread_cpu_init, NULL)
 
 #else /* No SMP options - just add the configurable parameters to sysctl */
 
 static void
 sched_sysctl_tree_init(void)
 {
-       sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
-       usched_bsd4_sysctl_tree =
-               SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
+       sysctl_ctx_init(&usched_dfly_sysctl_ctx);
+       usched_dfly_sysctl_tree =
+               SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
                                SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
-                               "usched_bsd4", CTLFLAG_RD, 0, "");
+                               "usched_dfly", CTLFLAG_RD, 0, "");
 
-       /* usched_bsd4 sysctl configurable parameters */
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+       /* usched_dfly sysctl configurable parameters */
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "rrinterval", CTLFLAG_RW,
-                      &usched_bsd4_rrinterval, 0, "");
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+                      &usched_dfly_rrinterval, 0, "");
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "decay", CTLFLAG_RW,
-                      &usched_bsd4_decay, 0, "Extra decay when not running");
-       SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
+                      &usched_dfly_decay, 0, "Extra decay when not running");
+       SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
+                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "batch_time", CTLFLAG_RW,
-                      &usched_bsd4_batch_time, 0, "Min batch counter value");
+                      &usched_dfly_batch_time, 0, "Min batch counter value");
 }
 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
        sched_sysctl_tree_init, NULL)
 #endif
-
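
The tunables registered above land under the kern.usched_dfly sysctl node and
can be inspected from userland. A minimal sketch, assuming only the OID names
added by this patch (the error handling and output are illustrative, not part
of the change):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int rrinterval;
	size_t len = sizeof(rrinterval);

	/* read the round-robin interval exported by usched_dfly */
	if (sysctlbyname("kern.usched_dfly.rrinterval",
	    &rrinterval, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("usched_dfly rrinterval: %d\n", rrinterval);
	return 0;
}
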
index 4a1efb6..936966e 100644 (file)
@@ -72,6 +72,7 @@ static void dummy_recalculate_estcpu(struct lwp *lp);
 static void dummy_resetpriority(struct lwp *lp);
 static void dummy_forking(struct lwp *plp, struct lwp *lp);
 static void dummy_exiting(struct lwp *plp, struct proc *child);
+static void dummy_uload_update(struct lwp *lp);
 static void dummy_yield(struct lwp *lp);
 
 struct usched usched_dummy = {
@@ -87,6 +88,7 @@ struct usched usched_dummy = {
        dummy_resetpriority,
        dummy_forking,
        dummy_exiting,
+       dummy_uload_update,
        NULL,                   /* setcpumask not supported */
        dummy_yield
 };
@@ -426,21 +428,19 @@ dummy_forking(struct lwp *plp, struct lwp *lp)
 }
 
 /*
- * DUMMY_EXITING
- *
- * Called when the parent reaps a child.   Typically used to propogate cpu
- * use by the child back to the parent as part of a batch detection
- * heuristic.  
- *
- * NOTE: cpu use is not normally back-propogated to PID 1.
- *
- * MPSAFE
+ * Called when a lwp is being removed from this scheduler, typically
+ * during lwp_exit().
  */
 static void
 dummy_exiting(struct lwp *plp, struct proc *child)
 {
 }
 
+static void
+dummy_uload_update(struct lwp *lp)
+{
+}
+
 /*
  * SMP systems may need a scheduler helper thread.  This is how one can be
  * setup.
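
The new uload_update() hook, a no-op in the dummy scheduler above, exists so a
scheduler can keep a per-cpu load factor consistent as lwps block and wake. A
conceptual, userland-compilable sketch of that accounting pattern; the names
(cpu_load, ULOAD_WEIGHT) and the flag handling are illustrative and not taken
from usched_dfly:

#include <stdatomic.h>
#include <stdbool.h>

#define ULOAD_WEIGHT	200		/* fixed weight per runnable lwp */

static atomic_int cpu_load[64];		/* per-cpu load factor */

static void
uload_set(int cpu, bool *uload_flag)
{
	/* contribute the weight exactly once while the flag is set */
	if (!*uload_flag) {
		*uload_flag = true;
		atomic_fetch_add(&cpu_load[cpu], ULOAD_WEIGHT);
	}
}

static void
uload_clear(int cpu, bool *uload_flag)
{
	/* remove the contribution when the lwp stops counting as load */
	if (*uload_flag) {
		*uload_flag = false;
		atomic_fetch_sub(&cpu_load[cpu], ULOAD_WEIGHT);
	}
}
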
index 4aff0dc..a7a2342 100644 (file)
@@ -14,12 +14,10 @@ struct cpu_node {
 typedef struct cpu_node cpu_node_t;
 
 extern int cpu_topology_levels_number;
+extern cpu_node_t *root_cpu_node;
 
-cpumask_t get_cpumask_from_level(int cpuid,
-                       uint8_t level_type);
-
-cpu_node_t *
-get_cpu_node_by_cpuid(int cpuid);
+cpumask_t get_cpumask_from_level(int cpuid, uint8_t level_type);
+cpu_node_t *get_cpu_node_by_cpuid(int cpuid);
 
 #define LEVEL_NO 4
 
index b6eedfc..1753bb2 100644 (file)
@@ -200,7 +200,7 @@ struct lwp {
        sysclock_t      lwp_cpbase;     /* Measurement base */
        fixpt_t         lwp_pctcpu;     /* %cpu for this process */
        u_int           lwp_slptime;    /* Time since last blocked. */
-       u_int           lwp_setrunqueue_ticks;  /* Tick count - lwp set on runqueue */
+       u_int           lwp_rebal_ticks; /* Timestamp sched on current cpu */
 
        int             lwp_traceflag;  /* Kernel trace points. */
 
@@ -400,6 +400,7 @@ struct      proc {
 #define        LWP_MP_ONRUNQ   0x0000001 /* on a user scheduling run queue */
 #define LWP_MP_WEXIT   0x0000002 /* working on exiting */
 #define        LWP_MP_WSTOP    0x0000004 /* working on stopping */
+#define        LWP_MP_ULOAD    0x0000008 /* uload accounting for current cpu */
 
 #define        FIRST_LWP_IN_PROC(p)            RB_FIRST(lwp_rb_tree, &(p)->p_lwp_tree)
 #define        FOREACH_LWP_IN_PROC(lp, p)      \
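
The new LWP_MP_ULOAD bit marks whether an lwp is currently counted in a cpu's
load. A minimal sketch of how such a bit is typically toggled, assuming the
same atomic_set_int()/atomic_clear_int() convention used for the other
LWP_MP_* flags in lwp_mpflags (the accounting comments are illustrative):

if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
	/* add lp's weight to the load of its current cpu */
}

if (lp->lwp_mpflags & LWP_MP_ULOAD) {
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
	/* subtract lp's weight from that cpu's load */
}
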
index be4ad93..c0649c0 100644 (file)
@@ -41,6 +41,7 @@ struct usched {
     void (*resetpriority)(struct lwp *);
     void (*heuristic_forking)(struct lwp *, struct lwp *);
     void (*heuristic_exiting)(struct lwp *, struct proc *);
+    void (*uload_update)(struct lwp *);
     void (*setcpumask)(struct usched *, cpumask_t);
     void (*yield)(struct lwp *);
 };
@@ -58,6 +59,15 @@ union usched_data {
        u_short rqtype;         /* protected copy of rtprio type */
        u_short unused02;
     } bsd4;
+    struct {
+       short   priority;       /* lower is better */
+       char    unused01;       /* (currently not used) */
+       char    rqindex;
+       int     batch;          /* batch mode heuristic */
+       int     estcpu;         /* dynamic priority modification */
+       u_short rqtype;         /* protected copy of rtprio type */
+       u_short qcpu;           /* which cpu are we enqueued on? */
+    } dfly;
 
     int                pad[4];         /* PAD for future expansion */
 };
@@ -82,6 +92,7 @@ union usched_data {
 #ifdef _KERNEL
 
 extern struct usched   usched_bsd4;
+extern struct usched   usched_dfly;
 extern struct usched   usched_dummy;
 extern cpumask_t usched_mastermask;
 extern int sched_ticks; /* From sys/kern/kern_clock.c */
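
Both additions are reached the same way as the existing hooks and
per-scheduler state: through the process's scheduler ops vector and the dfly
branch of the lwp_usdata union. A short sketch under that assumption (the
surrounding context is illustrative):

struct lwp *lp = curthread->td_lwp;

/* let the active scheduler update lp's load contribution */
lp->lwp_proc->p_usched->uload_update(lp);

/* dfly-private per-lwp state, e.g. the cpu whose queue lp is on */
u_short qcpu = lp->lwp_usdata.dfly.qcpu;
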