kernel - add usched_dfly algorithm, set as default for now (3)
authorMatthew Dillon <dillon@apollo.backplane.com>
Tue, 18 Sep 2012 16:25:03 +0000 (09:25 -0700)
committerMatthew Dillon <dillon@apollo.backplane.com>
Tue, 18 Sep 2012 16:25:03 +0000 (09:25 -0700)
* UP compile fixes.

Reported-by: swildner
sys/kern/usched_bsd4.c
sys/kern/usched_dfly.c

index 085176c..1d29f59 100644 (file)
@@ -106,11 +106,11 @@ static void bsd4_yield(struct lwp *lp);
 static void bsd4_need_user_resched_remote(void *dummy);
 static int bsd4_batchy_looser_pri_test(struct lwp* lp);
 static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp);
+static void bsd4_kick_helper(struct lwp *lp);
 #endif
 static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp);
 static void bsd4_remrunqueue_locked(struct lwp *lp);
 static void bsd4_setrunqueue_locked(struct lwp *lp);
-static void bsd4_kick_helper(struct lwp *lp);
 
 struct usched usched_bsd4 = {
        { NULL },
@@ -195,11 +195,11 @@ static int usched_bsd4_cache_coherent = 0;
 static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
 static int usched_bsd4_queue_checks = 5;
 static int usched_bsd4_stick_to_level = 0;
+static long usched_bsd4_kicks;
 #endif
 static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
 static int usched_bsd4_decay = 8;
 static int usched_bsd4_batch_time = 10;
-static long usched_bsd4_kicks;
 
 /* KTR debug printings */
 
@@ -1762,10 +1762,8 @@ sched_thread(void *dummy)
     struct lwp *nlp;
     cpumask_t mask;
     int cpuid;
-#ifdef SMP
     cpumask_t tmpmask;
     int tmpid;
-#endif
 
     gd = mycpu;
     cpuid = gd->gd_cpuid;      /* doesn't change */
@@ -1811,9 +1809,7 @@ sched_thread(void *dummy)
                        dd->uschedcp = nlp;
                        dd->rrcount = 0;        /* reset round robin */
                        spin_unlock(&bsd4_spin);
-#ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
-#endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
                        spin_unlock(&bsd4_spin);
@@ -1830,9 +1826,7 @@ sched_thread(void *dummy)
                        dd->uschedcp = nlp;
                        dd->rrcount = 0;        /* reset round robin */
                        spin_unlock(&bsd4_spin);
-#ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
-#endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
                        /*
@@ -2034,23 +2028,19 @@ sched_thread_cpu_init(void)
         * if supported
         */
        if (cache_coherent_not_supported) {
-#ifdef SMP
                usched_bsd4_cache_coherent = 0;
                SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
                                  SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
                                  OID_AUTO, "cache_coherent", CTLFLAG_RD,
                                  "NOT SUPPORTED", 0,
                                  "Cache coherence NOT SUPPORTED");
-#endif
        } else {
-#ifdef SMP
                usched_bsd4_cache_coherent = 1;
                SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
                               SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
                               OID_AUTO, "cache_coherent", CTLFLAG_RW,
                               &usched_bsd4_cache_coherent, 0,
                               "Enable/Disable cache coherent scheduling");
-#endif
 
                SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
                               SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
@@ -2105,4 +2095,3 @@ sched_sysctl_tree_init(void)
 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
        sched_sysctl_tree_init, NULL)
 #endif
-
index 262cbdf..ddb624a 100644 (file)
@@ -213,7 +213,6 @@ static int usched_dfly_stick_to_level = 0;
 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
 static int usched_dfly_decay = 8;
 static int usched_dfly_batch_time = 10;
-static long usched_dfly_kicks;
 
 /* KTR debug printings */
 
@@ -558,7 +557,7 @@ dfly_setrunqueue(struct lwp *lp)
        if (rdd->uschedcp == NULL) {
                spin_lock(&rdd->spin);
                if (rdd->uschedcp == NULL) {
-                       atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
+                       atomic_set_cpumask(&dfly_curprocmask, rgd->gd_cpumask);
                        rdd->uschedcp = lp;
                        rdd->upri = lp->lwp_priority;
                        spin_unlock(&rdd->spin);
@@ -1092,7 +1091,9 @@ static
 struct lwp *
 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
 {
+#ifdef SMP
        dfly_pcpu_t xdd;
+#endif
        struct lwp *lp;
        struct rq *q;
        u_int32_t *which, *which2;
@@ -1683,9 +1684,7 @@ dfly_helper_thread(void *dummy)
                        dd->rrcount = 0;        /* reset round robin */
                        spin_unlock(&dd->spin);
                        /*spin_unlock(&dfly_spin);*/
-#ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
-#endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
                        spin_unlock(&dd->spin);
@@ -1708,9 +1707,7 @@ dfly_helper_thread(void *dummy)
                        dd->rrcount = 0;        /* reset round robin */
                        spin_unlock(&dd->spin);
                        /*spin_unlock(&dfly_spin);*/
-#ifdef SMP
                        lwkt_acquire(nlp->lwp_thread);
-#endif
                        lwkt_schedule(nlp->lwp_thread);
                } else {
                        /*
@@ -1878,10 +1875,6 @@ dfly_helper_thread_cpu_init(void)
                       SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                       OID_AUTO, "batch_time", CTLFLAG_RW,
                       &usched_dfly_batch_time, 0, "Min batch counter value");
-       SYSCTL_ADD_LONG(&usched_dfly_sysctl_ctx,
-                      SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
-                      OID_AUTO, "kicks", CTLFLAG_RW,
-                      &usched_dfly_kicks, "Number of kickstarts");
 
        /* Add enable/disable option for SMT scheduling if supported */
        if (smt_not_supported) {
@@ -1903,23 +1896,19 @@ dfly_helper_thread_cpu_init(void)
         * if supported
         */
        if (cache_coherent_not_supported) {
-#ifdef SMP
                usched_dfly_cache_coherent = 0;
                SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
                                  SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                                  OID_AUTO, "cache_coherent", CTLFLAG_RD,
                                  "NOT SUPPORTED", 0,
                                  "Cache coherence NOT SUPPORTED");
-#endif
        } else {
-#ifdef SMP
                usched_dfly_cache_coherent = 1;
                SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
                               SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
                               OID_AUTO, "cache_coherent", CTLFLAG_RW,
                               &usched_dfly_cache_coherent, 0,
                               "Enable/Disable cache coherent scheduling");
-#endif
 
                SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
                               SYSCTL_CHILDREN(usched_dfly_sysctl_tree),