kernel - add usched_dfly algorithm, set as default for now
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
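/*
 * Concrete values implied by the constants above (illustrative only):
 *
 *      MAXPRI 128 / NQS 32      -> PPQ = 4 priority levels per run queue
 *      ESTCPUPPQ 512 * NQS 32   -> ESTCPUMAX = 16384
 *
 * ESTCPULIM() clamps an estcpu value to ESTCPUMAX, and BATCHMAX works out
 * to roughly 30 seconds worth of estcpu scheduler ticks.
 */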
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_rqindex     lwp_usdata.dfly.rqindex
94 #define lwp_estcpu      lwp_usdata.dfly.estcpu
95 #define lwp_batch       lwp_usdata.dfly.batch
96 #define lwp_rqtype      lwp_usdata.dfly.rqtype
97 #define lwp_qcpu        lwp_usdata.dfly.qcpu
98
99 struct usched_dfly_pcpu {
100         struct spinlock spin;
101         struct thread   helper_thread;
102         short           rrcount;
103         short           upri;
104         int             uload;
105         struct lwp      *uschedcp;
106         struct rq       queues[NQS];
107         struct rq       rtqueues[NQS];
108         struct rq       idqueues[NQS];
109         u_int32_t       queuebits;
110         u_int32_t       rtqueuebits;
111         u_int32_t       idqueuebits;
112         int             runqcount;
113         int             cpuid;
114         cpumask_t       cpumask;
115 #ifdef SMP
116         cpu_node_t      *cpunode;
117 #endif
118 };
119
120 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
121
122 static void dfly_acquire_curproc(struct lwp *lp);
123 static void dfly_release_curproc(struct lwp *lp);
124 static void dfly_select_curproc(globaldata_t gd);
125 static void dfly_setrunqueue(struct lwp *lp);
126 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
127                                 sysclock_t cpstamp);
128 static void dfly_recalculate_estcpu(struct lwp *lp);
129 static void dfly_resetpriority(struct lwp *lp);
130 static void dfly_forking(struct lwp *plp, struct lwp *lp);
131 static void dfly_exiting(struct lwp *lp, struct proc *);
132 static void dfly_uload_update(struct lwp *lp);
133 static void dfly_yield(struct lwp *lp);
134 #ifdef SMP
135 static dfly_pcpu_t dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp);
136 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
137 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
138 #endif
139
140 #ifdef SMP
141 static void dfly_need_user_resched_remote(void *dummy);
142 #endif
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
144                                         int isremote);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147
148 struct usched usched_dfly = {
149         { NULL },
150         "dfly", "Original DragonFly Scheduler",
151         NULL,                   /* default registration */
152         NULL,                   /* default deregistration */
153         dfly_acquire_curproc,
154         dfly_release_curproc,
155         dfly_setrunqueue,
156         dfly_schedulerclock,
157         dfly_recalculate_estcpu,
158         dfly_resetpriority,
159         dfly_forking,
160         dfly_exiting,
161         dfly_uload_update,
162         NULL,                   /* setcpumask not supported */
163         dfly_yield
164 };
165
166 /*
167  * We have NQS (32) run queues per scheduling class.  For the normal
168  * class, there are 128 priorities scaled onto these 32 queues.  New
169  * processes are added to the last entry in each queue, and processes
170  * are selected for running by taking them from the head and maintaining
171  * a simple FIFO arrangement.  Realtime and Idle priority processes have
172  * an explicit 0-31 priority which maps directly onto their class queue
173  * index.  When a queue has something in it, the corresponding bit is
174  * set in the queuebits variable, allowing a single read to determine
175  * the state of all 32 queues and then a ffs() to find the first busy
176  * queue.
177  */
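/*
 * Illustrative sketch (not additional scheduler logic): this is how the
 * queuebits words are consumed, mirroring dfly_chooseproc_locked() below.
 *
 *      if (dd->queuebits) {
 *              pri = bsfl(dd->queuebits);      (lowest set bit = best queue)
 *              q = &dd->queues[pri];
 *              lp = TAILQ_FIRST(q);            (FIFO within that queue)
 *      }
 */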
178 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
179 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
180 #ifdef SMP
181 static volatile int dfly_scancpu;
182 /*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
183 #endif
184 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
185 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
186 static struct sysctl_oid *usched_dfly_sysctl_tree;
187
188 /* Debug info exposed through debug.* sysctl */
189
190 static int usched_dfly_debug = -1;
191 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
192            &usched_dfly_debug, 0,
193            "Print debug information for this pid");
194
195 static int usched_dfly_pid_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
197            &usched_dfly_pid_debug, 0,
198            "Print KTR debug information for this pid");
199
200 static int usched_dfly_chooser = 0;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
202            &usched_dfly_chooser, 0,
203            "Print debug information about the cpu chooser");
204
205 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
206 #ifdef SMP
207 static int usched_dfly_smt = 0;
208 static int usched_dfly_cache_coherent = 0;
209 static int usched_dfly_upri_affinity = 16; /* 32 queues - half-way */
210 static int usched_dfly_queue_checks = 5;
211 static int usched_dfly_stick_to_level = 0;
212 #endif
213 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
214 static int usched_dfly_decay = 8;
215 static int usched_dfly_batch_time = 10;
216 static long usched_dfly_kicks;
217
218 /* KTR debug printings */
219
220 KTR_INFO_MASTER(usched);
221
222 #if !defined(KTR_USCHED_DFLY)
223 #define KTR_USCHED_DFLY KTR_ALL
224 #endif
225
226 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
227     "USCHED_DFLY(dfly_acquire_curproc in user_resched_wanted "
228     "after release: pid %d, cpuid %d, curr_cpuid %d)",
229     pid_t pid, int cpuid, int curr);
230 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
231     "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
232     "curr_cpuid %d)",
233     pid_t pid, int cpuid, int curr);
234 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
235     "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
236     "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
237     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
238 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
239     "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
240     "cpuid %d, curr_cpuid %d)",
241     pid_t pid, int cpuid, int curr);
242
243 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
244     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
245     "cpuid %d, curr_cpuid %d)",
246     pid_t pid, int cpuid, int curr);
247
248 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
249     "USCHED_DFLY(dfly_select_curproc before select: pid %d, "
250     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
251     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
252
253 #ifdef SMP
254 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
255     "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
256     "cpuid %d, verify_mask %lu)",
257     pid_t pid, int cpuid, cpumask_t mask);
258 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
259     "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
260     "cpuid %d, verify_mask %lu)",
261     pid_t pid, int cpuid, cpumask_t mask);
262
263 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
264     "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
265     "mask %lu, curr_cpuid %d)",
266     pid_t pid, int cpuid, cpumask_t mask, int curr);
267 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
268     "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
269     "cpuid %d, mask %lu, curr_cpuid %d)",
270     pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
272     "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
273     "cpuid %d, mask %lu, curr_cpuid %d)",
274     pid_t pid, int cpuid, cpumask_t mask, int curr);
275 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
276     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
277     "mask %lu, found_cpuid %d, curr_cpuid %d)",
278     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
279 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
280     "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
281     "try_cpuid %d, curr_cpuid %d)",
282     pid_t pid, int cpuid, int try_cpuid, int curr);
283 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
284     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
285     "mask %lu, found_cpuid %d, curr_cpuid %d)",
286     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
287 #endif
288
289 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
290     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
291     pid_t pid, int old_cpuid, int curr);
292 #ifdef SMP
293 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
294     "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
295     pid_t pid, int old_cpuid, int curr);
296 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
297     "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
298     "sibling_mask %lu, curr_cpumask %lu)",
299     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
300 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
301     "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
302     "sibling_mask %lu, curr_cpumask: %lu)",
303     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
304
305 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
306     "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
307     int id, pid_t pid, int cpuid);
308 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
309     "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
310     int id, pid_t pid, int cpuid);
311 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
312     "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
313     int id, cpumask_t tmpmask);
314 #endif
315
316 /*
317  * DFLY_ACQUIRE_CURPROC
318  *
319  * This function is called when the kernel intends to return to userland.
320  * It is responsible for making the thread the current designated userland
321  * thread for this cpu, blocking if necessary.
322  *
323  * The kernel has already depressed our LWKT priority so we must not switch
324  * until we have either assigned or disposed of the thread.
325  *
326  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
327  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
328  * occur, this function is called only under very controlled circumstances.
329  */
330 static void
331 dfly_acquire_curproc(struct lwp *lp)
332 {
333         globaldata_t gd;
334         dfly_pcpu_t dd;
335         thread_t td;
336
337         /*
338          * Make sure we aren't sitting on a tsleep queue.
339          */
340         td = lp->lwp_thread;
341         crit_enter_quick(td);
342         if (td->td_flags & TDF_TSLEEPQ)
343                 tsleep_remove(td);
344         dfly_recalculate_estcpu(lp);
345
346         /*
347          * If a reschedule was requested give another thread the
348          * driver's seat.
349          */
350         if (user_resched_wanted()) {
351                 clear_user_resched();
352                 dfly_release_curproc(lp);
353         }
354
355         /*
356          * Loop until we are the current user thread
357          */
358         gd = mycpu;
359         dd = &dfly_pcpu[gd->gd_cpuid];
360
361         do {
362                 /*
363                  * Process any pending events and higher priority threads.
364                  */
365                 lwkt_yield();
366
367                 /*
368                  * Become the currently scheduled user thread for this cpu
369                  * if we can do so trivially.
370                  *
371                  * We can steal another thread's current thread designation
372                  * on this cpu since if we are running that other thread
373                  * must not be, so we can safely deschedule it.
374                  */
375                 if (dd->uschedcp == lp) {
376                         /*
377                          * We are already the current lwp (hot path).
378                          */
379                         dd->upri = lp->lwp_priority;
380                 } else if (dd->uschedcp == NULL) {
381                         /*
382                          * We can trivially become the current lwp.
383                          */
384                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
385                         dd->uschedcp = lp;
386                         dd->upri = lp->lwp_priority;
387                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
388                 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
389                         /*
390                          * We can steal the current cpu's lwp designation
391                          * away simply by replacing it.  The other thread
392                          * will stall when it tries to return to userland,
393                          * possibly rescheduling elsewhere when it calls
394                          * setrunqueue.
395                          */
396                         dd->uschedcp = lp;
397                         dd->upri = lp->lwp_priority;
398                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
399                 } else {
400                         /*
401                          * We cannot become the current lwp, place the lp
402                          * on the run-queue of this or another cpu and
403                          * deschedule ourselves.
404                          *
405                          * When we are reactivated we will have another
406                          * chance.
407                          */
408                         lwkt_deschedule(lp->lwp_thread);
409                         dfly_setrunqueue(lp);
410
411                         /*
412                          * Reload after a switch or setrunqueue/switch possibly
413                          * moved us to another cpu.
414                          */
415                         lwkt_switch();
416                         gd = mycpu;
417                         dd = &dfly_pcpu[gd->gd_cpuid];
418                 }
419         } while (dd->uschedcp != lp);
420
421         crit_exit_quick(td);
422         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
423 }
424
425 /*
426  * DFLY_RELEASE_CURPROC
427  *
428  * This routine detaches the current thread from the userland scheduler,
429  * usually because the thread needs to run or block in the kernel (at
430  * kernel priority) for a while.
431  *
432  * This routine is also responsible for selecting a new thread to
433  * make the current thread.
434  *
435  * NOTE: This implementation differs from the dummy example in that
436  * dfly_select_curproc() is able to select the current process, whereas
437  * dummy_select_curproc() is not.  Because of this we have to NULL out
438  * uschedcp before making the selection.
439  *
440  * Additionally, note that we may already be on a run queue if releasing
441  * via the lwkt_switch() in dfly_setrunqueue().
442  */
443
444 static void
445 dfly_release_curproc(struct lwp *lp)
446 {
447         globaldata_t gd = mycpu;
448         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
449
450         if (dd->uschedcp == lp) {
451                 crit_enter();
452                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
453
454                 dd->uschedcp = NULL;    /* don't let lp be selected */
455                 dd->upri = PRIBASE_NULL;
456                 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
457                 dfly_select_curproc(gd);
458                 crit_exit();
459         }
460 }
461
462 /*
463  * DFLY_SELECT_CURPROC
464  *
465  * Select a new current process for this cpu and clear any pending user
466  * reschedule request.  The cpu currently has no current process.
467  *
468  * This routine is also responsible for equal-priority round-robining,
469  * typically triggered from dfly_schedulerclock().  (By contrast, in the
470  * dummy scheduler example all the 'user' threads are LWKT scheduled at
471  * once and it simply calls lwkt_switch().)
472  *
473  * The calling process is not on the queue and cannot be selected.
474  */
475 static
476 void
477 dfly_select_curproc(globaldata_t gd)
478 {
479         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
480         struct lwp *nlp;
481         int cpuid = gd->gd_cpuid;
482
483         crit_enter_gd(gd);
484
485         /*spin_lock(&dfly_spin);*/
486         spin_lock(&dd->spin);
487         nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
488
489         if (nlp) {
490                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
491                 dd->upri = nlp->lwp_priority;
492                 dd->uschedcp = nlp;
493                 dd->rrcount = 0;                /* reset round robin */
494                 spin_unlock(&dd->spin);
495                 /*spin_unlock(&dfly_spin);*/
496 #ifdef SMP
497                 lwkt_acquire(nlp->lwp_thread);
498 #endif
499                 lwkt_schedule(nlp->lwp_thread);
500         } else {
501                 spin_unlock(&dd->spin);
502                 /*spin_unlock(&dfly_spin);*/
503         }
504         crit_exit_gd(gd);
505 }
506
507 /*
508  * Place the specified lwp on the user scheduler's run queue.  This routine
509  * must be called with the thread descheduled.  The lwp must be runnable.
510  * It must not be possible for anyone else to explicitly schedule this thread.
511  *
512  * The thread may be the current thread as a special case.
513  */
514 static void
515 dfly_setrunqueue(struct lwp *lp)
516 {
517         globaldata_t rgd;
518         dfly_pcpu_t rdd;
519         int cpuid;
520
521         /*
522          * First validate the process LWKT state.
523          */
524         crit_enter();
525         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
526         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
527             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
528              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
529         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
530
531         /*
532          * NOTE: rgd and rdd are relative to the target thread's last cpu,
533          *       NOT our current cpu.
534          */
535         rgd = globaldata_find(lp->lwp_qcpu);
536         rdd = &dfly_pcpu[lp->lwp_qcpu];
537         cpuid = rdd->cpuid;
538
539         /*
540          * This process is not supposed to be scheduled anywhere or assigned
541          * as the current process anywhere.  Assert the condition.
542          */
543         KKASSERT(rdd->uschedcp != lp);
544
545 #ifndef SMP
546         /*
547          * If we are not SMP we do not have a scheduler helper to kick
548          * and must directly activate the process if none are scheduled.
549          *
550          * This is really only an issue when bootstrapping init since
551          * the caller in all other cases will be a user process, and
552          * even if released (rdd->uschedcp == NULL), that process will
553          * kickstart the scheduler when it returns to user mode from
554          * the kernel.
555          *
556          * NOTE: On SMP we can't just set some other cpu's uschedcp.
557          */
558         if (rdd->uschedcp == NULL) {
559                 spin_lock(&rdd->spin);
560                 if (rdd->uschedcp == NULL) {
561                         atomic_set_cpumask(&dfly_curprocmask, rgd->gd_cpumask);
562                         rdd->uschedcp = lp;
563                         rdd->upri = lp->lwp_priority;
564                         spin_unlock(&rdd->spin);
565                         lwkt_schedule(lp->lwp_thread);
566                         crit_exit();
567                         return;
568                 }
569                 spin_unlock(&rdd->spin);
570         }
571 #endif
572
573 #ifdef SMP
574         /*
575          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
576          * operation when the priority is recalculated, so TDF_MIGRATING
577          * may already be set.
578          */
579         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
580                 lwkt_giveaway(lp->lwp_thread);
581 #endif
582
583 #ifdef SMP
584         /*
585          * Ok, we have to setrunqueue some target cpu and request a reschedule
586          * if necessary.
587          *
588          * We have to choose the best target cpu.  It might not be the current
589          * target even if the current cpu has no running user thread (for
590          * example, because the current cpu might be a hyperthread and its
591          * sibling has a thread assigned).
592          */
593         /*spin_lock(&dfly_spin);*/
594         rdd = dfly_choose_best_queue(rdd, lp);
595         rgd = globaldata_find(rdd->cpuid);
596
597         /*
598          * We lose control of lp the moment we release the spinlock after
599          * having placed lp on the queue.  i.e. another cpu could pick it
600          * up and it could exit, or its priority could be further adjusted,
601          * or something like that.
602          *
603          * WARNING! dd can point to a foreign cpu!
604          */
605         spin_lock(&rdd->spin);
606         dfly_setrunqueue_locked(rdd, lp);
607         /*spin_unlock(&dfly_spin);*/
608
609         if (rgd == mycpu) {
610                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
611                         spin_unlock(&rdd->spin);
612                         if (rdd->uschedcp == NULL) {
613                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
614                                 need_user_resched();
615                         } else {
616                                 need_user_resched();
617                         }
618                 } else {
619                         spin_unlock(&rdd->spin);
620                 }
621         } else {
622                 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
623                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
624                         spin_unlock(&rdd->spin);
625                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
626                                        NULL);
627                 } else {
628                         spin_unlock(&rdd->spin);
629                         wakeup(&rdd->helper_thread);
630                 }
631         }
632 #else
633         /*
634          * Request a reschedule if appropriate.
635          */
636         spin_lock(&rdd->spin);
637         dfly_setrunqueue_locked(rdd, lp);
638         spin_unlock(&rdd->spin);
639         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
640                 need_user_resched();
641         }
642 #endif
643         crit_exit();
644 }
645
646 /*
647  * This routine is called from a systimer IPI.  It MUST be MP-safe and
648  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
649  * each cpu.
650  */
651 static
652 void
653 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
654 {
655         globaldata_t gd = mycpu;
656         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
657
658         /*
659          * Do we need to round-robin?  We round-robin 10 times a second.
660          * This should only occur for cpu-bound batch processes.
661          */
662         if (++dd->rrcount >= usched_dfly_rrinterval) {
663                 dd->rrcount = 0;
664                 need_user_resched();
665         }
666
667         /*
668          * Adjust estcpu upward using a real time equivalent calculation.
669          */
670         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
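        /*
         * (Each tick adds ESTCPUMAX / ESTCPUFREQ, and this clock fires
         * ESTCPUFREQ times per second, so a thread running continuously
         * ramps from 0 to ESTCPUMAX in roughly one second of cpu time,
         * ignoring the decay applied by dfly_recalculate_estcpu().)
         */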
671
672         /*
673          * Spinlocks also hold a critical section so there should not be
674          * any active.
675          */
676         KKASSERT(gd->gd_spinlocks_wr == 0);
677
678         dfly_resetpriority(lp);
679 }
680
681 /*
682  * Called from acquire and from kern_synch's one-second timer (one of the
683  * callout helper threads) with a critical section held.
684  *
685  * Decay p_estcpu based on the number of ticks we haven't been running
686  * and our p_nice.  As the load increases each process observes a larger
687  * number of idle ticks (because other processes are running in them).
688  * This observation leads to a larger correction which tends to make the
689  * system more 'batchy'.
690  *
691  * Note that no recalculation occurs for a process which sleeps and wakes
692  * up in the same tick.  That is, a system doing thousands of context
693  * switches per second will still only do serious estcpu calculations
694  * ESTCPUFREQ times per second.
695  */
696 static
697 void
698 dfly_recalculate_estcpu(struct lwp *lp)
699 {
700         globaldata_t gd = mycpu;
701         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
702         sysclock_t cpbase;
703         sysclock_t ttlticks;
704         int estcpu;
705         int decay_factor;
706
707         /*
708          * We have to subtract periodic to get the last schedclock
709          * timeout time, otherwise we would get the upcoming timeout.
710          * Keep in mind that a process can migrate between cpus and
711          * while the scheduler clock should be very close, boundary
712          * conditions could lead to a small negative delta.
713          */
714         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
715
716         if (lp->lwp_slptime > 1) {
717                 /*
718                  * Too much time has passed, do a coarse correction.
719                  */
720                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
721                 dfly_resetpriority(lp);
722                 lp->lwp_cpbase = cpbase;
723                 lp->lwp_cpticks = 0;
724                 lp->lwp_batch -= ESTCPUFREQ;
725                 if (lp->lwp_batch < 0)
726                         lp->lwp_batch = 0;
727         } else if (lp->lwp_cpbase != cpbase) {
728                 /*
729                  * Adjust estcpu if we are in a different tick.  Don't waste
730                  * time if we are in the same tick.
731                  *
732                  * First calculate the number of ticks in the measurement
733                  * interval.  The ttlticks calculation can wind up 0 due to
734          * a bug in the handling of lwp_slptime (as yet not found),
735                  * so make sure we do not get a divide by 0 panic.
736                  */
737                 ttlticks = (cpbase - lp->lwp_cpbase) /
738                            gd->gd_schedclock.periodic;
739                 if (ttlticks < 0) {
740                         ttlticks = 0;
741                         lp->lwp_cpbase = cpbase;
742                 }
743                 if (ttlticks == 0)
744                         return;
745                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
746
747                 /*
748                  * Calculate the percentage of one cpu used factoring in ncpus
749                  * and the load and adjust estcpu.  Handle degenerate cases
750                  * by adding 1 to runqcount.
751                  *
752                  * estcpu is scaled by ESTCPUMAX.
753                  *
754                  * runqcount is the excess number of user processes
755                  * that cannot be immediately scheduled to cpus.  We want
756                  * to count these as running to avoid range compression
757                  * in the base calculation (which is the actual percentage
758                  * of one cpu used).
759                  */
760                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
761                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
762
763                 /*
764                  * If estcpu is > 50% we become more batch-like
765                  * If estcpu is <= 50% we become less batch-like
766                  *
767                  * It takes 30 cpu seconds to traverse the entire range.
768                  */
769                 if (estcpu > ESTCPUMAX / 2) {
770                         lp->lwp_batch += ttlticks;
771                         if (lp->lwp_batch > BATCHMAX)
772                                 lp->lwp_batch = BATCHMAX;
773                 } else {
774                         lp->lwp_batch -= ttlticks;
775                         if (lp->lwp_batch < 0)
776                                 lp->lwp_batch = 0;
777                 }
778
779                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
780                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
781                                 lp->lwp_proc->p_pid, lp,
782                                 estcpu, lp->lwp_estcpu,
783                                 lp->lwp_batch,
784                                 lp->lwp_cpticks, ttlticks);
785                 }
786
787                 /*
788                  * Adjust lp->lwp_estcpu.  The decay factor determines how
789                  * quickly lwp_estcpu collapses to its realtime calculation.
790                  * A slower collapse gives us a more accurate number but
791                  * can cause a cpu hog to eat too much cpu before the
792                  * scheduler decides to downgrade it.
793                  *
794                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
795                  *       and not here, but we must still ensure that a
796                  *       cpu-bound nice -20 process does not completely
797                  *       override a cpu-bound nice +20 process.
798                  *
799                  * NOTE: We must use ESTCPULIM() here to deal with any
800                  *       overshoot.
801                  */
802                 decay_factor = usched_dfly_decay;
803                 if (decay_factor < 1)
804                         decay_factor = 1;
805                 if (decay_factor > 1024)
806                         decay_factor = 1024;
807
808                 lp->lwp_estcpu = ESTCPULIM(
809                         (lp->lwp_estcpu * decay_factor + estcpu) /
810                         (decay_factor + 1));
811
812                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
813                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
814                 dfly_resetpriority(lp);
815                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
816                 lp->lwp_cpticks = 0;
817         }
818 }
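/*
 * Worked example (sketch, using the default usched_dfly_decay of 8): each
 * recalculation blends the running value 8:1 with the fresh measurement,
 *
 *      lwp_estcpu = (lwp_estcpu * 8 + estcpu) / 9
 *
 * so the estimate moves roughly 1/9th of the way toward the measured cpu
 * usage per interval.
 */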
819
820 /*
821  * Compute the priority of a process when running in user mode.
822  * Arrange to reschedule if the resulting priority is better
823  * than that of the current process.
824  *
825  * This routine may be called with any process.
826  *
827  * This routine is called by fork1() for initial setup with the process
828  * of the run queue, and also may be called normally with the process on or
829  * off the run queue.
830  */
831 static void
832 dfly_resetpriority(struct lwp *lp)
833 {
834         dfly_pcpu_t rdd;
835         int newpriority;
836         u_short newrqtype;
837         int rcpu;
838         int checkpri;
839         int estcpu;
840
841         crit_enter();
842
843         /*
844          * Lock the scheduler (lp) belongs to.  This can be on a different
845          * cpu.  Handle races.  This loop breaks out with the appropriate
846          * rdd locked.
847          */
848         for (;;) {
849                 rcpu = lp->lwp_qcpu;
850                 rdd = &dfly_pcpu[rcpu];
851                 spin_lock(&rdd->spin);
852                 if (rcpu == lp->lwp_qcpu)
853                         break;
854                 spin_unlock(&rdd->spin);
855         }
856
857         /*
858          * Calculate the new priority and queue type
859          */
860         newrqtype = lp->lwp_rtprio.type;
861
862         switch(newrqtype) {
863         case RTP_PRIO_REALTIME:
864         case RTP_PRIO_FIFO:
865                 newpriority = PRIBASE_REALTIME +
866                              (lp->lwp_rtprio.prio & PRIMASK);
867                 break;
868         case RTP_PRIO_NORMAL:
869                 /*
870                  * Detune estcpu based on batchiness.  lwp_batch ranges
871                  * from 0 to BATCHMAX.  Limit estcpu for the sake of
872                  * the priority calculation to between 50% and 100%.
873                  */
874                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
875                          (BATCHMAX * 2);
876
877                 /*
878                  * p_nice piece         Adds (0-40) * 2         0-80
879                  * estcpu               Adds 16384  * 4 / 512   0-128
880                  */
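                /*
                 * Worked example (sketch, assuming the usual PRIO_MIN of
                 * -20): a nice 0 lwp with a detuned estcpu of 8192 (half
                 * of ESTCPUMAX) contributes (0 - -20) * 4 / 2 = 40 plus
                 * 8192 * 4 / 512 = 64, which after the MAXPRI scaling
                 * below lands near the middle of the normal range.
                 */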
881                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
882                 newpriority += estcpu * PPQ / ESTCPUPPQ;
883                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
884                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
885                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
886                 break;
887         case RTP_PRIO_IDLE:
888                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
889                 break;
890         case RTP_PRIO_THREAD:
891                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
892                 break;
893         default:
894                 panic("Bad RTP_PRIO %d", newrqtype);
895                 /* NOT REACHED */
896         }
897
898         /*
899          * The newpriority incorporates the queue type so do a simple masked
900          * check to determine if the process has moved to another queue.  If
901          * it has, and it is currently on a run queue, then move it.
902          *
903          * Since uload is ~PPQMASK masked, no modifications are necessary if
904          * we end up in the same run queue.
905          */
906         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
907                 int delta_uload;
908
909                 /*
910                  * uload can change, calculate the adjustment to reduce
911                  * edge cases since choosers scan the cpu topology without
912                  * locks.
913                  */
914                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
915                         delta_uload =
916                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
917                                 ((newpriority & ~PPQMASK) & PRIMASK);
918                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
919                                        delta_uload);
920                 }
921                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
922                         dfly_remrunqueue_locked(rdd, lp);
923                         lp->lwp_priority = newpriority;
924                         lp->lwp_rqtype = newrqtype;
925                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
926                         dfly_setrunqueue_locked(rdd, lp);
927                         checkpri = 1;
928                 } else {
929                         lp->lwp_priority = newpriority;
930                         lp->lwp_rqtype = newrqtype;
931                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
932                         checkpri = 0;
933                 }
934         } else {
935                 /*
936                  * In the same PPQ, uload cannot change.
937                  */
938                 lp->lwp_priority = newpriority;
939                 checkpri = 1;
940                 rcpu = -1;
941         }
942
943         /*
944          * Determine if we need to reschedule the target cpu.  This only
945          * occurs if the LWP is already on a scheduler queue, which means
946          * that idle cpu notification has already occurred.  At most we
947          * need only issue a need_user_resched() on the appropriate cpu.
948          *
949          * The LWP may be owned by a CPU different from the current one,
950          * in which case dd->uschedcp may be modified without an MP lock
951          * or a spinlock held.  The worst that happens is that the code
952          * below causes a spurious need_user_resched() on the target CPU
953          * and dd->upri to be wrong for a short period of time, both of
954          * which are harmless.
955          *
956          * If checkpri is 0 we are adjusting the priority of the current
957          * process, possibly higher (less desirable), so ignore the upri
958          * check which will fail in that case.
959          */
960         if (rcpu >= 0) {
961                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
962                     (checkpri == 0 ||
963                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
964 #ifdef SMP
965                         if (rcpu == mycpu->gd_cpuid) {
966                                 spin_unlock(&rdd->spin);
967                                 need_user_resched();
968                         } else {
969                                 atomic_clear_cpumask(&dfly_rdyprocmask,
970                                                      CPUMASK(rcpu));
971                                 spin_unlock(&rdd->spin);
972                                 lwkt_send_ipiq(globaldata_find(rcpu),
973                                                dfly_need_user_resched_remote,
974                                                NULL);
975                         }
976 #else
977                         spin_unlock(&rdd->spin);
978                         need_user_resched();
979 #endif
980                 } else {
981                         spin_unlock(&rdd->spin);
982                 }
983         } else {
984                 spin_unlock(&rdd->spin);
985         }
986         crit_exit();
987 }
988
989 static
990 void
991 dfly_yield(struct lwp *lp)
992 {
993 #if 0
994         /* FUTURE (or something similar) */
995         switch(lp->lwp_rqtype) {
996         case RTP_PRIO_NORMAL:
997                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
998                 break;
999         default:
1000                 break;
1001         }
1002 #endif
1003         need_user_resched();
1004 }
1005
1006 /*
1007  * Called from fork1() when a new child process is being created.
1008  *
1009  * Give the child process an initial estcpu that is more batchy than
1010  * its parent and dock the parent for the fork (but do not
1011  * reschedule the parent).   This comprises the main part of our batch
1012  * detection heuristic for both parallel forking and sequential execs.
1013  *
1014  * XXX lwp should be "spawning" instead of "forking"
1015  */
1016 static void
1017 dfly_forking(struct lwp *plp, struct lwp *lp)
1018 {
1019         /*
1020          * Put the child 4 queue slots (out of 32) higher than the parent
1021          * (less desirable than the parent).
1022          */
1023         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1024
1025         /*
1026          * The batch status of children always starts out centerline
1027          * and will inch-up or inch-down as appropriate.  It takes roughly
1028          * ~15 seconds of >50% cpu to hit the limit.
1029          */
1030         lp->lwp_batch = BATCHMAX / 2;
1031
1032         /*
1033          * Dock the parent a cost for the fork, protecting us from fork
1034          * bombs.  If the parent is forking quickly make the child more
1035          * batchy.
1036          */
1037         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1038 }
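/*
 * In concrete terms (sketch): with ESTCPUPPQ at 512 the child starts
 * 4 * 512 = 2048 estcpu above the parent, and each fork docks the parent
 * 512 / 16 = 32 estcpu, so roughly 16 rapid forks cost the parent a full
 * queue's worth of estcpu.
 */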
1039
1040 /*
1041  * Called when a lwp is being removed from this scheduler, typically
1042  * during lwp_exit().
1043  */
1044 static void
1045 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1046 {
1047         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1048
1049         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1050                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1051                 atomic_add_int(&dd->uload,
1052                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1053         }
1054 }
1055
1056 static void
1057 dfly_uload_update(struct lwp *lp)
1058 {
1059         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1060
1061         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1062                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1063                         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1064                         atomic_add_int(&dd->uload,
1065                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1066                 }
1067         } else {
1068                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1069                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1070                         atomic_add_int(&dd->uload,
1071                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1072                 }
1073         }
1074 }
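/*
 * Note on the accounting above: every lwp tracked via LWP_MP_ULOAD
 * contributes its queue-aligned priority, (lwp_priority & ~PPQMASK) &
 * PRIMASK, to its cpu's dd->uload.  The topology-aware choosers below
 * use this sum as a weighted load metric when picking queues.
 */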
1075
1076 /*
1077  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1078  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1079  * has a better or equal priority than the process that would otherwise be
1080  * chosen, NULL is returned.
1081  *
1082  * Until we fix the RUNQ code the chklp test has to be strict or we may
1083  * bounce between processes trying to acquire the current process designation.
1084  *
1085  * Must be called with the target cpu's dd->spin exclusively held.  The
1086  * spinlock is left intact through the entire routine.
1087  *
1088  * If chklp is NULL this function will dive into other cpus' queues looking
1089  * for work if the current queue is empty.
1090  */
1091 static
1092 struct lwp *
1093 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1094 {
1095         dfly_pcpu_t xdd;
1096         struct lwp *lp;
1097         struct rq *q;
1098         u_int32_t *which, *which2;
1099         u_int32_t pri;
1100         u_int32_t rtqbits;
1101         u_int32_t tsqbits;
1102         u_int32_t idqbits;
1103         /*usched_dfly_queue_checks*/
1104
1105         rtqbits = dd->rtqueuebits;
1106         tsqbits = dd->queuebits;
1107         idqbits = dd->idqueuebits;
1108
1109         if (rtqbits) {
1110                 pri = bsfl(rtqbits);
1111                 q = &dd->rtqueues[pri];
1112                 which = &dd->rtqueuebits;
1113                 which2 = &rtqbits;
1114         } else if (tsqbits) {
1115                 pri = bsfl(tsqbits);
1116                 q = &dd->queues[pri];
1117                 which = &dd->queuebits;
1118                 which2 = &tsqbits;
1119         } else if (idqbits) {
1120                 pri = bsfl(idqbits);
1121                 q = &dd->idqueues[pri];
1122                 which = &dd->idqueuebits;
1123                 which2 = &idqbits;
1124         } else
1125 #ifdef SMP
1126         if (isremote) {
1127                 /*
1128                  * Disallow remote->remote recursion
1129                  */
1130                 return (NULL);
1131         } else {
1132                 /*
1133                  * Pull a runnable thread from a remote run queue.  We have
1134                  * to adjust qcpu and uload manually because the lp we return
1135                  * might be assigned directly to uschedcp (setrunqueue might
1136                  * not be called).
1137                  */
1138                 xdd = dfly_choose_worst_queue(dd);
1139                 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1140                         lp = dfly_chooseproc_locked(xdd, NULL, 1);
1141                         if (lp) {
1142                                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1143                                         atomic_add_int(&xdd->uload,
1144                                             -((lp->lwp_priority & ~PPQMASK) &
1145                                               PRIMASK));
1146                                 }
1147                                 lp->lwp_qcpu = dd->cpuid;
1148                                 atomic_add_int(&dd->uload,
1149                                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1150                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1151                         }
1152                         spin_unlock(&xdd->spin);
1153                 } else {
1154                         lp = NULL;
1155                 }
1156                 return (lp);
1157         }
1158 #else
1159         {
1160                 return NULL;
1161         }
1162 #endif
1163         lp = TAILQ_FIRST(q);
1164         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1165
1166         /*
1167          * If the passed lwp <chklp> is reasonably close to the selected
1168          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1169          *
1170          * Note that we must error on the side of <chklp> to avoid bouncing
1171          * between threads in the acquire code.
1172          */
1173         if (chklp) {
1174                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1175                         return(NULL);
1176         }
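        /*
         * (The PPQ slop above errs in chklp's favor: the queued lwp must
         * be better by at least a full queue's worth of priority before
         * it is allowed to displace chklp.)
         */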
1177
1178         KTR_COND_LOG(usched_chooseproc,
1179             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1180             lp->lwp_proc->p_pid,
1181             lp->lwp_thread->td_gd->gd_cpuid,
1182             mycpu->gd_cpuid);
1183
1184         TAILQ_REMOVE(q, lp, lwp_procq);
1185         --dd->runqcount;
1186         if (TAILQ_EMPTY(q))
1187                 *which &= ~(1 << pri);
1188         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1189         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1190
1191         return lp;
1192 }
1193
1194 #ifdef SMP
1195
1196 /*
1197  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1198  *
1199  * Choose a cpu node to schedule lp on, hopefully nearby its current
1200  * node.  The current node is passed in (dd) (though it can also be obtained
1201  * from lp->lwp_qcpu).  The caller will dfly_setrunqueue() lp on the queue
1202  * we return.
1203  *
1204  * When the topology is known choose a cpu whose group has, in aggregate,
1205  * the lowest weighted load.
1206  */
1207 static
1208 dfly_pcpu_t
1209 dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp)
1210 {
1211         cpumask_t mask;
1212         cpu_node_t *cpup;
1213         cpu_node_t *cpun;
1214         cpu_node_t *cpub;
1215         dfly_pcpu_t rdd;
1216         int cpuid;
1217         int n;
1218         int load;
1219         int lowest_load;
1220         int level;
1221
1222         /*
1223          * When the topology is unknown choose a random cpu that is hopefully
1224          * idle.
1225          */
1226         if (dd->cpunode == NULL)
1227                 return (dfly_choose_queue_simple(dd, lp));
1228
1229         /*
1230          * When the topology is known choose a cpu whose group has, in
1231          * aggregate, the lowest weighted load.
1232          */
1233         cpup = root_cpu_node;
1234         rdd = dd;
1235         level = cpu_topology_levels_number;
1236
1237         while (cpup) {
1238                 /*
1239                  * Degenerate case super-root
1240                  */
1241                 if (cpup->child_node && cpup->child_no == 1) {
1242                         cpup = cpup->child_node;
1243                         --level;
1244                         continue;
1245                 }
1246
1247                 /*
1248                  * Terminal cpunode
1249                  */
1250                 if (cpup->child_node == NULL) {
1251                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1252                         break;
1253                 }
1254
1255                 cpub = NULL;
1256                 lowest_load = 0x7FFFFFFF;
1257
1258                 for (n = 0; n < cpup->child_no; ++n) {
1259                         /*
1260                          * Accumulate load information for all cpus
1261                          * which are members of this node.
1262                          */
1263                         cpun = &cpup->child_node[n];
1264                         mask = cpun->members & usched_global_cpumask &
1265                                smp_active_mask & lp->lwp_cpumask;
1266                         if (mask == 0)
1267                                 continue;
1268                         load = 0;
1269                         while (mask) {
1270                                 cpuid = BSFCPUMASK(mask);
1271                                 load += dfly_pcpu[cpuid].uload;
1272                                 mask &= ~CPUMASK(cpuid);
1273                         }
1274
1275                         /*
1276                          * Give a slight advantage to nearby cpus.
1277                          */
1278                         if (cpun->members & dd->cpumask)
1279                                 load -= PPQ * level;
1280
1281                         /*
1282                          * Calculate the best load
1283                          */
1284                         if (cpub == NULL || lowest_load > load ||
1285                             (lowest_load == load &&
1286                              (cpun->members & dd->cpumask))
1287                         ) {
1288                                 lowest_load = load;
1289                                 cpub = cpun;
1290                         }
1291                 }
1292                 cpup = cpub;
1293                 --level;
1294         }
1295         if (usched_dfly_chooser)
1296                 kprintf("lp %02d->%02d %s\n",
1297                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1298         return (rdd);
1299 }
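/*
 * In outline (sketch): the loop above walks the cpu topology from the
 * root, sums dd->uload over the cpus under each child node, gives the
 * subtree containing the lwp's current cpu a small bonus (PPQ * level),
 * and descends into the least-loaded child until a single cpu remains.
 */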
1300
1301 /*
1302  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1303  *
1304  * Choose the worst queue close to dd's cpu node with a non-empty runq.
1305  *
1306  * This is used by the thread chooser when the current cpu's queues are
1307  * empty to steal a thread from another cpu's queue.  We want to offload
1308  * the most heavily-loaded queue.
1309  */
1310 static
1311 dfly_pcpu_t
1312 dfly_choose_worst_queue(dfly_pcpu_t dd)
1313 {
1314         cpumask_t mask;
1315         cpu_node_t *cpup;
1316         cpu_node_t *cpun;
1317         cpu_node_t *cpub;
1318         dfly_pcpu_t rdd;
1319         int cpuid;
1320         int n;
1321         int load;
1322         int highest_load;
1323         int uloadok;
1324         int level;
1325
1326         /*
1327          * When the topology is unknown there is no remote queue to pull
1328          * from, so just return NULL.
1329          */
1330         if (dd->cpunode == NULL) {
1331                 return (NULL);
1332         }
1333
1334         /*
1335          * When the topology is known choose a cpu whose group has, in
1336          * aggregate, the highest weighted load.
1337          */
1338         cpup = root_cpu_node;
1339         rdd = dd;
1340         level = cpu_topology_levels_number;
1341         while (cpup) {
1342                 /*
1343                  * Degenerate case super-root
1344                  */
1345                 if (cpup->child_node && cpup->child_no == 1) {
1346                         cpup = cpup->child_node;
1347                         --level;
1348                         continue;
1349                 }
1350
1351                 /*
1352                  * Terminal cpunode
1353                  */
1354                 if (cpup->child_node == NULL) {
1355                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1356                         break;
1357                 }
1358
1359                 cpub = NULL;
1360                 highest_load = 0;
1361
1362                 for (n = 0; n < cpup->child_no; ++n) {
1363                         /*
1364                          * Accumulate load information for all cpus
1365                          * which are members of this node.
1366                          */
1367                         cpun = &cpup->child_node[n];
1368                         mask = cpun->members & usched_global_cpumask &
1369                                smp_active_mask;
1370                         if (mask == 0)
1371                                 continue;
1372                         load = 0;
1373                         uloadok = 0;
1374                         while (mask) {
1375                                 cpuid = BSFCPUMASK(mask);
1376                                 load += dfly_pcpu[cpuid].uload;
1377                                 if (dfly_pcpu[cpuid].uload)
1378                                         uloadok = 1;
1379                                 if (dfly_pcpu[cpuid].uschedcp) {
1380                                         load += (dfly_pcpu[cpuid].upri &
1381                                                  ~PPQMASK) & PRIMASK;
1382                                 }
1383                                 mask &= ~CPUMASK(cpuid);
1384                         }
1385
1386                         /*
1387                          * Give a slight advantage to nearby cpus.
1388                          */
1389                         if (cpun->members & dd->cpumask)
1390                                 load += PPQ * level;
1391
1392                         /*
1393                          * The best candidate is the one with the worst
1394                          * (highest) load.  Prefer candidates that are
1395                          * closer to our cpu.
1396                          */
1397                         if (uloadok &&
1398                             (cpub == NULL || highest_load < load ||
1399                              (highest_load == load &&
1400                               (cpun->members & dd->cpumask)))
1401                         ) {
1402                                 highest_load = load;
1403                                 cpub = cpun;
1404                         }
1405                 }
1406                 cpup = cpub;
1407                 --level;
1408         }
1409         return (rdd);
1410 }
1411
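/*
 * Fallback cpu selection using the original heuristic.  Starting at
 * dfly_scancpu, first scan ready cpus which are not currently running a
 * user thread, then cpus whose current user thread has a worse
 * (numerically higher) upri than lp, and finally fall back to a simple
 * round-robin choice.
 */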
1412 static
1413 dfly_pcpu_t
1414 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1415 {
1416         dfly_pcpu_t rdd;
1417         cpumask_t tmpmask;
1418         cpumask_t mask;
1419         int cpuid;
1420
1421         /*
1422          * Fall back to the original heuristic: select a random cpu,
1423          * first checking cpus not currently running a user thread.
1424          */
1425         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1426         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1427                smp_active_mask & usched_global_cpumask;
1428
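        /*
         * Scan order sketch (illustrative, assuming CPUMASK(n) is the
         * single-bit mask (1 << n)):
         *
         *	tmpmask = ~(CPUMASK(cpuid) - 1)
         *
         * keeps only cpus numbered >= cpuid, so the search begins at cpuid
         * and walks upward; once no candidates remain at or above cpuid the
         * plain BSFCPUMASK(mask) wraps back around to the lowest remaining
         * candidate.  E.g. cpuid 5 gives tmpmask ~0x1f.
         */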
1429         while (mask) {
1430                 tmpmask = ~(CPUMASK(cpuid) - 1);
1431                 if (mask & tmpmask)
1432                         cpuid = BSFCPUMASK(mask & tmpmask);
1433                 else
1434                         cpuid = BSFCPUMASK(mask);
1435                 rdd = &dfly_pcpu[cpuid];
1436
1437                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1438                         goto found;
1439                 mask &= ~CPUMASK(cpuid);
1440         }
1441
1442         /*
1443          * Then cpus which might have a currently running lp
1444          */
1445         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1446         mask = dfly_curprocmask & dfly_rdyprocmask &
1447                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1448
1449         while (mask) {
1450                 tmpmask = ~(CPUMASK(cpuid) - 1);
1451                 if (mask & tmpmask)
1452                         cpuid = BSFCPUMASK(mask & tmpmask);
1453                 else
1454                         cpuid = BSFCPUMASK(mask);
1455                 rdd = &dfly_pcpu[cpuid];
1456
1457                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1458                         goto found;
1459                 mask &= ~CPUMASK(cpuid);
1460         }
1461
1462         /*
1463          * If we cannot find a suitable cpu we reload from dfly_scancpu
1464          * and round-robin.  Other cpus will pick up as they release their
1465          * current lwps or become ready.
1466          *
1467          * Avoid a degenerate system lockup case if usched_global_cpumask
1468          * is set to 0 or otherwise does not cover lwp_cpumask.
1469          *
1470          * We only kick the target helper thread in this case; we do not
1471          * set the user resched flag here.
1472          */
1473         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1474         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1475                 cpuid = 0;
1476         rdd = &dfly_pcpu[cpuid];
1477 found:
1478         return (rdd);
1479 }
1480
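/*
 * dfly_need_user_resched_remote() runs on the target cpu: it flags a user
 * reschedule there and wakes that cpu's helper thread locally.
 *
 * Hedged usage sketch (the real call sites live elsewhere in this file):
 * a remote cpu would typically deliver it via an IPI, e.g.
 *
 *	lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
 *
 * where rgd is the target cpu's globaldata_t.
 */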
1481 static
1482 void
1483 dfly_need_user_resched_remote(void *dummy)
1484 {
1485         globaldata_t gd = mycpu;
1486         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1487
1488         need_user_resched();
1489
1490         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1491         wakeup_mycpu(&dd->helper_thread);
1492 }
1493
1494 #endif
1495
1496 /*
1497  * dfly_remrunqueue_locked() removes a given process from the run queue
1498  * that it is on, clearing the queue busy bit if it becomes empty.
1499  *
1500  * Note that the user process scheduler is different from the LWKT scheduler.
1501  * The user process scheduler only manages user processes but it uses LWKT
1502  * underneath, and a user process operating in the kernel will often be
1503  * 'released' from our management.
1504  *
1505  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1506  * to sleep or the lwp is moved to a different runq.
1507  */
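/*
 * Per-queue bookkeeping sketch (illustrative): each rqtype has NQS queues
 * and a 32 bit status word (queuebits/rtqueuebits/idqueuebits) with one
 * bit per queue.  Removing the last lwp from queue 'pri' clears bit 'pri':
 *
 *	TAILQ_REMOVE(q, lp, lwp_procq);
 *	if (TAILQ_EMPTY(q))
 *		*which &= ~(1 << pri);
 *
 * so the best non-empty queue can later be located with a find-first-bit
 * scan of the status word instead of walking all of the queues.
 */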
1508 static void
1509 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1510 {
1511         struct rq *q;
1512         u_int32_t *which;
1513         u_int8_t pri;
1514
1515         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1516         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1517         --rdd->runqcount;
1518         /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1519         KKASSERT(rdd->runqcount >= 0);
1520
1521         pri = lp->lwp_rqindex;
1522         switch(lp->lwp_rqtype) {
1523         case RTP_PRIO_NORMAL:
1524                 q = &rdd->queues[pri];
1525                 which = &rdd->queuebits;
1526                 break;
1527         case RTP_PRIO_REALTIME:
1528         case RTP_PRIO_FIFO:
1529                 q = &rdd->rtqueues[pri];
1530                 which = &rdd->rtqueuebits;
1531                 break;
1532         case RTP_PRIO_IDLE:
1533                 q = &rdd->idqueues[pri];
1534                 which = &rdd->idqueuebits;
1535                 break;
1536         default:
1537                 panic("remrunqueue: invalid rtprio type");
1538                 /* NOT REACHED */
1539         }
1540         TAILQ_REMOVE(q, lp, lwp_procq);
1541         if (TAILQ_EMPTY(q)) {
1542                 KASSERT((*which & (1 << pri)) != 0,
1543                         ("remrunqueue: remove from empty queue"));
1544                 *which &= ~(1 << pri);
1545         }
1546 }
1547
1548 /*
1549  * dfly_setrunqueue_locked()
1550  *
1551  * Add a process whose rqtype and rqindex have previously been calculated
1552  * onto the appropriate run queue.  Any reschedule or cross-cpu notification
1553  * required by the addition is handled by the caller.
1554  *
1555  * NOTE:          Lower priorities are better priorities.
1556  *
1557  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1558  *                sum of the rough lwp_priority for all running and runnable
1559  *                processes.  Lower priority processes (higher lwp_priority
1560  *                values) actually DO count as more load, not less, because
1561  *                these are the programs which require the most care with
1562  *                regards to cpu selection.
1563  */
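/*
 * Hedged example of the uload bookkeeping done below (the priority value
 * is made up): an lwp with lwp_priority 100 contributes
 *
 *	(100 & ~PPQMASK) & PRIMASK
 *
 * to dfly_pcpu[lp->lwp_qcpu].uload while LWP_MP_ULOAD is set, i.e. its
 * priority rounded down to a queue boundary.  The same quantity is removed
 * from the old cpu's uload when the lwp is re-homed to another cpu's
 * run queue.
 */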
1564 static void
1565 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1566 {
1567         struct rq *q;
1568         u_int32_t *which;
1569         int pri;
1570
1571         if (lp->lwp_qcpu != rdd->cpuid) {
1572                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1573                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1574                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1575                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1576                 }
1577                 lp->lwp_qcpu = rdd->cpuid;
1578         }
1579
1580         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1581         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1582         ++rdd->runqcount;
1583         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1584                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1585                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1586                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1587         }
1588
1589         pri = lp->lwp_rqindex;
1590
1591         switch(lp->lwp_rqtype) {
1592         case RTP_PRIO_NORMAL:
1593                 q = &rdd->queues[pri];
1594                 which = &rdd->queuebits;
1595                 break;
1596         case RTP_PRIO_REALTIME:
1597         case RTP_PRIO_FIFO:
1598                 q = &rdd->rtqueues[pri];
1599                 which = &rdd->rtqueuebits;
1600                 break;
1601         case RTP_PRIO_IDLE:
1602                 q = &rdd->idqueues[pri];
1603                 which = &rdd->idqueuebits;
1604                 break;
1605         default:
1606                 panic("setrunqueue: invalid rtprio type");
1607                 /* NOT REACHED */
1608         }
1609
1610         /*
1611          * Add to the correct queue and set the appropriate bit.  If no
1612          * lower priority (i.e. better) processes are in the queue then
1613          * we want a reschedule; calculate the best cpu for the job.
1614          *
1615          * Always run reschedules on the LWP's original cpu.
1616          */
1617         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1618         *which |= 1 << pri;
1619 }
1620
1621 #ifdef SMP
1622
1623 /*
1624  * For SMP systems a user scheduler helper thread is created for each
1625  * cpu and is used to allow one cpu to wake up another for the purposes of
1626  * scheduling userland threads from setrunqueue().
1627  *
1628  * UP systems do not need the helper since there is only one cpu.
1629  *
1630  * We can't use the idle thread for this because we might block.
1631  * Additionally, doing things this way allows us to HLT idle cpus
1632  * on MP systems.
1633  */
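/*
 * One instance of this function runs as the per-cpu "usched %d" kernel
 * thread created by dfly_helper_thread_cpu_init() below.  It sleeps on
 * &dd->helper_thread and is woken when this cpu should look at its user
 * run queues, e.g. by dfly_need_user_resched_remote() above.
 */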
1634 static void
1635 dfly_helper_thread(void *dummy)
1636 {
1637     globaldata_t gd;
1638     dfly_pcpu_t  dd;
1639     struct lwp *nlp;
1640     cpumask_t mask;
1641     int cpuid;
1642
1643     gd = mycpu;
1644     cpuid = gd->gd_cpuid;       /* doesn't change */
1645     mask = gd->gd_cpumask;      /* doesn't change */
1646     dd = &dfly_pcpu[cpuid];
1647
1648     /*
1649      * Since we only want to be woken up when no user processes
1650      * are scheduled on a cpu, run at an ultra low priority.
1651      */
1652     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1653
1654     tsleep(&dd->helper_thread, 0, "schslp", 0);
1655
1656     for (;;) {
1657         /*
1658          * We use the tsleep() interlock trick to avoid racing
1659          * dfly_rdyprocmask.  This means we cannot block between here and
1660          * the interlocked tsleep() at the bottom of the loop.
1661          */
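        /*
         * Interlock pattern sketch (illustrative only):
         *
         *	tsleep_interlock(&dd->helper_thread, 0);
         *	... publish readiness, scan the queues, drop dd->spin ...
         *	tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
         *
         * A wakeup() arriving after the interlock but before the final
         * tsleep() is not lost; PINTERLOCKED causes that tsleep() to
         * return immediately instead of sleeping.
         */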
1662         crit_enter_gd(gd);
1663         tsleep_interlock(&dd->helper_thread, 0);
1664
1665         /*spin_lock(&dfly_spin);*/
1666         spin_lock(&dd->spin);
1667
1668         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1669         clear_user_resched();   /* This satisfies the reschedule request */
1670         dd->rrcount = 0;        /* Reset the round-robin counter */
1671
1672         if ((dfly_curprocmask & mask) == 0) {
1673                 /*
1674                  * No thread is currently scheduled.
1675                  */
1676                 KKASSERT(dd->uschedcp == NULL);
1677                 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1678                         KTR_COND_LOG(usched_sched_thread_no_process,
1679                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1680                             gd->gd_cpuid,
1681                             nlp->lwp_proc->p_pid,
1682                             nlp->lwp_thread->td_gd->gd_cpuid);
1683
1684                         atomic_set_cpumask(&dfly_curprocmask, mask);
1685                         dd->upri = nlp->lwp_priority;
1686                         dd->uschedcp = nlp;
1687                         dd->rrcount = 0;        /* reset round robin */
1688                         spin_unlock(&dd->spin);
1689                         /*spin_unlock(&dfly_spin);*/
1690 #ifdef SMP
1691                         lwkt_acquire(nlp->lwp_thread);
1692 #endif
1693                         lwkt_schedule(nlp->lwp_thread);
1694                 } else {
1695                         spin_unlock(&dd->spin);
1696                         /*spin_unlock(&dfly_spin);*/
1697                 }
1698         } else if (dd->runqcount) {
1699                 /*
1700                  * Possibly find a better process to schedule.
1701                  */
1702                 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1703                 if (nlp) {
1704                         KTR_COND_LOG(usched_sched_thread_process,
1705                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1706                             gd->gd_cpuid,
1707                             nlp->lwp_proc->p_pid,
1708                             nlp->lwp_thread->td_gd->gd_cpuid);
1709
1710                         dd->upri = nlp->lwp_priority;
1711                         dd->uschedcp = nlp;
1712                         dd->rrcount = 0;        /* reset round robin */
1713                         spin_unlock(&dd->spin);
1714                         /*spin_unlock(&dfly_spin);*/
1715 #ifdef SMP
1716                         lwkt_acquire(nlp->lwp_thread);
1717 #endif
1718                         lwkt_schedule(nlp->lwp_thread);
1719                 } else {
1720                         /*
1721                          * Leave the thread on our run queue.  Another
1722                          * scheduler will try to pull it later.
1723                          */
1724                         spin_unlock(&dd->spin);
1725                         /*spin_unlock(&dfly_spin);*/
1726                 }
1727         } else {
1728                 /*
1729                  * The runq is empty.
1730                  */
1731                 spin_unlock(&dd->spin);
1732                 /*spin_unlock(&dfly_spin);*/
1733         }
1734
1735         /*
1736          * We're descheduled unless someone scheduled us.  Switch away.
1737          * Exiting the critical section will cause splz() to be called
1738          * for us if interrupts and such are pending.
1739          */
1740         crit_exit_gd(gd);
1741         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1742     }
1743 }
1744
1745 /* sysctl stick_to_level parameter */
1746 static int
1747 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1748 {
1749         int error, new_val;
1750
1751         new_val = usched_dfly_stick_to_level;
1752
1753         error = sysctl_handle_int(oidp, &new_val, 0, req);
1754         if (error != 0 || req->newptr == NULL)
1755                 return (error);
1756         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1757                 return (EINVAL);
1758         usched_dfly_stick_to_level = new_val;
1759         return (0);
1760 }
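/*
 * Hedged usage example: with a topology exposing, say, three levels
 * (cpu_topology_levels_number == 3) the handler above accepts 0..2 and
 * rejects anything else with EINVAL:
 *
 *	sysctl kern.usched_dfly.stick_to_level=2	(accepted)
 *	sysctl kern.usched_dfly.stick_to_level=3	(EINVAL)
 *
 * The oid itself is attached under kern.usched_dfly by
 * dfly_helper_thread_cpu_init() below.
 */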
1761
1762 /*
1763  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1764  * been cleared by rqinit() and we should not mess with it further.
1765  */
1766 static void
1767 dfly_helper_thread_cpu_init(void)
1768 {
1769         int i;
1770         int j;
1771         int cpuid;
1772         int smt_not_supported = 0;
1773         int cache_coherent_not_supported = 0;
1774
1775         if (bootverbose)
1776                 kprintf("Start scheduler helpers on cpus:\n");
1777
1778         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1779         usched_dfly_sysctl_tree =
1780                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1781                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1782                                 "usched_dfly", CTLFLAG_RD, 0, "");
1783
1784         for (i = 0; i < ncpus; ++i) {
1785                 dfly_pcpu_t dd = &dfly_pcpu[i];
1786                 cpumask_t mask = CPUMASK(i);
1787
1788                 if ((mask & smp_active_mask) == 0)
1789                     continue;
1790
1791                 spin_init(&dd->spin);
1792                 dd->cpunode = get_cpu_node_by_cpuid(i);
1793                 dd->cpuid = i;
1794                 dd->cpumask = CPUMASK(i);
1795                 for (j = 0; j < NQS; j++) {
1796                         TAILQ_INIT(&dd->queues[j]);
1797                         TAILQ_INIT(&dd->rtqueues[j]);
1798                         TAILQ_INIT(&dd->idqueues[j]);
1799                 }
1800                 atomic_clear_cpumask(&dfly_curprocmask, 1);
1801
1802                 if (dd->cpunode == NULL) {
1803                         smt_not_supported = 1;
1804                         cache_coherent_not_supported = 1;
1805                         if (bootverbose)
1806                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1807                                          "found for cpu\n", i);
1808                 } else {
1809                         switch (dd->cpunode->type) {
1810                         case THREAD_LEVEL:
1811                                 if (bootverbose)
1812                                         kprintf ("\tcpu%d - HyperThreading "
1813                                                  "available. Core siblings: ",
1814                                                  i);
1815                                 break;
1816                         case CORE_LEVEL:
1817                                 smt_not_supported = 1;
1818
1819                                 if (bootverbose)
1820                                         kprintf ("\tcpu%d - No HT available, "
1821                                                  "multi-core/physical "
1822                                                  "cpu. Physical siblings: ",
1823                                                  i);
1824                                 break;
1825                         case CHIP_LEVEL:
1826                                 smt_not_supported = 1;
1827
1828                                 if (bootverbose)
1829                                         kprintf ("\tcpu%d - No HT available, "
1830                                                  "single-core/physical cpu. "
1831                                                  "Package Siblings: ",
1832                                                  i);
1833                                 break;
1834                         default:
1835                                 /* Let's go for safe defaults here */
1836                                 smt_not_supported = 1;
1837                                 cache_coherent_not_supported = 1;
1838                                 if (bootverbose)
1839                                         kprintf ("\tcpu%d - Unknown cpunode->"
1840                                                  "type=%u. Siblings: ",
1841                                                  i,
1842                                                  (u_int)dd->cpunode->type);
1843                                 break;
1844                         }
1845
1846                         if (bootverbose) {
1847                                 if (dd->cpunode->parent_node != NULL) {
1848                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1849                                                 kprintf("cpu%d ", cpuid);
1850                                         kprintf("\n");
1851                                 } else {
1852                                         kprintf(" no siblings\n");
1853                                 }
1854                         }
1855                 }
1856
1857                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1858                             0, i, "usched %d", i);
1859
1860                 /*
1861                  * Allow user scheduling on the target cpu.  cpu #0 has already
1862                  * been enabled in rqinit().
1863                  */
1864                 if (i)
1865                     atomic_clear_cpumask(&dfly_curprocmask, mask);
1866                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1867                 dd->upri = PRIBASE_NULL;
1868
1869         }
1870
1871         /* usched_dfly sysctl configurable parameters */
1872
1873         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1874                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1875                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1876                        &usched_dfly_rrinterval, 0, "");
1877         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1878                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1879                        OID_AUTO, "decay", CTLFLAG_RW,
1880                        &usched_dfly_decay, 0, "Extra decay when not running");
1881         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1882                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1883                        OID_AUTO, "batch_time", CTLFLAG_RW,
1884                        &usched_dfly_batch_time, 0, "Min batch counter value");
1885         SYSCTL_ADD_LONG(&usched_dfly_sysctl_ctx,
1886                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1887                        OID_AUTO, "kicks", CTLFLAG_RW,
1888                        &usched_dfly_kicks, "Number of kickstarts");
1889
1890         /* Add enable/disable option for SMT scheduling if supported */
1891         if (smt_not_supported) {
1892                 usched_dfly_smt = 0;
1893                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1894                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1895                                   OID_AUTO, "smt", CTLFLAG_RD,
1896                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1897         } else {
1898                 usched_dfly_smt = 1;
1899                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1900                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1901                                OID_AUTO, "smt", CTLFLAG_RW,
1902                                &usched_dfly_smt, 0, "Enable SMT scheduling");
1903         }
1904
1905         /*
1906          * Add enable/disable option for cache coherent scheduling
1907          * if supported
1908          */
1909         if (cache_coherent_not_supported) {
1910 #ifdef SMP
1911                 usched_dfly_cache_coherent = 0;
1912                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1913                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1914                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
1915                                   "NOT SUPPORTED", 0,
1916                                   "Cache coherence NOT SUPPORTED");
1917 #endif
1918         } else {
1919 #ifdef SMP
1920                 usched_dfly_cache_coherent = 1;
1921                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1922                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1923                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
1924                                &usched_dfly_cache_coherent, 0,
1925                                "Enable/Disable cache coherent scheduling");
1926 #endif
1927
1928                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1929                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1930                                OID_AUTO, "upri_affinity", CTLFLAG_RW,
1931                                &usched_dfly_upri_affinity, 1,
1932                                "Number of PPQs in user priority check");
1933
1934                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1935                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1936                                OID_AUTO, "queue_checks", CTLFLAG_RW,
1937                                &usched_dfly_queue_checks, 5,
1938                                "LWPs to check from a queue before giving up");
1939
1940                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
1941                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1942                                 OID_AUTO, "stick_to_level",
1943                                 CTLTYPE_INT | CTLFLAG_RW,
1944                                 NULL, sizeof usched_dfly_stick_to_level,
1945                                 sysctl_usched_dfly_stick_to_level, "I",
1946                                 "Stick a process to this level. See sysctl "
1947                                 "parameter hw.cpu_topology.level_description");
1948         }
1949 }
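/*
 * Resulting tunables (a sketch; the exact set depends on what the detected
 * topology supports):
 *
 *	kern.usched_dfly.{rrinterval,decay,batch_time,kicks,smt,
 *	cache_coherent,upri_affinity,queue_checks,stick_to_level}
 *
 * e.g. "sysctl kern.usched_dfly.smt=0" would disable SMT-aware scheduling
 * on a system where it was enabled above.
 */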
1950 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1951         dfly_helper_thread_cpu_init, NULL)
1952
1953 #else /* No SMP options - just add the configurable parameters to sysctl */
1954
1955 static void
1956 sched_sysctl_tree_init(void)
1957 {
1958         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1959         usched_dfly_sysctl_tree =
1960                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1961                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1962                                 "usched_dfly", CTLFLAG_RD, 0, "");
1963
1964         /* usched_dfly sysctl configurable parameters */
1965         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1966                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1967                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1968                        &usched_dfly_rrinterval, 0, "");
1969         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1970                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1971                        OID_AUTO, "decay", CTLFLAG_RW,
1972                        &usched_dfly_decay, 0, "Extra decay when not running");
1973         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1974                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1975                        OID_AUTO, "batch_time", CTLFLAG_RW,
1976                        &usched_dfly_batch_time, 0, "Min batch counter value");
1977 }
1978 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1979         sched_sysctl_tree_init, NULL)
1980 #endif