[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
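
/*
 * Illustrative note on the units above (not from the original source):
 * with MAXPRI 128 and NQS 32, PPQ works out to 4 priority levels per
 * queue, ESTCPUMAX to 512 * 32 = 16384 estcpu units, and ESTCPULIM()
 * simply clamps to that ceiling, e.g. ESTCPULIM(20000) == 16384.
 */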
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_rqindex     lwp_usdata.dfly.rqindex
94 #define lwp_estcpu      lwp_usdata.dfly.estcpu
95 #define lwp_batch       lwp_usdata.dfly.batch
96 #define lwp_rqtype      lwp_usdata.dfly.rqtype
97 #define lwp_qcpu        lwp_usdata.dfly.qcpu
98
99 struct usched_dfly_pcpu {
100         struct spinlock spin;
101         struct thread   helper_thread;
102         short           rrcount;
103         short           upri;
104         int             uload;
105         struct lwp      *uschedcp;
106         struct rq       queues[NQS];
107         struct rq       rtqueues[NQS];
108         struct rq       idqueues[NQS];
109         u_int32_t       queuebits;
110         u_int32_t       rtqueuebits;
111         u_int32_t       idqueuebits;
112         int             runqcount;
113         int             cpuid;
114         cpumask_t       cpumask;
115 #ifdef SMP
116         cpu_node_t      *cpunode;
117 #endif
118 };
119
120 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
121
122 static void dfly_acquire_curproc(struct lwp *lp);
123 static void dfly_release_curproc(struct lwp *lp);
124 static void dfly_select_curproc(globaldata_t gd);
125 static void dfly_setrunqueue(struct lwp *lp);
126 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
127                                 sysclock_t cpstamp);
128 static void dfly_recalculate_estcpu(struct lwp *lp);
129 static void dfly_resetpriority(struct lwp *lp);
130 static void dfly_forking(struct lwp *plp, struct lwp *lp);
131 static void dfly_exiting(struct lwp *lp, struct proc *);
132 static void dfly_uload_update(struct lwp *lp);
133 static void dfly_yield(struct lwp *lp);
134 #ifdef SMP
135 static dfly_pcpu_t dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp);
136 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
137 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
138 #endif
139
140 #ifdef SMP
141 static void dfly_need_user_resched_remote(void *dummy);
142 #endif
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
144                                         int isremote);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147
148 struct usched usched_dfly = {
149         { NULL },
150         "dfly", "Original DragonFly Scheduler",
151         NULL,                   /* default registration */
152         NULL,                   /* default deregistration */
153         dfly_acquire_curproc,
154         dfly_release_curproc,
155         dfly_setrunqueue,
156         dfly_schedulerclock,
157         dfly_recalculate_estcpu,
158         dfly_resetpriority,
159         dfly_forking,
160         dfly_exiting,
161         dfly_uload_update,
162         NULL,                   /* setcpumask not supported */
163         dfly_yield
164 };
165
166 /*
167  * We have NQS (32) run queues per scheduling class.  For the normal
168  * class, there are 128 priorities scaled onto these 32 queues.  New
169  * processes are added to the last entry in each queue, and processes
170  * are selected for running by taking them from the head and maintaining
171  * a simple FIFO arrangement.  Realtime and Idle priority processes have
 172  * an explicit 0-31 priority which maps directly onto their class queue
173  * index.  When a queue has something in it, the corresponding bit is
174  * set in the queuebits variable, allowing a single read to determine
175  * the state of all 32 queues and then a ffs() to find the first busy
176  * queue.
177  */
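
/*
 * Illustrative sketch (not part of the original file) of how the queuebits
 * word is consumed: queue N is non-empty iff bit N is set, so one bsfl()
 * yields the lowest (best) busy index.  Assumes a dfly_pcpu_t dd in scope.
 */
#if 0
	u_int32_t qbits = dd->queuebits;	/* e.g. 0x930: queues 4,5,8,11 busy */
	if (qbits) {
		int pri = bsfl(qbits);		/* -> 4, the best busy queue */
		struct rq *q = &dd->queues[pri];
		/* dequeue TAILQ_FIRST(q); if q empties, clear bit pri in queuebits */
	}
#endif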
178 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
179 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
180 #ifdef SMP
181 static volatile int dfly_scancpu;
182 /*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
183 #endif
184 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
185 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
186 static struct sysctl_oid *usched_dfly_sysctl_tree;
187
188 /* Debug info exposed through debug.* sysctl */
189
190 static int usched_dfly_debug = -1;
191 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
192            &usched_dfly_debug, 0,
193            "Print debug information for this pid");
194
195 static int usched_dfly_pid_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
197            &usched_dfly_pid_debug, 0,
198            "Print KTR debug information for this pid");
199
200 static int usched_dfly_chooser = 0;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
202            &usched_dfly_chooser, 0,
 203            "Print debug information about chooser cpu selection");
204
 205 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
206 #ifdef SMP
207 static int usched_dfly_smt = 0;
208 static int usched_dfly_cache_coherent = 0;
209 static int usched_dfly_upri_affinity = 16; /* 32 queues - half-way */
210 static int usched_dfly_queue_checks = 5;
211 static int usched_dfly_stick_to_level = 0;
212 #endif
213 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
214 static int usched_dfly_decay = 8;
215 static int usched_dfly_batch_time = 10;
216
217 /* KTR debug printings */
218
219 KTR_INFO_MASTER(usched);
220
221 #if !defined(KTR_USCHED_DFLY)
222 #define KTR_USCHED_DFLY KTR_ALL
223 #endif
224
225 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
 226     "USCHED_DFLY(dfly_acquire_curproc in user_resched_wanted "
227     "after release: pid %d, cpuid %d, curr_cpuid %d)",
228     pid_t pid, int cpuid, int curr);
229 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
230     "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
231     "curr_cpuid %d)",
232     pid_t pid, int cpuid, int curr);
233 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
234     "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
235     "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
236     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
237 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
238     "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
239     "cpuid %d, curr_cpuid %d)",
240     pid_t pid, int cpuid, int curr);
241
242 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
243     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
244     "cpuid %d, curr_cpuid %d)",
245     pid_t pid, int cpuid, int curr);
246
247 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
 248     "USCHED_DFLY(dfly_select_curproc: pid %d, "
249     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
250     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
251
252 #ifdef SMP
253 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
254     "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
255     "cpuid %d, verify_mask %lu)",
256     pid_t pid, int cpuid, cpumask_t mask);
257 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
258     "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
259     "cpuid %d, verify_mask %lu)",
260     pid_t pid, int cpuid, cpumask_t mask);
261
262 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
263     "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
264     "mask %lu, curr_cpuid %d)",
265     pid_t pid, int cpuid, cpumask_t mask, int curr);
266 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
267     "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
268     "cpuid %d, mask %lu, curr_cpuid %d)",
269     pid_t pid, int cpuid, cpumask_t mask, int curr);
270 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
271     "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
272     "cpuid %d, mask %lu, curr_cpuid %d)",
273     pid_t pid, int cpuid, cpumask_t mask, int curr);
274 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
275     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
276     "mask %lu, found_cpuid %d, curr_cpuid %d)",
277     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
278 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
279     "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
280     "try_cpuid %d, curr_cpuid %d)",
281     pid_t pid, int cpuid, int try_cpuid, int curr);
282 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
283     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
284     "mask %lu, found_cpuid %d, curr_cpuid %d)",
285     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
286 #endif
287
288 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
289     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
290     pid_t pid, int old_cpuid, int curr);
291 #ifdef SMP
292 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
293     "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
294     pid_t pid, int old_cpuid, int curr);
295 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
296     "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
297     "sibling_mask %lu, curr_cpumask %lu)",
298     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
299 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
300     "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
301     "sibling_mask %lu, curr_cpumask: %lu)",
302     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
303
304 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
305     "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
306     int id, pid_t pid, int cpuid);
307 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
308     "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
309     int id, pid_t pid, int cpuid);
310 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
311     "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
312     int id, cpumask_t tmpmask);
313 #endif
314
315 /*
316  * DFLY_ACQUIRE_CURPROC
317  *
318  * This function is called when the kernel intends to return to userland.
319  * It is responsible for making the thread the current designated userland
320  * thread for this cpu, blocking if necessary.
321  *
322  * The kernel has already depressed our LWKT priority so we must not switch
323  * until we have either assigned or disposed of the thread.
324  *
325  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
326  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
327  * occur, this function is called only under very controlled circumstances.
328  */
329 static void
330 dfly_acquire_curproc(struct lwp *lp)
331 {
332         globaldata_t gd;
333         dfly_pcpu_t dd;
334         thread_t td;
335
336         /*
337          * Make sure we aren't sitting on a tsleep queue.
338          */
339         td = lp->lwp_thread;
340         crit_enter_quick(td);
341         if (td->td_flags & TDF_TSLEEPQ)
342                 tsleep_remove(td);
343         dfly_recalculate_estcpu(lp);
344
345         /*
346          * If a reschedule was requested give another thread the
347          * driver's seat.
348          */
349         if (user_resched_wanted()) {
350                 clear_user_resched();
351                 dfly_release_curproc(lp);
352         }
353
354         /*
355          * Loop until we are the current user thread
356          */
357         gd = mycpu;
358         dd = &dfly_pcpu[gd->gd_cpuid];
359
360         do {
361                 /*
362                  * Process any pending events and higher priority threads.
363                  */
364                 lwkt_yield();
365
366                 /*
367                  * Become the currently scheduled user thread for this cpu
368                  * if we can do so trivially.
369                  *
 370                  * We can steal another thread's current-thread designation
 371                  * on this cpu because if we are running, that other thread
 372                  * cannot be, so we can safely deschedule it.
373                  */
374                 if (dd->uschedcp == lp) {
375                         /*
376                          * We are already the current lwp (hot path).
377                          */
378                         dd->upri = lp->lwp_priority;
379                 } else if (dd->uschedcp == NULL) {
380                         /*
381                          * We can trivially become the current lwp.
382                          */
383                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
384                         dd->uschedcp = lp;
385                         dd->upri = lp->lwp_priority;
386                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
387                 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
388                         /*
389                          * We can steal the current cpu's lwp designation
390                          * away simply by replacing it.  The other thread
391                          * will stall when it tries to return to userland,
392                          * possibly rescheduling elsewhere when it calls
393                          * setrunqueue.
394                          */
395                         dd->uschedcp = lp;
396                         dd->upri = lp->lwp_priority;
397                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
398                 } else {
399                         /*
400                          * We cannot become the current lwp, place the lp
401                          * on the run-queue of this or another cpu and
402                          * deschedule ourselves.
403                          *
404                          * When we are reactivated we will have another
405                          * chance.
406                          */
407                         lwkt_deschedule(lp->lwp_thread);
408                         dfly_setrunqueue(lp);
409
410                         /*
411                          * Reload after a switch or setrunqueue/switch possibly
412                          * moved us to another cpu.
413                          */
414                         lwkt_switch();
415                         gd = mycpu;
416                         dd = &dfly_pcpu[gd->gd_cpuid];
417                 }
418         } while (dd->uschedcp != lp);
419
420         crit_exit_quick(td);
421         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
422 }
423
424 /*
425  * DFLY_RELEASE_CURPROC
426  *
427  * This routine detaches the current thread from the userland scheduler,
428  * usually because the thread needs to run or block in the kernel (at
429  * kernel priority) for a while.
430  *
431  * This routine is also responsible for selecting a new thread to
432  * make the current thread.
433  *
434  * NOTE: This implementation differs from the dummy example in that
435  * dfly_select_curproc() is able to select the current process, whereas
436  * dummy_select_curproc() is not able to select the current process.
437  * This means we have to NULL out uschedcp.
438  *
439  * Additionally, note that we may already be on a run queue if releasing
440  * via the lwkt_switch() in dfly_setrunqueue().
441  */
442
443 static void
444 dfly_release_curproc(struct lwp *lp)
445 {
446         globaldata_t gd = mycpu;
447         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
448
449         if (dd->uschedcp == lp) {
450                 crit_enter();
451                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
452
453                 dd->uschedcp = NULL;    /* don't let lp be selected */
454                 dd->upri = PRIBASE_NULL;
455                 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
456                 dfly_select_curproc(gd);
457                 crit_exit();
458         }
459 }
460
461 /*
462  * DFLY_SELECT_CURPROC
463  *
464  * Select a new current process for this cpu and clear any pending user
465  * reschedule request.  The cpu currently has no current process.
466  *
467  * This routine is also responsible for equal-priority round-robining,
468  * typically triggered from dfly_schedulerclock().  In our dummy example
469  * all the 'user' threads are LWKT scheduled all at once and we just
470  * call lwkt_switch().
471  *
472  * The calling process is not on the queue and cannot be selected.
473  */
474 static
475 void
476 dfly_select_curproc(globaldata_t gd)
477 {
478         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
479         struct lwp *nlp;
480         int cpuid = gd->gd_cpuid;
481
482         crit_enter_gd(gd);
483
484         /*spin_lock(&dfly_spin);*/
485         spin_lock(&dd->spin);
486         nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
487
488         if (nlp) {
489                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
490                 dd->upri = nlp->lwp_priority;
491                 dd->uschedcp = nlp;
492                 dd->rrcount = 0;                /* reset round robin */
493                 spin_unlock(&dd->spin);
494                 /*spin_unlock(&dfly_spin);*/
495 #ifdef SMP
496                 lwkt_acquire(nlp->lwp_thread);
497 #endif
498                 lwkt_schedule(nlp->lwp_thread);
499         } else {
500                 spin_unlock(&dd->spin);
501                 /*spin_unlock(&dfly_spin);*/
502         }
503         crit_exit_gd(gd);
504 }
505
506 /*
507  * Place the specified lwp on the user scheduler's run queue.  This routine
508  * must be called with the thread descheduled.  The lwp must be runnable.
509  * It must not be possible for anyone else to explicitly schedule this thread.
510  *
511  * The thread may be the current thread as a special case.
512  */
513 static void
514 dfly_setrunqueue(struct lwp *lp)
515 {
516         globaldata_t rgd;
517         dfly_pcpu_t rdd;
518         int cpuid;
519
520         /*
521          * First validate the process LWKT state.
522          */
523         crit_enter();
524         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
525         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
526             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
527              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
528         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
529
530         /*
 531          * NOTE: rgd and rdd are relative to the target thread's last cpu,
 532          *       NOT our current cpu.
533          */
534         rgd = globaldata_find(lp->lwp_qcpu);
535         rdd = &dfly_pcpu[lp->lwp_qcpu];
536         cpuid = rdd->cpuid;
537
538         /*
539          * This process is not supposed to be scheduled anywhere or assigned
540          * as the current process anywhere.  Assert the condition.
541          */
542         KKASSERT(rdd->uschedcp != lp);
543
544 #ifndef SMP
545         /*
546          * If we are not SMP we do not have a scheduler helper to kick
547          * and must directly activate the process if none are scheduled.
548          *
549          * This is really only an issue when bootstrapping init since
550          * the caller in all other cases will be a user process, and
551          * even if released (rdd->uschedcp == NULL), that process will
552          * kickstart the scheduler when it returns to user mode from
553          * the kernel.
554          *
555          * NOTE: On SMP we can't just set some other cpu's uschedcp.
556          */
557         if (rdd->uschedcp == NULL) {
558                 spin_lock(&rdd->spin);
559                 if (rdd->uschedcp == NULL) {
560                         atomic_set_cpumask(&dfly_curprocmask, rgd->gd_cpumask);
561                         rdd->uschedcp = lp;
562                         rdd->upri = lp->lwp_priority;
563                         spin_unlock(&rdd->spin);
564                         lwkt_schedule(lp->lwp_thread);
565                         crit_exit();
566                         return;
567                 }
568                 spin_unlock(&rdd->spin);
569         }
570 #endif
571
572 #ifdef SMP
573         /*
574          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
575          * operation when the priority is recalculated, so TDF_MIGRATING
576          * may already be set.
577          */
578         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
579                 lwkt_giveaway(lp->lwp_thread);
580 #endif
581
582 #ifdef SMP
583         /*
584          * Ok, we have to setrunqueue some target cpu and request a reschedule
585          * if necessary.
586          *
587          * We have to choose the best target cpu.  It might not be the current
588          * target even if the current cpu has no running user thread (for
589          * example, because the current cpu might be a hyperthread and its
590          * sibling has a thread assigned).
591          */
592         /*spin_lock(&dfly_spin);*/
593         rdd = dfly_choose_best_queue(rdd, lp);
594         rgd = globaldata_find(rdd->cpuid);
595
596         /*
597          * We lose control of lp the moment we release the spinlock after
598          * having placed lp on the queue.  i.e. another cpu could pick it
599          * up and it could exit, or its priority could be further adjusted,
600          * or something like that.
601          *
 602          * WARNING! rdd can point to a foreign cpu!
603          */
604         spin_lock(&rdd->spin);
605         dfly_setrunqueue_locked(rdd, lp);
606         /*spin_unlock(&dfly_spin);*/
607
608         if (rgd == mycpu) {
609                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
610                         spin_unlock(&rdd->spin);
611                         if (rdd->uschedcp == NULL) {
612                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
613                                 need_user_resched();
614                         } else {
615                                 need_user_resched();
616                         }
617                 } else {
618                         spin_unlock(&rdd->spin);
619                 }
620         } else {
621                 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
622                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
623                         spin_unlock(&rdd->spin);
624                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
625                                        NULL);
626                 } else {
627                         spin_unlock(&rdd->spin);
628                         wakeup(&rdd->helper_thread);
629                 }
630         }
631 #else
632         /*
633          * Request a reschedule if appropriate.
634          */
635         spin_lock(&rdd->spin);
636         dfly_setrunqueue_locked(rdd, lp);
637         spin_unlock(&rdd->spin);
638         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
639                 need_user_resched();
640         }
641 #endif
642         crit_exit();
643 }
644
645 /*
646  * This routine is called from a systimer IPI.  It MUST be MP-safe and
647  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
648  * each cpu.
649  */
650 static
651 void
652 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
653 {
654         globaldata_t gd = mycpu;
655         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
656
657         /*
658          * Do we need to round-robin?  We round-robin 10 times a second.
659          * This should only occur for cpu-bound batch processes.
660          */
661         if (++dd->rrcount >= usched_dfly_rrinterval) {
662                 dd->rrcount = 0;
663                 need_user_resched();
664         }
665
666         /*
667          * Adjust estcpu upward using a real time equivalent calculation.
668          */
669         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
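 	/*
 	 * Illustrative note: each tick adds ESTCPUMAX / ESTCPUFREQ + 1 units,
 	 * so a thread that stays on the cpu for a full second of schedclock
 	 * ticks accumulates roughly ESTCPUMAX and is held there by ESTCPULIM().
 	 */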
670
671         /*
 672          * Spinlocks also hold a critical section, so none should be
 673          * active here.
674          */
675         KKASSERT(gd->gd_spinlocks_wr == 0);
676
677         dfly_resetpriority(lp);
678 }
679
680 /*
681  * Called from acquire and from kern_synch's one-second timer (one of the
682  * callout helper threads) with a critical section held.
683  *
684  * Decay p_estcpu based on the number of ticks we haven't been running
685  * and our p_nice.  As the load increases each process observes a larger
686  * number of idle ticks (because other processes are running in them).
687  * This observation leads to a larger correction which tends to make the
688  * system more 'batchy'.
689  *
690  * Note that no recalculation occurs for a process which sleeps and wakes
691  * up in the same tick.  That is, a system doing thousands of context
692  * switches per second will still only do serious estcpu calculations
693  * ESTCPUFREQ times per second.
694  */
695 static
696 void
697 dfly_recalculate_estcpu(struct lwp *lp)
698 {
699         globaldata_t gd = mycpu;
700         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
701         sysclock_t cpbase;
702         sysclock_t ttlticks;
703         int estcpu;
704         int decay_factor;
705
706         /*
707          * We have to subtract periodic to get the last schedclock
708          * timeout time, otherwise we would get the upcoming timeout.
709          * Keep in mind that a process can migrate between cpus and
710          * while the scheduler clock should be very close, boundary
711          * conditions could lead to a small negative delta.
712          */
713         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
714
715         if (lp->lwp_slptime > 1) {
716                 /*
717                  * Too much time has passed, do a coarse correction.
718                  */
719                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
720                 dfly_resetpriority(lp);
721                 lp->lwp_cpbase = cpbase;
722                 lp->lwp_cpticks = 0;
723                 lp->lwp_batch -= ESTCPUFREQ;
724                 if (lp->lwp_batch < 0)
725                         lp->lwp_batch = 0;
726         } else if (lp->lwp_cpbase != cpbase) {
727                 /*
728                  * Adjust estcpu if we are in a different tick.  Don't waste
729                  * time if we are in the same tick.
730                  *
731                  * First calculate the number of ticks in the measurement
732                  * interval.  The ttlticks calculation can wind up 0 due to
733                  * a bug in the handling of lwp_slptime  (as yet not found),
734                  * so make sure we do not get a divide by 0 panic.
735                  */
736                 ttlticks = (cpbase - lp->lwp_cpbase) /
737                            gd->gd_schedclock.periodic;
738                 if (ttlticks < 0) {
739                         ttlticks = 0;
740                         lp->lwp_cpbase = cpbase;
741                 }
742                 if (ttlticks == 0)
743                         return;
744                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
745
746                 /*
747                  * Calculate the percentage of one cpu used factoring in ncpus
748                  * and the load and adjust estcpu.  Handle degenerate cases
749                  * by adding 1 to runqcount.
750                  *
751                  * estcpu is scaled by ESTCPUMAX.
752                  *
753                  * runqcount is the excess number of user processes
754                  * that cannot be immediately scheduled to cpus.  We want
755                  * to count these as running to avoid range compression
756                  * in the base calculation (which is the actual percentage
757                  * of one cpu used).
758                  */
759                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
760                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
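 		/*
 		 * Worked example for illustration: on a 4-cpu system with no
 		 * excess backlog (runqcount 0), a thread that ran every tick
 		 * (cpticks == ttlticks) yields (ttlticks * 16384) * 4 /
 		 * (4 * ttlticks) == 16384 (100%), and one that ran half the
 		 * ticks yields 8192 (50%).  A non-zero runqcount scales the
 		 * result upward.
 		 */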
761
762                 /*
763                  * If estcpu is > 50% we become more batch-like
764                  * If estcpu is <= 50% we become less batch-like
765                  *
766                  * It takes 30 cpu seconds to traverse the entire range.
767                  */
768                 if (estcpu > ESTCPUMAX / 2) {
769                         lp->lwp_batch += ttlticks;
770                         if (lp->lwp_batch > BATCHMAX)
771                                 lp->lwp_batch = BATCHMAX;
772                 } else {
773                         lp->lwp_batch -= ttlticks;
774                         if (lp->lwp_batch < 0)
775                                 lp->lwp_batch = 0;
776                 }
777
778                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
779                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
780                                 lp->lwp_proc->p_pid, lp,
781                                 estcpu, lp->lwp_estcpu,
782                                 lp->lwp_batch,
783                                 lp->lwp_cpticks, ttlticks);
784                 }
785
786                 /*
 787                  * Adjust lp->lwp_estcpu.  The decay factor determines how
788                  * quickly lwp_estcpu collapses to its realtime calculation.
789                  * A slower collapse gives us a more accurate number but
790                  * can cause a cpu hog to eat too much cpu before the
791                  * scheduler decides to downgrade it.
792                  *
793                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
794                  *       and not here, but we must still ensure that a
795                  *       cpu-bound nice -20 process does not completely
796                  *       override a cpu-bound nice +20 process.
797                  *
798                  * NOTE: We must use ESTCPULIM() here to deal with any
799                  *       overshoot.
800                  */
801                 decay_factor = usched_dfly_decay;
802                 if (decay_factor < 1)
803                         decay_factor = 1;
804                 if (decay_factor > 1024)
805                         decay_factor = 1024;
806
807                 lp->lwp_estcpu = ESTCPULIM(
808                         (lp->lwp_estcpu * decay_factor + estcpu) /
809                         (decay_factor + 1));
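 		/*
 		 * Worked example for illustration: with the default decay
 		 * factor of 8 this is an exponential moving average,
 		 * new = (old * 8 + sample) / 9.  An old estcpu of 4096 and a
 		 * fully cpu-bound sample of 16384 gives (32768 + 16384) / 9
 		 * == 5461, i.e. roughly 1/9th of the gap is closed per
 		 * measurement interval.
 		 */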
810
811                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
812                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
813                 dfly_resetpriority(lp);
814                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
815                 lp->lwp_cpticks = 0;
816         }
817 }
818
819 /*
820  * Compute the priority of a process when running in user mode.
821  * Arrange to reschedule if the resulting priority is better
822  * than that of the current process.
823  *
824  * This routine may be called with any process.
825  *
826  * This routine is called by fork1() for initial setup with the process
827  * of the run queue, and also may be called normally with the process on or
 828  * off the run queue, and also may be called normally with the process on or
829  */
830 static void
831 dfly_resetpriority(struct lwp *lp)
832 {
833         dfly_pcpu_t rdd;
834         int newpriority;
835         u_short newrqtype;
836         int rcpu;
837         int checkpri;
838         int estcpu;
839
840         crit_enter();
841
842         /*
843          * Lock the scheduler (lp) belongs to.  This can be on a different
844          * cpu.  Handle races.  This loop breaks out with the appropriate
845          * rdd locked.
846          */
847         for (;;) {
848                 rcpu = lp->lwp_qcpu;
849                 rdd = &dfly_pcpu[rcpu];
850                 spin_lock(&rdd->spin);
851                 if (rcpu == lp->lwp_qcpu)
852                         break;
853                 spin_unlock(&rdd->spin);
854         }
855
856         /*
857          * Calculate the new priority and queue type
858          */
859         newrqtype = lp->lwp_rtprio.type;
860
861         switch(newrqtype) {
862         case RTP_PRIO_REALTIME:
863         case RTP_PRIO_FIFO:
864                 newpriority = PRIBASE_REALTIME +
865                              (lp->lwp_rtprio.prio & PRIMASK);
866                 break;
867         case RTP_PRIO_NORMAL:
868                 /*
869                  * Detune estcpu based on batchiness.  lwp_batch ranges
870                  * from 0 to  BATCHMAX.  Limit estcpu for the sake of
871                  * the priority calculation to between 50% and 100%.
872                  */
873                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
874                          (BATCHMAX * 2);
875
876                 /*
877                  * p_nice piece         Adds (0-40) * 2         0-80
878                  * estcpu               Adds 16384  * 4 / 512   0-128
879                  */
880                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
881                 newpriority += estcpu * PPQ / ESTCPUPPQ;
882                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
883                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
884                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
885                 break;
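 		/*
 		 * Worked example for illustration: with nice 0 and a detuned
 		 * estcpu of 8192 (50%), the nice piece is (0 - -20) * 4 / 2
 		 * = 40 and the estcpu piece is 8192 * 4 / 512 = 64, giving a
 		 * raw value of 104.  Scaling by 128 / (41*4/2 + 16384*4/512)
 		 * = 128 / 210 yields 63, so newpriority becomes
 		 * PRIBASE_NORMAL + 63 = 191 and the lwp lands in run queue
 		 * 63 / PPQ = 15.
 		 */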
886         case RTP_PRIO_IDLE:
887                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
888                 break;
889         case RTP_PRIO_THREAD:
890                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
891                 break;
892         default:
893                 panic("Bad RTP_PRIO %d", newrqtype);
894                 /* NOT REACHED */
895         }
896
897         /*
898          * The newpriority incorporates the queue type so do a simple masked
899          * check to determine if the process has moved to another queue.  If
900          * it has, and it is currently on a run queue, then move it.
901          *
902          * Since uload is ~PPQMASK masked, no modifications are necessary if
903          * we end up in the same run queue.
904          */
905         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
906                 int delta_uload;
907
908                 /*
909                  * uload can change, calculate the adjustment to reduce
910                  * edge cases since choosers scan the cpu topology without
911                  * locks.
912                  */
913                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
914                         delta_uload =
915                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
916                                 ((newpriority & ~PPQMASK) & PRIMASK);
917                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
918                                        delta_uload);
919                 }
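 		/*
 		 * Worked example for illustration: moving from priority 160
 		 * to 190 contributes a uload delta of -(160 & ~3 & 127) +
 		 * (190 & ~3 & 127) = -32 + 60 = +28, i.e. numerically worse
 		 * priorities add more weight to the per-cpu uload.
 		 */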
920                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
921                         dfly_remrunqueue_locked(rdd, lp);
922                         lp->lwp_priority = newpriority;
923                         lp->lwp_rqtype = newrqtype;
924                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
925                         dfly_setrunqueue_locked(rdd, lp);
926                         checkpri = 1;
927                 } else {
928                         lp->lwp_priority = newpriority;
929                         lp->lwp_rqtype = newrqtype;
930                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
931                         checkpri = 0;
932                 }
933         } else {
934                 /*
935                  * In the same PPQ, uload cannot change.
936                  */
937                 lp->lwp_priority = newpriority;
938                 checkpri = 1;
939                 rcpu = -1;
940         }
941
942         /*
943          * Determine if we need to reschedule the target cpu.  This only
944          * occurs if the LWP is already on a scheduler queue, which means
 945          * that idle cpu notification has already occurred.  At most we
946          * need only issue a need_user_resched() on the appropriate cpu.
947          *
948          * The LWP may be owned by a CPU different from the current one,
949          * in which case dd->uschedcp may be modified without an MP lock
950          * or a spinlock held.  The worst that happens is that the code
951          * below causes a spurious need_user_resched() on the target CPU
 952          * and dd->upri to be wrong for a short period of time, both of
953          * which are harmless.
954          *
955          * If checkpri is 0 we are adjusting the priority of the current
 956          * process, possibly higher (less desirable), so ignore the upri
957          * check which will fail in that case.
958          */
959         if (rcpu >= 0) {
960                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
961                     (checkpri == 0 ||
962                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
963 #ifdef SMP
964                         if (rcpu == mycpu->gd_cpuid) {
965                                 spin_unlock(&rdd->spin);
966                                 need_user_resched();
967                         } else {
968                                 atomic_clear_cpumask(&dfly_rdyprocmask,
969                                                      CPUMASK(rcpu));
970                                 spin_unlock(&rdd->spin);
971                                 lwkt_send_ipiq(globaldata_find(rcpu),
972                                                dfly_need_user_resched_remote,
973                                                NULL);
974                         }
975 #else
976                         spin_unlock(&rdd->spin);
977                         need_user_resched();
978 #endif
979                 } else {
980                         spin_unlock(&rdd->spin);
981                 }
982         } else {
983                 spin_unlock(&rdd->spin);
984         }
985         crit_exit();
986 }
987
988 static
989 void
990 dfly_yield(struct lwp *lp)
991 {
992 #if 0
993         /* FUTURE (or something similar) */
994         switch(lp->lwp_rqtype) {
995         case RTP_PRIO_NORMAL:
996                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
997                 break;
998         default:
999                 break;
1000         }
1001 #endif
1002         need_user_resched();
1003 }
1004
1005 /*
1006  * Called from fork1() when a new child process is being created.
1007  *
 1008  * Give the child process an initial estcpu that is more batchy than
 1009  * its parent's and dock the parent for the fork (but do not
 1010  * reschedule the parent).  This comprises the main part of our batch
1011  * detection heuristic for both parallel forking and sequential execs.
1012  *
1013  * XXX lwp should be "spawning" instead of "forking"
1014  */
1015 static void
1016 dfly_forking(struct lwp *plp, struct lwp *lp)
1017 {
1018         /*
1019          * Put the child 4 queue slots (out of 32) higher than the parent
 1020          * (less desirable than the parent).
1021          */
1022         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1023
1024         /*
1025          * The batch status of children always starts out centerline
1026          * and will inch-up or inch-down as appropriate.  It takes roughly
1027          * ~15 seconds of >50% cpu to hit the limit.
1028          */
1029         lp->lwp_batch = BATCHMAX / 2;
1030
1031         /*
1032          * Dock the parent a cost for the fork, protecting us from fork
1033          * bombs.  If the parent is forking quickly make the child more
1034          * batchy.
1035          */
1036         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1037 }
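
/*
 * Illustrative note: with ESTCPUPPQ = 512 the child starts 512 * 4 = 2048
 * estcpu units (roughly four run-queue slots) worse than its parent, while
 * the parent is docked 512 / 16 = 32 units per fork, so around sixteen
 * rapid forks cost the parent about one queue's worth of priority.
 */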
1038
1039 /*
1040  * Called when a lwp is being removed from this scheduler, typically
1041  * during lwp_exit().
1042  */
1043 static void
1044 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1045 {
1046         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1047
1048         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1049                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1050                 atomic_add_int(&dd->uload,
1051                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1052         }
1053 }
1054
1055 static void
1056 dfly_uload_update(struct lwp *lp)
1057 {
1058         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1059
1060         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1061                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1062                         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1063                         atomic_add_int(&dd->uload,
1064                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1065                 }
1066         } else {
1067                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1068                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1069                         atomic_add_int(&dd->uload,
1070                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1071                 }
1072         }
1073 }
1074
1075 /*
1076  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1077  * it selects a user process and returns it.  If chklp is non-NULL and chklp
 1078  * has a better or equal priority than the process that would otherwise be
1079  * chosen, NULL is returned.
1080  *
1081  * Until we fix the RUNQ code the chklp test has to be strict or we may
1082  * bounce between processes trying to acquire the current process designation.
1083  *
 1084  * Must be called with dd->spin exclusively held.  The spinlock is
 1085  * left intact through the entire routine.
1086  *
 1087  * If chklp is NULL this function will dive other cpus' queues looking
1088  * for work if the current queue is empty.
1089  */
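
/*
 * Illustrative note: the "strict" chklp test below keeps chklp unless the
 * queued candidate is better by at least a full queue.  For example, a
 * chklp priority of 160 holds off candidates down to priority 157
 * (160 < 157 + PPQ) but loses to a candidate at priority 156.
 */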
1090 static
1091 struct lwp *
1092 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1093 {
1094 #ifdef SMP
1095         dfly_pcpu_t xdd;
1096 #endif
1097         struct lwp *lp;
1098         struct rq *q;
1099         u_int32_t *which, *which2;
1100         u_int32_t pri;
1101         u_int32_t rtqbits;
1102         u_int32_t tsqbits;
1103         u_int32_t idqbits;
1104         /*usched_dfly_queue_checks*/
1105
1106         rtqbits = dd->rtqueuebits;
1107         tsqbits = dd->queuebits;
1108         idqbits = dd->idqueuebits;
1109
1110         if (rtqbits) {
1111                 pri = bsfl(rtqbits);
1112                 q = &dd->rtqueues[pri];
1113                 which = &dd->rtqueuebits;
1114                 which2 = &rtqbits;
1115         } else if (tsqbits) {
1116                 pri = bsfl(tsqbits);
1117                 q = &dd->queues[pri];
1118                 which = &dd->queuebits;
1119                 which2 = &tsqbits;
1120         } else if (idqbits) {
1121                 pri = bsfl(idqbits);
1122                 q = &dd->idqueues[pri];
1123                 which = &dd->idqueuebits;
1124                 which2 = &idqbits;
1125         } else
1126 #ifdef SMP
1127         if (isremote) {
1128                 /*
1129                  * Disallow remote->remote recursion
1130                  */
1131                 return (NULL);
1132         } else {
1133                 /*
1134                  * Pull a runnable thread from a remote run queue.  We have
1135                  * to adjust qcpu and uload manually because the lp we return
1136                  * might be assigned directly to uschedcp (setrunqueue might
1137                  * not be called).
1138                  */
1139                 xdd = dfly_choose_worst_queue(dd);
1140                 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1141                         lp = dfly_chooseproc_locked(xdd, NULL, 1);
1142                         if (lp) {
1143                                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1144                                         atomic_add_int(&xdd->uload,
1145                                             -((lp->lwp_priority & ~PPQMASK) &
1146                                               PRIMASK));
1147                                 }
1148                                 lp->lwp_qcpu = dd->cpuid;
1149                                 atomic_add_int(&dd->uload,
1150                                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1151                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1152                         }
1153                         spin_unlock(&xdd->spin);
1154                 } else {
1155                         lp = NULL;
1156                 }
1157                 return (lp);
1158         }
1159 #else
1160         {
1161                 return NULL;
1162         }
1163 #endif
1164         lp = TAILQ_FIRST(q);
1165         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1166
1167         /*
1168          * If the passed lwp <chklp> is reasonably close to the selected
1169          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1170          *
 1171  * Note that we must err on the side of <chklp> to avoid bouncing
1172          * between threads in the acquire code.
1173          */
1174         if (chklp) {
1175                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1176                         return(NULL);
1177         }
1178
1179         KTR_COND_LOG(usched_chooseproc,
1180             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1181             lp->lwp_proc->p_pid,
1182             lp->lwp_thread->td_gd->gd_cpuid,
1183             mycpu->gd_cpuid);
1184
1185         TAILQ_REMOVE(q, lp, lwp_procq);
1186         --dd->runqcount;
1187         if (TAILQ_EMPTY(q))
1188                 *which &= ~(1 << pri);
1189         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1190         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1191
1192         return lp;
1193 }
1194
1195 #ifdef SMP
1196
1197 /*
1198  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1199  *
1200  * Choose a cpu node to schedule lp on, hopefully nearby its current
1201  * node.  The current node is passed in (dd) (though it can also be obtained
1202  * from lp->lwp_qcpu).  The caller will dfly_setrunqueue() lp on the queue
1203  * we return.
1204  *
 1205  * When the topology is known, choose a cpu whose group has, in
 1206  * aggregate, the lowest weighted load.
1207  */
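
/*
 * Illustrative walk, assuming a 2-package box with 2 cores per package:
 * starting at the root the chooser sums dfly_pcpu[].uload over the cpus
 * under each package, descends into the lighter one (the branch containing
 * dd gets a small PPQ * level bonus), repeats the comparison for the cores
 * inside it, and finally returns the dfly_pcpu of the single cpu at the
 * terminal node.
 */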
1208 static
1209 dfly_pcpu_t
1210 dfly_choose_best_queue(dfly_pcpu_t dd, struct lwp *lp)
1211 {
1212         cpumask_t mask;
1213         cpu_node_t *cpup;
1214         cpu_node_t *cpun;
1215         cpu_node_t *cpub;
1216         dfly_pcpu_t rdd;
1217         int cpuid;
1218         int n;
1219         int load;
1220         int lowest_load;
1221         int level;
1222
1223         /*
1224          * When the topology is unknown choose a random cpu that is hopefully
1225          * idle.
1226          */
1227         if (dd->cpunode == NULL)
1228                 return (dfly_choose_queue_simple(dd, lp));
1229
1230         /*
 1231          * When the topology is known, choose a cpu whose group has, in
 1232          * aggregate, the lowest weighted load.
1233          */
1234         cpup = root_cpu_node;
1235         rdd = dd;
1236         level = cpu_topology_levels_number;
1237
1238         while (cpup) {
1239                 /*
1240                  * Degenerate case super-root
1241                  */
1242                 if (cpup->child_node && cpup->child_no == 1) {
1243                         cpup = cpup->child_node;
1244                         --level;
1245                         continue;
1246                 }
1247
1248                 /*
1249                  * Terminal cpunode
1250                  */
1251                 if (cpup->child_node == NULL) {
1252                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1253                         break;
1254                 }
1255
1256                 cpub = NULL;
1257                 lowest_load = 0x7FFFFFFF;
1258
1259                 for (n = 0; n < cpup->child_no; ++n) {
1260                         /*
1261                          * Accumulate load information for all cpus
1262                          * which are members of this node.
1263                          */
1264                         cpun = &cpup->child_node[n];
1265                         mask = cpun->members & usched_global_cpumask &
1266                                smp_active_mask & lp->lwp_cpumask;
1267                         if (mask == 0)
1268                                 continue;
1269                         load = 0;
1270                         while (mask) {
1271                                 cpuid = BSFCPUMASK(mask);
1272                                 load += dfly_pcpu[cpuid].uload;
1273                                 mask &= ~CPUMASK(cpuid);
1274                         }
1275
1276                         /*
1277                          * Give a slight advantage to nearby cpus.
1278                          */
1279                         if (cpun->members & dd->cpumask)
1280                                 load -= PPQ * level;
1281
1282                         /*
1283                          * Calculate the best load
1284                          */
1285                         if (cpub == NULL || lowest_load > load ||
1286                             (lowest_load == load &&
1287                              (cpun->members & dd->cpumask))
1288                         ) {
1289                                 lowest_load = load;
1290                                 cpub = cpun;
1291                         }
1292                 }
1293                 cpup = cpub;
1294                 --level;
1295         }
1296         if (usched_dfly_chooser)
1297                 kprintf("lp %02d->%02d %s\n",
1298                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1299         return (rdd);
1300 }
1301
1302 /*
1303  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1304  *
1305  * Choose the worst queue close to dd's cpu node with a non-empty runq.
1306  *
1307  * This is used by the thread chooser when the current cpu's queues are
1308  * empty to steal a thread from another cpu's queue.  We want to offload
1309  * the most heavily-loaded queue.
1310  */
1311 static
1312 dfly_pcpu_t
1313 dfly_choose_worst_queue(dfly_pcpu_t dd)
1314 {
1315         cpumask_t mask;
1316         cpu_node_t *cpup;
1317         cpu_node_t *cpun;
1318         cpu_node_t *cpub;
1319         dfly_pcpu_t rdd;
1320         int cpuid;
1321         int n;
1322         int load;
1323         int highest_load;
1324         int uloadok;
1325         int level;
1326
1327         /*
 1328          * When the topology is unknown there is nothing useful to steal
 1329          * from, so do not pull from any queue.
1330          */
1331         if (dd->cpunode == NULL) {
1332                 return (NULL);
1333         }
1334
1335         /*
 1336          * When the topology is known, choose a cpu whose group has, in
 1337          * aggregate, the highest weighted load.
1338          */
1339         cpup = root_cpu_node;
1340         rdd = dd;
1341         level = cpu_topology_levels_number;
1342         while (cpup) {
1343                 /*
1344                  * Degenerate case super-root
1345                  */
1346                 if (cpup->child_node && cpup->child_no == 1) {
1347                         cpup = cpup->child_node;
1348                         --level;
1349                         continue;
1350                 }
1351
1352                 /*
1353                  * Terminal cpunode
1354                  */
1355                 if (cpup->child_node == NULL) {
1356                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1357                         break;
1358                 }
1359
1360                 cpub = NULL;
1361                 highest_load = 0;
1362
1363                 for (n = 0; n < cpup->child_no; ++n) {
1364                         /*
1365                          * Accumulate load information for all cpus
1366                          * which are members of this node.
1367                          */
1368                         cpun = &cpup->child_node[n];
1369                         mask = cpun->members & usched_global_cpumask &
1370                                smp_active_mask;
1371                         if (mask == 0)
1372                                 continue;
1373                         load = 0;
1374                         uloadok = 0;
1375                         while (mask) {
1376                                 cpuid = BSFCPUMASK(mask);
1377                                 load += dfly_pcpu[cpuid].uload;
1378                                 if (dfly_pcpu[cpuid].uload)
1379                                         uloadok = 1;
1380                                 mask &= ~CPUMASK(cpuid);
1381                         }
1382
1383                         /*
1384                          * Give a slight advantage to nearby cpus.
1385                          */
1386                         if (cpun->members & dd->cpumask)
1387                                 load += PPQ * level;
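                             /*
                              * (Inflating the apparent load of nodes that
                              * contain our cpu makes them more likely to be
                              * selected as the steal source below, so we
                              * tend to pull from cpus that share cache
                              * with us.)
                              */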
1388
1389                         /*
1390                          * The best candidate is the one with the worst
1391                          * (highest) load.  Prefer candidates that are
1392                          * closer to our cpu.
1393                          */
1394                         if (uloadok &&
1395                             (cpub == NULL || highest_load < load ||
1396                              (highest_load == load &&
1397                               (cpun->members & dd->cpumask)))
1398                         ) {
1399                                 highest_load = load;
1400                                 cpub = cpun;
1401                         }
1402                 }
1403                 cpup = cpub;
1404                 --level;
1405         }
1406         return (rdd);
1407 }
1408
1409 static
1410 dfly_pcpu_t
1411 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1412 {
1413         dfly_pcpu_t rdd;
1414         cpumask_t tmpmask;
1415         cpumask_t mask;
1416         int cpuid;
1417
1418         /*
1419          * Fallback to the original heuristic, select random cpu,
1420          * first checking cpus not currently running a user thread.
1421          */
1422         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1423         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1424                smp_active_mask & usched_global_cpumask;
1425
1426         while (mask) {
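                     /*
                      * Scan upward from the rotor position:
                      * ~(CPUMASK(cpuid) - 1) keeps only cpus >= cpuid, and
                      * if none of those remain in the candidate mask we
                      * wrap around to the lowest set bit.
                      */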
1427                 tmpmask = ~(CPUMASK(cpuid) - 1);
1428                 if (mask & tmpmask)
1429                         cpuid = BSFCPUMASK(mask & tmpmask);
1430                 else
1431                         cpuid = BSFCPUMASK(mask);
1432                 rdd = &dfly_pcpu[cpuid];
1433
1434                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1435                         goto found;
1436                 mask &= ~CPUMASK(cpuid);
1437         }
1438
1439         /*
1440          * Then cpus which might have a currently running lp
1441          */
1442         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1443         mask = dfly_curprocmask & dfly_rdyprocmask &
1444                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1445
1446         while (mask) {
1447                 tmpmask = ~(CPUMASK(cpuid) - 1);
1448                 if (mask & tmpmask)
1449                         cpuid = BSFCPUMASK(mask & tmpmask);
1450                 else
1451                         cpuid = BSFCPUMASK(mask);
1452                 rdd = &dfly_pcpu[cpuid];
1453
1454                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1455                         goto found;
1456                 mask &= ~CPUMASK(cpuid);
1457         }
1458
1459         /*
1460          * If we cannot find a suitable cpu we reload from dfly_scancpu
1461          * and round-robin.  Other cpus will pick up work as they release their
1462          * current lwps or become ready.
1463          *
1464          * Avoid a degenerate system lockup case if usched_global_cpumask
1465          * is set to 0 or otherwise does not cover lwp_cpumask.
1466          *
1467          * We only kick the target helper thread in this case; we do not
1468          * set the user resched flag.
1469          */
1470         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1471         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1472                 cpuid = 0;
1473         rdd = &dfly_pcpu[cpuid];
1474 found:
1475         return (rdd);
1476 }
1477
1478 static
1479 void
1480 dfly_need_user_resched_remote(void *dummy)
1481 {
1482         globaldata_t gd = mycpu;
1483         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1484
1485         need_user_resched();
1486
1487         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1488         wakeup_mycpu(&dd->helper_thread);
1489 }
1490
1491 #endif
1492
1493 /*
1494  * dfly_remrunqueue_locked() removes a given process from the run queue
1495  * that it is on, clearing the queue busy bit if it becomes empty.
1496  *
1497  * Note that the user process scheduler is different from the LWKT scheduler.
1498  * The user process scheduler only manages user processes but it uses LWKT
1499  * underneath, and a user process operating in the kernel will often be
1500  * 'released' from our management.
1501  *
1502  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1503  * to sleep or the lwp is moved to a different runq.
1504  */
1505 static void
1506 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1507 {
1508         struct rq *q;
1509         u_int32_t *which;
1510         u_int8_t pri;
1511
1512         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1513         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1514         --rdd->runqcount;
1515         /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1516         KKASSERT(rdd->runqcount >= 0);
1517
1518         pri = lp->lwp_rqindex;
1519         switch(lp->lwp_rqtype) {
1520         case RTP_PRIO_NORMAL:
1521                 q = &rdd->queues[pri];
1522                 which = &rdd->queuebits;
1523                 break;
1524         case RTP_PRIO_REALTIME:
1525         case RTP_PRIO_FIFO:
1526                 q = &rdd->rtqueues[pri];
1527                 which = &rdd->rtqueuebits;
1528                 break;
1529         case RTP_PRIO_IDLE:
1530                 q = &rdd->idqueues[pri];
1531                 which = &rdd->idqueuebits;
1532                 break;
1533         default:
1534                 panic("remrunqueue: invalid rtprio type");
1535                 /* NOT REACHED */
1536         }
1537         TAILQ_REMOVE(q, lp, lwp_procq);
1538         if (TAILQ_EMPTY(q)) {
1539                 KASSERT((*which & (1 << pri)) != 0,
1540                         ("remrunqueue: remove from empty queue"));
1541                 *which &= ~(1 << pri);
1542         }
1543 }
1544
1545 /*
1546  * dfly_setrunqueue_locked()
1547  *
1548  * Add a process whose rqtype and rqindex have previously been calculated
1549  * onto the appropriate run queue.   Determine if the addition requires
1550  * a reschedule on a cpu and return the cpuid or -1.
1551  *
1552  * NOTE:          Lower priorities are better priorities.
1553  *
1554  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1555  *                rough sum of the lwp_priority values for all running and runnable
1556  *                processes.  Lower priority processes (higher lwp_priority
1557  *                values) actually DO count as more load, not less, because
1558  *                these are the programs which require the most care with
1559  *                regards to cpu selection.
1560  */
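/*
 * Sketch of the accounting (derived from the code below): each lwp counted
 * in uload contributes ((lwp_priority & ~PPQMASK) & PRIMASK), i.e. its
 * priority rounded down to a queue boundary with the PRIBASE_* class offset
 * stripped, so cpu-hungry or niced lwps (larger in-class priority values)
 * add more uload than interactive ones.
 */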
1561 static void
1562 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1563 {
1564         struct rq *q;
1565         u_int32_t *which;
1566         int pri;
1567
1568         if (lp->lwp_qcpu != rdd->cpuid) {
1569                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1570                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1571                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1572                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1573                 }
1574                 lp->lwp_qcpu = rdd->cpuid;
1575         }
1576
1577         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1578         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1579         ++rdd->runqcount;
1580         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1581                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1582                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1583                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1584         }
1585
1586         pri = lp->lwp_rqindex;
1587
1588         switch(lp->lwp_rqtype) {
1589         case RTP_PRIO_NORMAL:
1590                 q = &rdd->queues[pri];
1591                 which = &rdd->queuebits;
1592                 break;
1593         case RTP_PRIO_REALTIME:
1594         case RTP_PRIO_FIFO:
1595                 q = &rdd->rtqueues[pri];
1596                 which = &rdd->rtqueuebits;
1597                 break;
1598         case RTP_PRIO_IDLE:
1599                 q = &rdd->idqueues[pri];
1600                 which = &rdd->idqueuebits;
1601                 break;
1602         default:
1603                 panic("setrunqueue: invalid rtprio type");
1604                 /* NOT REACHED */
1605         }
1606
1607         /*
1608          * Add to the correct queue and set the appropriate bit.  If no
1609          * lower priority (i.e. better) processes are in the queue then
1610          * we want a reschedule; calculate the best cpu for the job.
1611          *
1612          * Always run reschedules on the LWP's original cpu.
1613          */
1614         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1615         *which |= 1 << pri;
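         /*
          * The queue bitmaps allow the chooser to locate the most
          * deserving non-empty queue with a single bit-scan rather than
          * walking all NQS queues.
          */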
1616 }
1617
1618 #ifdef SMP
1619
1620 /*
1621  * For SMP systems a user scheduler helper thread is created for each
1622  * cpu and is used to allow one cpu to wakeup another for the purposes of
1623  * cpu and is used to allow one cpu to wake up another for the purposes of
1624  *
1625  * UP systems do not need the helper since there is only one cpu.
1626  *
1627  * We can't use the idle thread for this because we might block.
1628  * Additionally, doing things this way allows us to HLT idle cpus
1629  * on MP systems.
1630  */
1631 static void
1632 dfly_helper_thread(void *dummy)
1633 {
1634     globaldata_t gd;
1635     dfly_pcpu_t  dd;
1636     struct lwp *nlp;
1637     cpumask_t mask;
1638     int cpuid;
1639
1640     gd = mycpu;
1641     cpuid = gd->gd_cpuid;       /* doesn't change */
1642     mask = gd->gd_cpumask;      /* doesn't change */
1643     dd = &dfly_pcpu[cpuid];
1644
1645     /*
1646      * Since we only want to be woken up when no user processes
1647      * are scheduled on a cpu, run at an ultra low priority.
1648      */
1649     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1650
1651     tsleep(&dd->helper_thread, 0, "schslp", 0);
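     /*
      * The sleep above simply waits for the first wakeup.  Each loop
      * iteration below re-arms with tsleep_interlock() while still in the
      * critical section, so a wakeup arriving after the interlock is not
      * lost before the PINTERLOCKED tsleep at the bottom of the loop.
      */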
1652
1653     for (;;) {
1654         /*
1655          * We use the LWKT deschedule-interlock trick to avoid racing
1656          * dfly_rdyprocmask.  This means we cannot block through to the
1657          * manual lwkt_switch() call we make below.
1658          */
1659         crit_enter_gd(gd);
1660         tsleep_interlock(&dd->helper_thread, 0);
1661
1662         /*spin_lock(&dfly_spin);*/
1663         spin_lock(&dd->spin);
1664
1665         atomic_set_cpumask(&dfly_rdyprocmask, mask);
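                 /*
                  * Advertise that this helper is parked and may be woken
                  * again.  The waking cpu is presumably expected to clear
                  * our bit in dfly_rdyprocmask (not shown here) so it does
                  * not issue redundant wakeups.
                  */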
1666                 clear_user_resched();   /* This satisfies the reschedule request */
1667         dd->rrcount = 0;        /* Reset the round-robin counter */
1668
1669         if ((dfly_curprocmask & mask) == 0) {
1670                 /*
1671                  * No thread is currently scheduled.
1672                  */
1673                 KKASSERT(dd->uschedcp == NULL);
1674                 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1675                         KTR_COND_LOG(usched_sched_thread_no_process,
1676                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1677                             gd->gd_cpuid,
1678                             nlp->lwp_proc->p_pid,
1679                             nlp->lwp_thread->td_gd->gd_cpuid);
1680
1681                         atomic_set_cpumask(&dfly_curprocmask, mask);
1682                         dd->upri = nlp->lwp_priority;
1683                         dd->uschedcp = nlp;
1684                         dd->rrcount = 0;        /* reset round robin */
1685                         spin_unlock(&dd->spin);
1686                         /*spin_unlock(&dfly_spin);*/
1687                         lwkt_acquire(nlp->lwp_thread);
1688                         lwkt_schedule(nlp->lwp_thread);
1689                 } else {
1690                         spin_unlock(&dd->spin);
1691                         /*spin_unlock(&dfly_spin);*/
1692                 }
1693         } else if (dd->runqcount) {
1694                 /*
1695                  * Possibly find a better process to schedule.
1696                  */
1697                 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1698                 if (nlp) {
1699                         KTR_COND_LOG(usched_sched_thread_process,
1700                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1701                             gd->gd_cpuid,
1702                             nlp->lwp_proc->p_pid,
1703                             nlp->lwp_thread->td_gd->gd_cpuid);
1704
1705                         dd->upri = nlp->lwp_priority;
1706                         dd->uschedcp = nlp;
1707                         dd->rrcount = 0;        /* reset round robin */
1708                         spin_unlock(&dd->spin);
1709                         /*spin_unlock(&dfly_spin);*/
1710                         lwkt_acquire(nlp->lwp_thread);
1711                         lwkt_schedule(nlp->lwp_thread);
1712                 } else {
1713                         /*
1714                          * Leave the thread on our run queue.  Another
1715                          * scheduler will try to pull it later.
1716                          */
1717                         spin_unlock(&dd->spin);
1718                         /*spin_unlock(&dfly_spin);*/
1719                 }
1720         } else {
1721                 /*
1722                  * The runq is empty.
1723                  */
1724                 spin_unlock(&dd->spin);
1725                 /*spin_unlock(&dfly_spin);*/
1726         }
1727
1728         /*
1729          * We're descheduled unless someone scheduled us.  Switch away.
1730          * Exiting the critical section will cause splz() to be called
1731          * for us if interrupts and such are pending.
1732          */
1733         crit_exit_gd(gd);
1734         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1735     }
1736 }
1737
1738 /* sysctl stick_to_level parameter */
1739 static int
1740 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1741 {
1742         int error, new_val;
1743
1744         new_val = usched_dfly_stick_to_level;
1745
1746         error = sysctl_handle_int(oidp, &new_val, 0, req);
1747         if (error != 0 || req->newptr == NULL)
1748                 return (error);
1749         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1750                 return (EINVAL);
1751         usched_dfly_stick_to_level = new_val;
1752         return (0);
1753 }
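/*
 * Hypothetical usage from a shell, assuming the node registered below
 * shows up as kern.usched_dfly:
 *
 *      sysctl kern.usched_dfly.stick_to_level=1
 *
 * The handler above rejects values outside
 * [0, cpu_topology_levels_number - 1] with EINVAL.
 */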
1754
1755 /*
1756  * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
1757  * been cleared by rqinit() and we should not mess with it further.
1758  */
1759 static void
1760 dfly_helper_thread_cpu_init(void)
1761 {
1762         int i;
1763         int j;
1764         int cpuid;
1765         int smt_not_supported = 0;
1766         int cache_coherent_not_supported = 0;
1767
1768         if (bootverbose)
1769                 kprintf("Start scheduler helpers on cpus:\n");
1770
1771         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1772         usched_dfly_sysctl_tree =
1773                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1774                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1775                                 "usched_dfly", CTLFLAG_RD, 0, "");
1776
1777         for (i = 0; i < ncpus; ++i) {
1778                 dfly_pcpu_t dd = &dfly_pcpu[i];
1779                 cpumask_t mask = CPUMASK(i);
1780
1781                 if ((mask & smp_active_mask) == 0)
1782                     continue;
1783
1784                 spin_init(&dd->spin);
1785                 dd->cpunode = get_cpu_node_by_cpuid(i);
1786                 dd->cpuid = i;
1787                 dd->cpumask = CPUMASK(i);
1788                 for (j = 0; j < NQS; j++) {
1789                         TAILQ_INIT(&dd->queues[j]);
1790                         TAILQ_INIT(&dd->rtqueues[j]);
1791                         TAILQ_INIT(&dd->idqueues[j]);
1792                 }
1793                 atomic_clear_cpumask(&dfly_curprocmask, 1);
1794
1795                 if (dd->cpunode == NULL) {
1796                         smt_not_supported = 1;
1797                         cache_coherent_not_supported = 1;
1798                         if (bootverbose)
1799                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1800                                          "found for cpu\n", i);
1801                 } else {
1802                         switch (dd->cpunode->type) {
1803                         case THREAD_LEVEL:
1804                                 if (bootverbose)
1805                                         kprintf ("\tcpu%d - HyperThreading "
1806                                                  "available. Core siblings: ",
1807                                                  i);
1808                                 break;
1809                         case CORE_LEVEL:
1810                                 smt_not_supported = 1;
1811
1812                                 if (bootverbose)
1813                                         kprintf ("\tcpu%d - No HT available, "
1814                                                  "multi-core/physical "
1815                                                  "cpu. Physical siblings: ",
1816                                                  i);
1817                                 break;
1818                         case CHIP_LEVEL:
1819                                 smt_not_supported = 1;
1820
1821                                 if (bootverbose)
1822                                         kprintf ("\tcpu%d - No HT available, "
1823                                                  "single-core/physical cpu. "
1824                                                  "Package Siblings: ",
1825                                                  i);
1826                                 break;
1827                         default:
1828                                 /* Let's go for safe defaults here */
1829                                 smt_not_supported = 1;
1830                                 cache_coherent_not_supported = 1;
1831                                 if (bootverbose)
1832                                         kprintf ("\tcpu%d - Unknown cpunode->"
1833                                                  "type=%u. Siblings: ",
1834                                                  i,
1835                                                  (u_int)dd->cpunode->type);
1836                                 break;
1837                         }
1838
1839                         if (bootverbose) {
1840                                 if (dd->cpunode->parent_node != NULL) {
1841                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1842                                                 kprintf("cpu%d ", cpuid);
1843                                         kprintf("\n");
1844                                 } else {
1845                                         kprintf(" no siblings\n");
1846                                 }
1847                         }
1848                 }
1849
1850                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1851                             0, i, "usched %d", i);
1852
1853                 /*
1854                  * Allow user scheduling on the target cpu.  cpu #0 has already
1855                  * been enabled in rqinit().
1856                  */
1857                 if (i)
1858                     atomic_clear_cpumask(&dfly_curprocmask, mask);
1859                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1860                 dd->upri = PRIBASE_NULL;
1861
1862         }
1863
1864         /* usched_dfly sysctl configurable parameters */
1865
1866         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1867                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1868                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1869                        &usched_dfly_rrinterval, 0, "");
1870         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1871                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1872                        OID_AUTO, "decay", CTLFLAG_RW,
1873                        &usched_dfly_decay, 0, "Extra decay when not running");
1874         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1875                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1876                        OID_AUTO, "batch_time", CTLFLAG_RW,
1877                        &usched_dfly_batch_time, 0, "Min batch counter value");
1878
1879         /* Add enable/disable option for SMT scheduling if supported */
1880         if (smt_not_supported) {
1881                 usched_dfly_smt = 0;
1882                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1883                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1884                                   OID_AUTO, "smt", CTLFLAG_RD,
1885                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1886         } else {
1887                 usched_dfly_smt = 1;
1888                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1889                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1890                                OID_AUTO, "smt", CTLFLAG_RW,
1891                                &usched_dfly_smt, 0, "Enable SMT scheduling");
1892         }
1893
1894         /*
1895          * Add enable/disable option for cache coherent scheduling
1896          * if supported
1897          */
1898         if (cache_coherent_not_supported) {
1899                 usched_dfly_cache_coherent = 0;
1900                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1901                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1902                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
1903                                   "NOT SUPPORTED", 0,
1904                                   "Cache coherence NOT SUPPORTED");
1905         } else {
1906                 usched_dfly_cache_coherent = 1;
1907                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1908                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1909                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
1910                                &usched_dfly_cache_coherent, 0,
1911                                "Enable/Disable cache coherent scheduling");
1912
1913                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1914                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1915                                OID_AUTO, "upri_affinity", CTLFLAG_RW,
1916                                &usched_dfly_upri_affinity, 1,
1917                                "Number of PPQs in user priority check");
1918
1919                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1920                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1921                                OID_AUTO, "queue_checks", CTLFLAG_RW,
1922                                &usched_dfly_queue_checks, 5,
1923                                "LWPs to check from a queue before giving up");
1924
1925                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
1926                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1927                                 OID_AUTO, "stick_to_level",
1928                                 CTLTYPE_INT | CTLFLAG_RW,
1929                                 NULL, sizeof usched_dfly_stick_to_level,
1930                                 sysctl_usched_dfly_stick_to_level, "I",
1931                                 "Stick a process to this level. See sysctl "
1932                                 "parameter hw.cpu_topology.level_description");
1933         }
1934 }
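/*
 * The tunables registered above surface as kern.usched_dfly.*.  For
 * example, on hardware where SMT is detected, something like
 * "sysctl kern.usched_dfly.smt=0" should disable SMT-aware scheduling;
 * the exact set of nodes depends on the topology detected above.
 */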
1935 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1936         dfly_helper_thread_cpu_init, NULL)
1937
1938 #else /* No SMP options - just add the configurable parameters to sysctl */
1939
1940 static void
1941 sched_sysctl_tree_init(void)
1942 {
1943         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1944         usched_dfly_sysctl_tree =
1945                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1946                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1947                                 "usched_dfly", CTLFLAG_RD, 0, "");
1948
1949         /* usched_dfly sysctl configurable parameters */
1950         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1951                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1952                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1953                        &usched_dfly_rrinterval, 0, "");
1954         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1955                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1956                        OID_AUTO, "decay", CTLFLAG_RW,
1957                        &usched_dfly_decay, 0, "Extra decay when not running");
1958         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1959                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1960                        OID_AUTO, "batch_time", CTLFLAG_RW,
1961                        &usched_dfly_batch_time, 0, "Min batch counter value");
1962 }
1963 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
1964         sched_sysctl_tree_init, NULL)
1965 #endif