kernel - Add usched_dfly algorithm, set as default for now (4)
[dragonfly.git] / sys / kern / usched_dfly.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com>,
7  * by Mihai Carabas <mihai.carabas@gmail.com>
8  * and many others.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
52
53 #include <sys/ktr.h>
54
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
57
58 /*
59  * Priorities.  Note that with 32 run queues per scheduler each queue
60  * represents four priority levels.
61  */
62
63 int dfly_rebalanced;
64
65 #define MAXPRI                  128
66 #define PRIMASK                 (MAXPRI - 1)
67 #define PRIBASE_REALTIME        0
68 #define PRIBASE_NORMAL          MAXPRI
69 #define PRIBASE_IDLE            (MAXPRI * 2)
70 #define PRIBASE_THREAD          (MAXPRI * 3)
71 #define PRIBASE_NULL            (MAXPRI * 4)
72
73 #define NQS     32                      /* 32 run queues. */
74 #define PPQ     (MAXPRI / NQS)          /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
76
77 /*
78  * NICEPPQ      - number of nice units per priority queue
79  * ESTCPUPPQ    - number of estcpu units per priority queue
80  * ESTCPUMAX    - number of estcpu units
81  */
82 #define NICEPPQ         2
83 #define ESTCPUPPQ       512
84 #define ESTCPUMAX       (ESTCPUPPQ * NQS)
85 #define BATCHMAX        (ESTCPUFREQ * 30)
86 #define PRIO_RANGE      (PRIO_MAX - PRIO_MIN + 1)
87
88 #define ESTCPULIM(v)    min((v), ESTCPUMAX)
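/*
 * Illustrative arithmetic for the constants above (comment only):
 * ESTCPUMAX = ESTCPUPPQ * NQS = 512 * 32 = 16384, so ESTCPULIM() caps a
 * fully cpu-bound thread's estcpu at 16384.  With MAXPRI = 128 and
 * NQS = 32, PPQ = 4, so each run queue spans four priority levels and
 * ESTCPUPPQ (512) estcpu units correspond to one queue's worth (PPQ) of
 * pre-scaled priority in dfly_resetpriority().
 */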
89
90 TAILQ_HEAD(rq, lwp);
91
92 #define lwp_priority    lwp_usdata.dfly.priority
93 #define lwp_forked      lwp_usdata.dfly.forked
94 #define lwp_rqindex     lwp_usdata.dfly.rqindex
95 #define lwp_estcpu      lwp_usdata.dfly.estcpu
96 #define lwp_batch       lwp_usdata.dfly.batch
97 #define lwp_rqtype      lwp_usdata.dfly.rqtype
98 #define lwp_qcpu        lwp_usdata.dfly.qcpu
99
100 struct usched_dfly_pcpu {
101         struct spinlock spin;
102         struct thread   helper_thread;
103         short           rrcount;
104         short           upri;
105         int             uload;
106         struct lwp      *uschedcp;
107         struct rq       queues[NQS];
108         struct rq       rtqueues[NQS];
109         struct rq       idqueues[NQS];
110         u_int32_t       queuebits;
111         u_int32_t       rtqueuebits;
112         u_int32_t       idqueuebits;
113         int             runqcount;
114         int             cpuid;
115         cpumask_t       cpumask;
116 #ifdef SMP
117         cpu_node_t      *cpunode;
118 #endif
119 };
120
121 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
122
123 static void dfly_acquire_curproc(struct lwp *lp);
124 static void dfly_release_curproc(struct lwp *lp);
125 static void dfly_select_curproc(globaldata_t gd);
126 static void dfly_setrunqueue(struct lwp *lp);
127 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
128                                 sysclock_t cpstamp);
129 static void dfly_recalculate_estcpu(struct lwp *lp);
130 static void dfly_resetpriority(struct lwp *lp);
131 static void dfly_forking(struct lwp *plp, struct lwp *lp);
132 static void dfly_exiting(struct lwp *lp, struct proc *);
133 static void dfly_uload_update(struct lwp *lp);
134 static void dfly_yield(struct lwp *lp);
135 #ifdef SMP
136 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
137 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
138 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
139 #if 0
140 static void dfly_wakeup_random_helper(dfly_pcpu_t notdd);
141 #endif
142 #endif
143
144 #ifdef SMP
145 static void dfly_need_user_resched_remote(void *dummy);
146 #endif
147 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
148                                         int isremote);
149 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
150 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
151
152 struct usched usched_dfly = {
153         { NULL },
154         "dfly", "Original DragonFly Scheduler",
155         NULL,                   /* default registration */
156         NULL,                   /* default deregistration */
157         dfly_acquire_curproc,
158         dfly_release_curproc,
159         dfly_setrunqueue,
160         dfly_schedulerclock,
161         dfly_recalculate_estcpu,
162         dfly_resetpriority,
163         dfly_forking,
164         dfly_exiting,
165         dfly_uload_update,
166         NULL,                   /* setcpumask not supported */
167         dfly_yield
168 };
169
170 /*
171  * We have NQS (32) run queues per scheduling class.  For the normal
172  * class, there are 128 priorities scaled onto these 32 queues.  New
173  * processes are added to the last entry in each queue, and processes
174  * are selected for running by taking them from the head and maintaining
175  * a simple FIFO arrangement.  Realtime and Idle priority processes have
176  * an explicit 0-31 priority which maps directly onto their class queue
177  * index.  When a queue has something in it, the corresponding bit is
178  * set in the queuebits variable, allowing a single read to determine
179  * the state of all 32 queues and then a ffs() to find the first busy
180  * queue.
181  */
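/*
 * Minimal sketch (hypothetical helper, kept disabled) of how the queuebits
 * words described above are consumed: a single word read plus bsfl()
 * locates the lowest-numbered non-empty queue.  The real selection across
 * all three classes is dfly_chooseproc_locked() below; this only
 * illustrates the normal class.
 */
#if 0
static struct lwp *
dfly_peek_best_locked(dfly_pcpu_t dd)
{
	u_int32_t bits = dd->queuebits;
	int pri;

	if (bits == 0)
		return (NULL);
	pri = bsfl(bits);	/* first set bit == best (lowest) queue */
	return (TAILQ_FIRST(&dd->queues[pri]));
}
#endif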
182 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
183 static cpumask_t dfly_rdyprocmask;      /* ready to accept a user process */
184 #ifdef SMP
185 static volatile int dfly_scancpu;
186 /*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
187 #endif
188 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
189 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
190 static struct sysctl_oid *usched_dfly_sysctl_tree;
191
192 /* Debug info exposed through debug.* sysctl */
193
194 static int usched_dfly_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
196            &usched_dfly_debug, 0,
197            "Print debug information for this pid");
198
199 static int usched_dfly_pid_debug = -1;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
201            &usched_dfly_pid_debug, 0,
202            "Print KTR debug information for this pid");
203
204 static int usched_dfly_chooser = 0;
205 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
206            &usched_dfly_chooser, 0,
207            "Print debug information about cpu selection");
208
209 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
210 #ifdef SMP
211 static int usched_dfly_smt = 0;
212 static int usched_dfly_cache_coherent = 0;
213 static int usched_dfly_weight1 = 10;
214 static int usched_dfly_weight2 = 5;
215 static int usched_dfly_stick_to_level = 0;
216 #endif
217 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
218 static int usched_dfly_decay = 8;
219 static int usched_dfly_batch_time = 10;
220
221 /* KTR debug printings */
222
223 KTR_INFO_MASTER(usched);
224
225 #if !defined(KTR_USCHED_DFLY)
226 #define KTR_USCHED_DFLY KTR_ALL
227 #endif
228
229 #if 0
230 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
231     "USCHED_DFLY(dfly_acquire_curproc in user_resched_wanted "
232     "after release: pid %d, cpuid %d, curr_cpuid %d)",
233     pid_t pid, int cpuid, int curr);
234 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
235     "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
236     "curr_cpuid %d)",
237     pid_t pid, int cpuid, int curr);
238 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
239     "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
240     "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
241     pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
242 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
243     "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
244     "cpuid %d, curr_cpuid %d)",
245     pid_t pid, int cpuid, int curr);
246
247 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
248     "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
249     "cpuid %d, curr_cpuid %d)",
250     pid_t pid, int cpuid, int curr);
251
252 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
253     "USCHED_DFLY(dfly_select_curproc before select: pid %d, "
254     "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
255     pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
256
257 #ifdef SMP
258 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
259     "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
260     "cpuid %d, verify_mask %lu)",
261     pid_t pid, int cpuid, cpumask_t mask);
262 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
263     "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
264     "cpuid %d, verify_mask %lu)",
265     pid_t pid, int cpuid, cpumask_t mask);
266
267 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
268     "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
269     "mask %lu, curr_cpuid %d)",
270     pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
272     "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
273     "cpuid %d, mask %lu, curr_cpuid %d)",
274     pid_t pid, int cpuid, cpumask_t mask, int curr);
275 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
276     "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
277     "cpuid %d, mask %lu, curr_cpuid %d)",
278     pid_t pid, int cpuid, cpumask_t mask, int curr);
279 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
280     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
281     "mask %lu, found_cpuid %d, curr_cpuid %d)",
282     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
283 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
284     "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
285     "try_cpuid %d, curr_cpuid %d)",
286     pid_t pid, int cpuid, int try_cpuid, int curr);
287 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
288     "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
289     "mask %lu, found_cpuid %d, curr_cpuid %d)",
290     pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
291 #endif
292 #endif
293
294 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
295     "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
296     pid_t pid, int old_cpuid, int curr);
297 #ifdef SMP
298 #if 0
299 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
300     "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
301     pid_t pid, int old_cpuid, int curr);
302 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
303     "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
304     "sibling_mask %lu, curr_cpumask %lu)",
305     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
306 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
307     "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
308     "sibling_mask %lu, curr_cpumask: %lu)",
309     pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
310 #endif
311
312 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
313     "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
314     int id, pid_t pid, int cpuid);
315 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
316     "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
317     int id, pid_t pid, int cpuid);
318 #if 0
319 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
320     "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
321     int id, cpumask_t tmpmask);
322 #endif
323 #endif
324
325 /*
326  * DFLY_ACQUIRE_CURPROC
327  *
328  * This function is called when the kernel intends to return to userland.
329  * It is responsible for making the thread the current designated userland
330  * thread for this cpu, blocking if necessary.
331  *
332  * The kernel has already depressed our LWKT priority so we must not switch
333  * until we have either assigned or disposed of the thread.
334  *
335  * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
336  * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
337  * occur, this function is called only under very controlled circumstances.
338  */
339 static void
340 dfly_acquire_curproc(struct lwp *lp)
341 {
342         globaldata_t gd;
343         dfly_pcpu_t dd;
344         thread_t td;
345
346         /*
347          * Make sure we aren't sitting on a tsleep queue.
348          */
349         td = lp->lwp_thread;
350         crit_enter_quick(td);
351         if (td->td_flags & TDF_TSLEEPQ)
352                 tsleep_remove(td);
353         dfly_recalculate_estcpu(lp);
354
355         /*
356          * If a reschedule was requested give another thread the
357          * driver's seat.
358          */
359         if (user_resched_wanted()) {
360                 clear_user_resched();
361                 dfly_release_curproc(lp);
362         }
363
364         /*
365          * Loop until we are the current user thread
366          */
367         gd = mycpu;
368         dd = &dfly_pcpu[gd->gd_cpuid];
369
370         do {
371                 /*
372                  * Process any pending events and higher priority threads.
373                  */
374                 lwkt_yield();
375
376                 /*
377                  * Become the currently scheduled user thread for this cpu
378                  * if we can do so trivially.
379                  *
380                  * We can steal another thread's current thread designation
381                  * on this cpu: since we are running, that other thread
382                  * cannot be, so we can safely deschedule it.
383                  */
384                 if (dd->uschedcp == lp) {
385                         /*
386                          * We are already the current lwp (hot path).
387                          */
388                         dd->upri = lp->lwp_priority;
389                 } else if (dd->uschedcp == NULL) {
390                         /*
391                          * We can trivially become the current lwp.
392                          */
393                         atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
394                         dd->uschedcp = lp;
395                         dd->upri = lp->lwp_priority;
396                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
397                 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
398                         /*
399                          * We can steal the current cpu's lwp designation
400                          * away simply by replacing it.  The other thread
401                          * will stall when it tries to return to userland,
402                          * possibly rescheduling elsewhere when it calls
403                          * setrunqueue.
404                          */
405                         dd->uschedcp = lp;
406                         dd->upri = lp->lwp_priority;
407                         KKASSERT(lp->lwp_qcpu == dd->cpuid);
408                 } else {
409                         /*
410                          * We cannot become the current lwp, place the lp
411                          * on the run-queue of this or another cpu and
412                          * deschedule ourselves.
413                          *
414                          * When we are reactivated we will have another
415                          * chance.
416                          */
417                         lwkt_deschedule(lp->lwp_thread);
418                         dfly_setrunqueue(lp);
419
420                         /*
421                          * Reload after a switch or setrunqueue/switch possibly
422                          * moved us to another cpu.
423                          */
424                         lwkt_switch();
425                         gd = mycpu;
426                         dd = &dfly_pcpu[gd->gd_cpuid];
427                 }
428         } while (dd->uschedcp != lp);
429
430         crit_exit_quick(td);
431         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
432 }
433
434 /*
435  * DFLY_RELEASE_CURPROC
436  *
437  * This routine detaches the current thread from the userland scheduler,
438  * usually because the thread needs to run or block in the kernel (at
439  * kernel priority) for a while.
440  *
441  * This routine is also responsible for selecting a new thread to
442  * make the current thread.
443  *
444  * NOTE: This implementation differs from the dummy example in that
445  * dfly_select_curproc() is able to select the current process, whereas
446  * dummy_select_curproc() is not able to select the current process.
447  * This means we have to NULL out uschedcp.
448  *
449  * Additionally, note that we may already be on a run queue if releasing
450  * via the lwkt_switch() in dfly_setrunqueue().
451  */
452
453 static void
454 dfly_release_curproc(struct lwp *lp)
455 {
456         globaldata_t gd = mycpu;
457         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
458
459         /*
460          * Make sure td_wakefromcpu is defaulted.  This will be overwritten
461          * by wakeup().
462          */
463         lp->lwp_thread->td_wakefromcpu = gd->gd_cpuid;
464
465         if (dd->uschedcp == lp) {
466                 crit_enter();
467                 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
468
469                 dd->uschedcp = NULL;    /* don't let lp be selected */
470                 dd->upri = PRIBASE_NULL;
471                 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
472                 dfly_select_curproc(gd);
473                 crit_exit();
474         }
475 }
476
477 /*
478  * DFLY_SELECT_CURPROC
479  *
480  * Select a new current process for this cpu and clear any pending user
481  * reschedule request.  The cpu currently has no current process.
482  *
483  * This routine is also responsible for equal-priority round-robining,
484  * typically triggered from dfly_schedulerclock().  In our dummy example
485  * all the 'user' threads are LWKT scheduled all at once and we just
486  * call lwkt_switch().
487  *
488  * The calling process is not on the queue and cannot be selected.
489  */
490 static
491 void
492 dfly_select_curproc(globaldata_t gd)
493 {
494         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
495         struct lwp *nlp;
496         int cpuid = gd->gd_cpuid;
497
498         crit_enter_gd(gd);
499
500         /*spin_lock(&dfly_spin);*/
501         spin_lock(&dd->spin);
502         nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
503
504         if (nlp) {
505                 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
506                 dd->upri = nlp->lwp_priority;
507                 dd->uschedcp = nlp;
508                 dd->rrcount = 0;                /* reset round robin */
509                 spin_unlock(&dd->spin);
510                 /*spin_unlock(&dfly_spin);*/
511 #ifdef SMP
512                 lwkt_acquire(nlp->lwp_thread);
513 #endif
514                 lwkt_schedule(nlp->lwp_thread);
515         } else {
516                 spin_unlock(&dd->spin);
517                 /*spin_unlock(&dfly_spin);*/
518         }
519         crit_exit_gd(gd);
520 }
521
522 /*
523  * Place the specified lwp on the user scheduler's run queue.  This routine
524  * must be called with the thread descheduled.  The lwp must be runnable.
525  * It must not be possible for anyone else to explicitly schedule this thread.
526  *
527  * The thread may be the current thread as a special case.
528  */
529 static void
530 dfly_setrunqueue(struct lwp *lp)
531 {
532 #ifdef SMP
533         globaldata_t rgd;
534 #endif
535         dfly_pcpu_t rdd;
536
537         /*
538          * First validate the process LWKT state.
539          */
540         crit_enter();
541         KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
542         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
543             ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
544              lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
545         KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
546
547         /*
548          * NOTE: rdd does not necessarily represent the current cpu.
549          *       Instead it represents the cpu the thread was last
550          *       scheduled on.
551          */
552         rdd = &dfly_pcpu[lp->lwp_qcpu];
553
554         /*
555          * This process is not supposed to be scheduled anywhere or assigned
556          * as the current process anywhere.  Assert the condition.
557          */
558         KKASSERT(rdd->uschedcp != lp);
559
560 #ifndef SMP
561         /*
562          * If we are not SMP we do not have a scheduler helper to kick
563          * and must directly activate the process if none are scheduled.
564          *
565          * This is really only an issue when bootstrapping init since
566          * the caller in all other cases will be a user process, and
567          * even if released (rdd->uschedcp == NULL), that process will
568          * kickstart the scheduler when it returns to user mode from
569          * the kernel.
570          *
571          * NOTE: On SMP we can't just set some other cpu's uschedcp.
572          */
573         if (rdd->uschedcp == NULL) {
574                 spin_lock(&rdd->spin);
575                 if (rdd->uschedcp == NULL) {
576                         atomic_set_cpumask(&dfly_curprocmask, 1);
577                         rdd->uschedcp = lp;
578                         rdd->upri = lp->lwp_priority;
579                         spin_unlock(&rdd->spin);
580                         lwkt_schedule(lp->lwp_thread);
581                         crit_exit();
582                         return;
583                 }
584                 spin_unlock(&rdd->spin);
585         }
586 #endif
587
588 #ifdef SMP
589         /*
590          * XXX fixme.  Could be part of a remrunqueue/setrunqueue
591          * operation when the priority is recalculated, so TDF_MIGRATING
592          * may already be set.
593          */
594         if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
595                 lwkt_giveaway(lp->lwp_thread);
596 #endif
597
598 #ifdef SMP
599         /*
600          * Ok, we have to setrunqueue some target cpu and request a reschedule
601          * if necessary.
602          *
603          * We have to choose the best target cpu.  It might not be the current
604          * target even if the current cpu has no running user thread (for
605          * example, because the current cpu might be a hyperthread and its
606          * sibling has a thread assigned).
607          *
608          * If we just forked it is optimal to run the child on the same
609          * cpu just in case the parent decides to wait for it (thus getting
610          * off that cpu).  As long as there is nothing else runnable on the
611          * cpu, that is.  If we did this unconditionally a parent forking
612          * multiple children before waiting (e.g. make -j N) leaves other
613          * cpus idle that could be working.
614          */
615         /*spin_lock(&dfly_spin);*/
616         if (lp->lwp_forked) {
617                 lp->lwp_forked = 0;
618                 if (dfly_pcpu[lp->lwp_qcpu].runqcount)
619                         rdd = dfly_choose_best_queue(lp);
620                 else
621                         rdd = &dfly_pcpu[lp->lwp_qcpu];
622                 /* dfly_wakeup_random_helper(rdd); */
623         } else {
624                 rdd = dfly_choose_best_queue(lp);
625         }
626         rgd = globaldata_find(rdd->cpuid);
627
628         /*
629          * We lose control of lp the moment we release the spinlock after
630          * having placed lp on the queue.  i.e. another cpu could pick it
631          * up and it could exit, or its priority could be further adjusted,
632          * or something like that.
633          *
634          * WARNING! dd can point to a foreign cpu!
635          */
636         spin_lock(&rdd->spin);
637         dfly_setrunqueue_locked(rdd, lp);
638         /*spin_unlock(&dfly_spin);*/
639
640         if (rgd == mycpu) {
641                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
642                         spin_unlock(&rdd->spin);
643                         if (rdd->uschedcp == NULL) {
644                                 wakeup_mycpu(&rdd->helper_thread); /* XXX */
645                                 need_user_resched();
646                         } else {
647                                 need_user_resched();
648                         }
649                 } else {
650                         spin_unlock(&rdd->spin);
651                 }
652         } else {
653                 atomic_clear_cpumask(&dfly_rdyprocmask, rgd->gd_cpumask);
654                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
655                         spin_unlock(&rdd->spin);
656                         lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
657                                        NULL);
658                 } else {
659                         spin_unlock(&rdd->spin);
660                         wakeup(&rdd->helper_thread);
661                 }
662         }
663 #else
664         /*
665          * Request a reschedule if appropriate.
666          */
667         spin_lock(&rdd->spin);
668         dfly_setrunqueue_locked(rdd, lp);
669         spin_unlock(&rdd->spin);
670         if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
671                 need_user_resched();
672         }
673 #endif
674         crit_exit();
675 }
676
677 #if 0
678
679 /*
680  * This wakes up a random helper that might have no work on its cpu to do.
681  * The idea is to improve fork/fork-exec/fork-wait/exec and similar
682  * process-spawning sequences by first scheduling the forked process
683  * on the same cpu as the parent, in case the parent is just going to
684  * wait*().  But if the parent does not wait we want another cpu to pick
685  * the forked process up ASAP.
686  *
687  * The ipi/helper-scheduling sequence typically takes a lot longer to run
688  * than a return-from-procedure-call and the parent then entering a
689  * wait*().  There's a race here that we want the parent to win ONLY if
690  * it is going to wait*().
691  *
692  * If a process sticks around for long enough normal scheduling action
693  * will move it to the right place.
694  */
695 static
696 void
697 dfly_wakeup_random_helper(dfly_pcpu_t notdd)
698 {
699         cpumask_t tmpmask;
700         cpumask_t mask;
701         int cpuid;
702
703         mask = dfly_rdyprocmask & ~dfly_curprocmask & smp_active_mask &
704                usched_global_cpumask & ~notdd->cpumask;
705         ++dfly_scancpu;
706         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
707
708         if (mask) {
709                 tmpmask = ~(CPUMASK(cpuid) - 1);
710                 if (mask & tmpmask)
711                         cpuid = BSFCPUMASK(mask & tmpmask);
712                 else
713                         cpuid = BSFCPUMASK(mask);
714                 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
715                 wakeup(&dfly_pcpu[cpuid].helper_thread);
716         }
717 }
718
719 #endif
720
721 /*
722  * This routine is called from a systimer IPI.  It MUST be MP-safe and
723  * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
724  * each cpu.
725  */
726 static
727 void
728 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
729 {
730         globaldata_t gd = mycpu;
731         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
732
733         /*
734          * Do we need to round-robin?  We round-robin 10 times a second.
735          * This should only occur for cpu-bound batch processes.
736          */
737         if (++dd->rrcount >= usched_dfly_rrinterval) {
738                 dd->rrcount = 0;
739                 need_user_resched();
740         }
741
742         /*
743          * Adjust estcpu upward using a real time equivalent calculation.
744          */
745         lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
746
747         /*
748          * Spinlocks also hold a critical section, so none should be
749          * active here.
750          */
751         KKASSERT(gd->gd_spinlocks_wr == 0);
752
753         dfly_resetpriority(lp);
754 }
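/*
 * Worked example of the clock arithmetic above (illustrative only):
 * usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10 rounds ESTCPUFREQ / 10 up,
 * so the round-robin check fires roughly ten times per second regardless of
 * the exact ESTCPUFREQ value.  Each schedulerclock tick also bumps estcpu
 * by ESTCPUMAX / ESTCPUFREQ + 1, so a continuously running thread
 * accumulates on the order of ESTCPUMAX per second before ESTCPULIM()
 * clamps it and dfly_recalculate_estcpu() decays it back down.
 */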
755
756 /*
757  * Called from acquire and from kern_synch's one-second timer (one of the
758  * callout helper threads) with a critical section held.
759  *
760  * Decay p_estcpu based on the number of ticks we haven't been running
761  * and our p_nice.  As the load increases each process observes a larger
762  * number of idle ticks (because other processes are running in them).
763  * This observation leads to a larger correction which tends to make the
764  * system more 'batchy'.
765  *
766  * Note that no recalculation occurs for a process which sleeps and wakes
767  * up in the same tick.  That is, a system doing thousands of context
768  * switches per second will still only do serious estcpu calculations
769  * ESTCPUFREQ times per second.
770  */
771 static
772 void
773 dfly_recalculate_estcpu(struct lwp *lp)
774 {
775         globaldata_t gd = mycpu;
776         dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
777         sysclock_t cpbase;
778         sysclock_t ttlticks;
779         int estcpu;
780         int decay_factor;
781
782         /*
783          * We have to subtract periodic to get the last schedclock
784          * timeout time, otherwise we would get the upcoming timeout.
785          * Keep in mind that a process can migrate between cpus and
786          * while the scheduler clock should be very close, boundary
787          * conditions could lead to a small negative delta.
788          */
789         cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
790
791         if (lp->lwp_slptime > 1) {
792                 /*
793                  * Too much time has passed, do a coarse correction.
794                  */
795                 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
796                 dfly_resetpriority(lp);
797                 lp->lwp_cpbase = cpbase;
798                 lp->lwp_cpticks = 0;
799                 lp->lwp_batch -= ESTCPUFREQ;
800                 if (lp->lwp_batch < 0)
801                         lp->lwp_batch = 0;
802         } else if (lp->lwp_cpbase != cpbase) {
803                 /*
804                  * Adjust estcpu if we are in a different tick.  Don't waste
805                  * time if we are in the same tick.
806                  *
807                  * First calculate the number of ticks in the measurement
808                  * interval.  The ttlticks calculation can wind up 0 due to
809                  * a bug in the handling of lwp_slptime  (as yet not found),
810                  * so make sure we do not get a divide by 0 panic.
811                  */
812                 ttlticks = (cpbase - lp->lwp_cpbase) /
813                            gd->gd_schedclock.periodic;
814                 if (ttlticks < 0) {
815                         ttlticks = 0;
816                         lp->lwp_cpbase = cpbase;
817                 }
818                 if (ttlticks == 0)
819                         return;
820                 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
821
822                 /*
823                  * Calculate the percentage of one cpu used factoring in ncpus
824                  * and the load and adjust estcpu.  Handle degenerate cases
825                  * by adding 1 to runqcount.
826                  *
827                  * estcpu is scaled by ESTCPUMAX.
828                  *
829                  * runqcount is the excess number of user processes
830                  * that cannot be immediately scheduled to cpus.  We want
831                  * to count these as running to avoid range compression
832                  * in the base calculation (which is the actual percentage
833                  * of one cpu used).
834                  */
835                 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
836                          (dd->runqcount + ncpus) / (ncpus * ttlticks);
837
838                 /*
839                  * If estcpu is > 50% we become more batch-like
840                  * If estcpu is <= 50% we become less batch-like
841                  *
842                  * It takes 30 cpu seconds to traverse the entire range.
843                  */
844                 if (estcpu > ESTCPUMAX / 2) {
845                         lp->lwp_batch += ttlticks;
846                         if (lp->lwp_batch > BATCHMAX)
847                                 lp->lwp_batch = BATCHMAX;
848                 } else {
849                         lp->lwp_batch -= ttlticks;
850                         if (lp->lwp_batch < 0)
851                                 lp->lwp_batch = 0;
852                 }
853
854                 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
855                         kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
856                                 lp->lwp_proc->p_pid, lp,
857                                 estcpu, lp->lwp_estcpu,
858                                 lp->lwp_batch,
859                                 lp->lwp_cpticks, ttlticks);
860                 }
861
862                 /*
863                  * Adjust lp->lwp_esetcpu.  The decay factor determines how
864                  * quickly lwp_estcpu collapses to its realtime calculation.
865                  * A slower collapse gives us a more accurate number but
866                  * can cause a cpu hog to eat too much cpu before the
867                  * scheduler decides to downgrade it.
868                  *
869                  * NOTE: p_nice is accounted for in dfly_resetpriority(),
870                  *       and not here, but we must still ensure that a
871                  *       cpu-bound nice -20 process does not completely
872                  *       override a cpu-bound nice +20 process.
873                  *
874                  * NOTE: We must use ESTCPULIM() here to deal with any
875                  *       overshoot.
876                  */
877                 decay_factor = usched_dfly_decay;
878                 if (decay_factor < 1)
879                         decay_factor = 1;
880                 if (decay_factor > 1024)
881                         decay_factor = 1024;
882
883                 lp->lwp_estcpu = ESTCPULIM(
884                         (lp->lwp_estcpu * decay_factor + estcpu) /
885                         (decay_factor + 1));
886
887                 if (usched_dfly_debug == lp->lwp_proc->p_pid)
888                         kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
889                 dfly_resetpriority(lp);
890                 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
891                 lp->lwp_cpticks = 0;
892         }
893 }
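/*
 * Worked example of the decay above, using assumed illustrative numbers:
 * with the default usched_dfly_decay of 8 the update is a weighted average,
 *
 *	estcpu = (estcpu * 8 + measured) / 9
 *
 * so a previously idle thread (estcpu 0) that measures fully cpu-bound
 * (measured ~= ESTCPUMAX = 16384) moves to ~1820 after one interval and
 * ~3438 after two, converging geometrically on the measured value.
 */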
894
895 /*
896  * Compute the priority of a process when running in user mode.
897  * Arrange to reschedule if the resulting priority is better
898  * than that of the current process.
899  *
900  * This routine may be called with any process.
901  *
902  * This routine is called by fork1() for initial setup with the process
903  * off the run queue, and also may be called normally with the process on or
904  * off the run queue.
905  */
906 static void
907 dfly_resetpriority(struct lwp *lp)
908 {
909         dfly_pcpu_t rdd;
910         int newpriority;
911         u_short newrqtype;
912         int rcpu;
913         int checkpri;
914         int estcpu;
915
916         crit_enter();
917
918         /*
919          * Lock the scheduler (lp) belongs to.  This can be on a different
920          * cpu.  Handle races.  This loop breaks out with the appropriate
921          * rdd locked.
922          */
923         for (;;) {
924                 rcpu = lp->lwp_qcpu;
925                 rdd = &dfly_pcpu[rcpu];
926                 spin_lock(&rdd->spin);
927                 if (rcpu == lp->lwp_qcpu)
928                         break;
929                 spin_unlock(&rdd->spin);
930         }
931
932         /*
933          * Calculate the new priority and queue type
934          */
935         newrqtype = lp->lwp_rtprio.type;
936
937         switch(newrqtype) {
938         case RTP_PRIO_REALTIME:
939         case RTP_PRIO_FIFO:
940                 newpriority = PRIBASE_REALTIME +
941                              (lp->lwp_rtprio.prio & PRIMASK);
942                 break;
943         case RTP_PRIO_NORMAL:
944                 /*
945                  * Detune estcpu based on batchiness.  lwp_batch ranges
946                  * from 0 to  BATCHMAX.  Limit estcpu for the sake of
947                  * the priority calculation to between 50% and 100%.
948                  */
949                 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
950                          (BATCHMAX * 2);
951
952                 /*
953                  * p_nice piece         Adds (0-40) * 2         0-80
954                  * estcpu               Adds 16384  * 4 / 512   0-128
955                  */
956                 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
957                 newpriority += estcpu * PPQ / ESTCPUPPQ;
958                 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
959                               NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
960                 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
961                 break;
962         case RTP_PRIO_IDLE:
963                 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
964                 break;
965         case RTP_PRIO_THREAD:
966                 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
967                 break;
968         default:
969                 panic("Bad RTP_PRIO %d", newrqtype);
970                 /* NOT REACHED */
971         }
972
973         /*
974          * The newpriority incorporates the queue type so do a simple masked
975          * check to determine if the process has moved to another queue.  If
976          * it has, and it is currently on a run queue, then move it.
977          *
978          * Since uload is ~PPQMASK masked, no modifications are necessary if
979          * we end up in the same run queue.
980          */
981         if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
982                 int delta_uload;
983
984                 /*
985                  * uload can change, calculate the adjustment to reduce
986                  * edge cases since choosers scan the cpu topology without
987                  * locks.
988                  */
989                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
990                         delta_uload =
991                                 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
992                                 ((newpriority & ~PPQMASK) & PRIMASK);
993                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
994                                        delta_uload);
995                 }
996                 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
997                         dfly_remrunqueue_locked(rdd, lp);
998                         lp->lwp_priority = newpriority;
999                         lp->lwp_rqtype = newrqtype;
1000                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1001                         dfly_setrunqueue_locked(rdd, lp);
1002                         checkpri = 1;
1003                 } else {
1004                         lp->lwp_priority = newpriority;
1005                         lp->lwp_rqtype = newrqtype;
1006                         lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1007                         checkpri = 0;
1008                 }
1009         } else {
1010                 /*
1011                  * In the same PPQ, uload cannot change.
1012                  */
1013                 lp->lwp_priority = newpriority;
1014                 checkpri = 1;
1015                 rcpu = -1;
1016         }
1017
1018         /*
1019          * Determine if we need to reschedule the target cpu.  This only
1020          * occurs if the LWP is already on a scheduler queue, which means
1021  * that idle cpu notification has already occurred.  At most we
1022          * need only issue a need_user_resched() on the appropriate cpu.
1023          *
1024          * The LWP may be owned by a CPU different from the current one,
1025          * in which case dd->uschedcp may be modified without an MP lock
1026          * or a spinlock held.  The worst that happens is that the code
1027          * below causes a spurious need_user_resched() on the target CPU
1028  * and dd->upri to be wrong for a short period of time, both of
1029          * which are harmless.
1030          *
1031          * If checkpri is 0 we are adjusting the priority of the current
1032  * process, possibly higher (less desirable), so ignore the upri
1033          * check which will fail in that case.
1034          */
1035         if (rcpu >= 0) {
1036                 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1037                     (checkpri == 0 ||
1038                      (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1039 #ifdef SMP
1040                         if (rcpu == mycpu->gd_cpuid) {
1041                                 spin_unlock(&rdd->spin);
1042                                 need_user_resched();
1043                         } else {
1044                                 atomic_clear_cpumask(&dfly_rdyprocmask,
1045                                                      CPUMASK(rcpu));
1046                                 spin_unlock(&rdd->spin);
1047                                 lwkt_send_ipiq(globaldata_find(rcpu),
1048                                                dfly_need_user_resched_remote,
1049                                                NULL);
1050                         }
1051 #else
1052                         spin_unlock(&rdd->spin);
1053                         need_user_resched();
1054 #endif
1055                 } else {
1056                         spin_unlock(&rdd->spin);
1057                 }
1058         } else {
1059                 spin_unlock(&rdd->spin);
1060         }
1061         crit_exit();
1062 }
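/*
 * Worked example of the RTP_PRIO_NORMAL calculation above (assumed,
 * illustrative numbers): for a nice 0 thread whose detuned estcpu is 8192
 * (half of ESTCPUMAX),
 *
 *	nice piece:   (0 - PRIO_MIN) * PPQ / NICEPPQ = 20 * 4 / 2      = 40
 *	estcpu piece: 8192 * PPQ / ESTCPUPPQ         = 8192 * 4 / 512  = 64
 *	scaling:      (40 + 64) * MAXPRI / 210       = 104 * 128 / 210 = 63
 *
 * where 210 = PRIO_RANGE * PPQ / NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ,
 * giving lwp_priority = PRIBASE_NORMAL + 63 and rqindex = 63 / PPQ = 15.
 */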
1063
1064 static
1065 void
1066 dfly_yield(struct lwp *lp)
1067 {
1068 #if 0
1069         /* FUTURE (or something similar) */
1070         switch(lp->lwp_rqtype) {
1071         case RTP_PRIO_NORMAL:
1072                 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1073                 break;
1074         default:
1075                 break;
1076         }
1077 #endif
1078         need_user_resched();
1079 }
1080
1081 /*
1082  * Called from fork1() when a new child process is being created.
1083  *
1084  * Give the child process an initial estcpu that is more batchy than
1085  * its parent and dock the parent for the fork (but do not
1086  * reschedule the parent).   This comprises the main part of our batch
1087  * detection heuristic for both parallel forking and sequential execs.
1088  *
1089  * XXX lwp should be "spawning" instead of "forking"
1090  */
1091 static void
1092 dfly_forking(struct lwp *plp, struct lwp *lp)
1093 {
1094         /*
1095          * Put the child 4 queue slots (out of 32) higher than the parent
1096          * (less desirable than the parent).
1097          */
1098         lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1099         lp->lwp_forked = 1;
1100
1101         /*
1102          * The batch status of children always starts out centerline
1103          * and will inch-up or inch-down as appropriate.  It takes roughly
1104          * 15 seconds of >50% cpu to hit the limit.
1105          */
1106         lp->lwp_batch = BATCHMAX / 2;
1107
1108         /*
1109          * Dock the parent a cost for the fork, protecting us from fork
1110          * bombs.  If the parent is forking quickly make the child more
1111          * batchy.
1112          */
1113         plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
1114 }
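/*
 * Worked example of the fork accounting above (illustrative): with
 * ESTCPUPPQ = 512 the child starts at the parent's estcpu + 2048 (four
 * queues less desirable) and the parent is docked 512 / 16 = 32 per fork,
 * so a rapidly forking parent accumulates estcpu and its later children
 * start out progressively more batchy.
 */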
1115
1116 /*
1117  * Called when a lwp is being removed from this scheduler, typically
1118  * during lwp_exit().
1119  */
1120 static void
1121 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1122 {
1123         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1124
1125         if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1126                 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1127                 atomic_add_int(&dd->uload,
1128                                -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1129         }
1130 }
1131
1132 static void
1133 dfly_uload_update(struct lwp *lp)
1134 {
1135         dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1136
1137         if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1138                 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1139                         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1140                         atomic_add_int(&dd->uload,
1141                                    ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1142                 }
1143         } else {
1144                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1145                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1146                         atomic_add_int(&dd->uload,
1147                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1148                 }
1149         }
1150 }
1151
1152 /*
1153  * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1154  * it selects a user process and returns it.  If chklp is non-NULL and chklp
1155  * has a better or equal priority than the process that would otherwise be
1156  * chosen, NULL is returned.
1157  *
1158  * Until we fix the RUNQ code the chklp test has to be strict or we may
1159  * bounce between processes trying to acquire the current process designation.
1160  *
1161  * Must be called with dd->spin exclusively held.  The spinlock is
1162  * left intact through the entire routine.
1163  *
1164  * if chklp is NULL this function will dive other cpu's queues looking
1165  * for work if the current queue is empty.
1166  */
1167 static
1168 struct lwp *
1169 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1170 {
1171 #ifdef SMP
1172         dfly_pcpu_t xdd;
1173 #endif
1174         struct lwp *lp;
1175         struct rq *q;
1176         u_int32_t *which, *which2;
1177         u_int32_t pri;
1178         u_int32_t rtqbits;
1179         u_int32_t tsqbits;
1180         u_int32_t idqbits;
1181
1182         rtqbits = dd->rtqueuebits;
1183         tsqbits = dd->queuebits;
1184         idqbits = dd->idqueuebits;
1185
1186         if (rtqbits) {
1187                 pri = bsfl(rtqbits);
1188                 q = &dd->rtqueues[pri];
1189                 which = &dd->rtqueuebits;
1190                 which2 = &rtqbits;
1191         } else if (tsqbits) {
1192                 pri = bsfl(tsqbits);
1193                 q = &dd->queues[pri];
1194                 which = &dd->queuebits;
1195                 which2 = &tsqbits;
1196         } else if (idqbits) {
1197                 pri = bsfl(idqbits);
1198                 q = &dd->idqueues[pri];
1199                 which = &dd->idqueuebits;
1200                 which2 = &idqbits;
1201         } else
1202 #ifdef SMP
1203         if (isremote) {
1204                 /*
1205                  * Disallow remote->remote recursion
1206                  */
1207                 return (NULL);
1208         } else {
1209                 /*
1210                  * Pull a runnable thread from a remote run queue.  We have
1211                  * to adjust qcpu and uload manually because the lp we return
1212                  * might be assigned directly to uschedcp (setrunqueue might
1213                  * not be called).
1214                  */
1215                 xdd = dfly_choose_worst_queue(dd);
1216                 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1217                         lp = dfly_chooseproc_locked(xdd, NULL, 1);
1218                         if (lp) {
1219                                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1220                                         atomic_add_int(&xdd->uload,
1221                                             -((lp->lwp_priority & ~PPQMASK) &
1222                                               PRIMASK));
1223                                 }
1224                                 lp->lwp_qcpu = dd->cpuid;
1225                                 atomic_add_int(&dd->uload,
1226                                     ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1227                                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1228                         }
1229                         spin_unlock(&xdd->spin);
1230                 } else {
1231                         lp = NULL;
1232                 }
1233                 return (lp);
1234         }
1235 #else
1236         {
1237                 return NULL;
1238         }
1239 #endif
1240         lp = TAILQ_FIRST(q);
1241         KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1242
1243         /*
1244          * If the passed lwp <chklp> is reasonably close to the selected
1245          * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1246          *
1247          * Note that we must error on the side of <chklp> to avoid bouncing
1248          * between threads in the acquire code.
1249          */
1250         if (chklp) {
1251                 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1252                         return(NULL);
1253         }
1254
1255         KTR_COND_LOG(usched_chooseproc,
1256             lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1257             lp->lwp_proc->p_pid,
1258             lp->lwp_thread->td_gd->gd_cpuid,
1259             mycpu->gd_cpuid);
1260
1261         TAILQ_REMOVE(q, lp, lwp_procq);
1262         --dd->runqcount;
1263         if (TAILQ_EMPTY(q))
1264                 *which &= ~(1 << pri);
1265         KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1266         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1267
1268         return lp;
1269 }
1270
1271 #ifdef SMP
1272
1273 /*
1274  * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1275  *
1276  * Choose a cpu node to schedule lp on, hopefully nearby its current
1277  * node.  We give the current node a modest advantage for obvious reasons.
1278  *
1279  * We also give the node the thread was woken up FROM a slight advantage
1280  * in order to try to schedule paired threads which synchronize/block waiting
1281  * for each other fairly close to each other.  Similarly in a network setting
1282  * this feature will also attempt to place a user process near the kernel
1283  * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
1284  * algorithm as it heuristically groups synchronizing processes for locality
1285  * of reference in multi-socket systems.
1286  *
1287  * The caller will normally dfly_setrunqueue() lp on the returned queue.
1288  *
1289  * When the topology is known, choose a cpu whose group has, in
1290  * aggregate, the lowest weighted load.
1291  */
1292 static
1293 dfly_pcpu_t
1294 dfly_choose_best_queue(struct lwp *lp)
1295 {
1296         cpumask_t mask;
1297         cpu_node_t *cpup;
1298         cpu_node_t *cpun;
1299         cpu_node_t *cpub;
1300         dfly_pcpu_t dd1 = &dfly_pcpu[lp->lwp_qcpu];
1301         dfly_pcpu_t dd2 = &dfly_pcpu[lp->lwp_thread->td_wakefromcpu];
1302         dfly_pcpu_t rdd;
1303         int cpuid;
1304         int n;
1305         int load;
1306         int lowest_load;
1307         int level;
1308
1309         /*
1310          * When the topology is unknown choose a random cpu that is hopefully
1311          * idle.
1312          */
1313         if (dd1->cpunode == NULL)
1314                 return (dfly_choose_queue_simple(dd1, lp));
1315
1316         /*
1317          * When the topology is known, choose a cpu whose group has,
1318          * in aggregate, the lowest weighted load.
1319          */
1320         cpup = root_cpu_node;
1321         rdd = dd1;
1322         level = cpu_topology_levels_number;
1323
1324         while (cpup) {
1325                 /*
1326                  * Degenerate case super-root
1327                  */
1328                 if (cpup->child_node && cpup->child_no == 1) {
1329                         cpup = cpup->child_node;
1330                         --level;
1331                         continue;
1332                 }
1333
1334                 /*
1335                  * Terminal cpunode
1336                  */
1337                 if (cpup->child_node == NULL) {
1338                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1339                         break;
1340                 }
1341
1342                 cpub = NULL;
1343                 lowest_load = 0x7FFFFFFF;
1344
1345                 for (n = 0; n < cpup->child_no; ++n) {
1346                         /*
1347                          * Accumulate load information for all cpus
1348                          * which are members of this node.
1349                          */
1350                         cpun = &cpup->child_node[n];
1351                         mask = cpun->members & usched_global_cpumask &
1352                                smp_active_mask & lp->lwp_cpumask;
1353                         if (mask == 0)
1354                                 continue;
1355                         load = 0;
1356                         while (mask) {
1357                                 cpuid = BSFCPUMASK(mask);
1358                                 load += dfly_pcpu[cpuid].uload;
1359                                 mask &= ~CPUMASK(cpuid);
1360                         }
1361
1362                         /*
1363                          * Give a slight advantage to nearby cpus.
1364                          */
1365                         if (cpun->members & dd1->cpumask)
1366                                 load -= PPQ * level * usched_dfly_weight1 / 10;
1367                         else if (cpun->members & dd2->cpumask)
1368                                 load -= PPQ * level * usched_dfly_weight2 / 10;
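                        /*
                         * Worked example with illustrative numbers: PPQ is 4
                         * (128 priorities / 32 queues), so with level == 2 and
                         * a hypothetical usched_dfly_weight1 of 10 a group
                         * containing the lwp's current cpu is discounted by
                         * 4 * 2 * 10 / 10 == 8 uload units, nudging the choice
                         * toward dd1 (and, via weight2, toward the wakeup
                         * source dd2) when loads are otherwise similar.
                         */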
1369
1370                         /*
1371                          * Calculate the best load
1372                          */
1373                         if (cpub == NULL || lowest_load > load ||
1374                             (lowest_load == load &&
1375                              (cpun->members & dd1->cpumask))
1376                         ) {
1377                                 lowest_load = load;
1378                                 cpub = cpun;
1379                         }
1380                 }
1381                 cpup = cpub;
1382                 --level;
1383         }
1384         if (usched_dfly_chooser)
1385                 kprintf("lp %02d->%02d %s\n",
1386                         lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1387         return (rdd);
1388 }
1389
1390 /*
1391  * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1392  *
1393  * Choose the worst queue close to dd's cpu node with a non-empty runq.
1394  *
1395  * This is used by the thread chooser when the current cpu's queues are
1396  * empty to steal a thread from another cpu's queue.  We want to offload
1397  * the most heavily-loaded queue.
1398  */
1399 static
1400 dfly_pcpu_t
1401 dfly_choose_worst_queue(dfly_pcpu_t dd)
1402 {
1403         cpumask_t mask;
1404         cpu_node_t *cpup;
1405         cpu_node_t *cpun;
1406         cpu_node_t *cpub;
1407         dfly_pcpu_t rdd;
1408         int cpuid;
1409         int n;
1410         int load;
1411         int highest_load;
1412         int uloadok;
1413         int level;
1414
1415         /*
1416          * When the topology is unknown we have no reasonable way to pick
1417          * a victim cpu, so do not steal anything.
1418          */
1419         if (dd->cpunode == NULL) {
1420                 return (NULL);
1421         }
1422
1423         /*
1424          * When the topology is known choose the cpu whose group has, in
1425          * aggregate, the highest weighted load (the worst queue).
1426          */
1427         cpup = root_cpu_node;
1428         rdd = dd;
1429         level = cpu_topology_levels_number;
1430         while (cpup) {
1431                 /*
1432                  * Degenerate case super-root
1433                  */
1434                 if (cpup->child_node && cpup->child_no == 1) {
1435                         cpup = cpup->child_node;
1436                         --level;
1437                         continue;
1438                 }
1439
1440                 /*
1441                  * Terminal cpunode
1442                  */
1443                 if (cpup->child_node == NULL) {
1444                         rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1445                         break;
1446                 }
1447
1448                 cpub = NULL;
1449                 highest_load = 0;
1450
1451                 for (n = 0; n < cpup->child_no; ++n) {
1452                         /*
1453                          * Accumulate load information for all cpus
1454                          * which are members of this node.
1455                          */
1456                         cpun = &cpup->child_node[n];
1457                         mask = cpun->members & usched_global_cpumask &
1458                                smp_active_mask;
1459                         if (mask == 0)
1460                                 continue;
1461                         load = 0;
1462                         uloadok = 0;
1463                         while (mask) {
1464                                 cpuid = BSFCPUMASK(mask);
1465                                 load += dfly_pcpu[cpuid].uload;
1466                                 if (dfly_pcpu[cpuid].uload)
1467                                         uloadok = 1;
1468                                 mask &= ~CPUMASK(cpuid);
1469                         }
1470
1471                         /*
1472                          * Bias the choice toward nearby cpus (steal locally).
1473                          */
1474                         if (cpun->members & dd->cpumask)
1475                                 load += PPQ * level;
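                        /*
                         * Note the sign relative to dfly_choose_best_queue():
                         * here a nearby group has its apparent load inflated
                         * (by PPQ * level, e.g. 4 * 2 == 8 with the same
                         * illustrative level), making it more likely to be
                         * selected as the steal source so pulls stay local.
                         */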
1476
1477                         /*
1478                          * The best candidate is the one with the worst
1479                          * (highest) load.  Prefer candidates that are
1480                          * closer to our cpu.
1481                          */
1482                         if (uloadok &&
1483                             (cpub == NULL || highest_load < load ||
1484                              (highest_load == load &&
1485                               (cpun->members & dd->cpumask)))
1486                         ) {
1487                                 highest_load = load;
1488                                 cpub = cpun;
1489                         }
1490                 }
1491                 cpup = cpub;
1492                 --level;
1493         }
1494         return (rdd);
1495 }
1496
1497 static
1498 dfly_pcpu_t
1499 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1500 {
1501         dfly_pcpu_t rdd;
1502         cpumask_t tmpmask;
1503         cpumask_t mask;
1504         int cpuid;
1505
1506         /*
1507          * Fallback to the original heuristic, select random cpu,
1508          * first checking cpus not currently running a user thread.
1509          */
1510         ++dfly_scancpu;
1511         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1512         mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1513                smp_active_mask & usched_global_cpumask;
1514
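        /*
         * Scan mechanics, sketched with illustrative numbers: if ncpus == 4
         * and the hash lands on cpuid == 2, tmpmask == ~(CPUMASK(2) - 1)
         * covers cpus 2 and 3, so they are probed first; if neither is in
         * the candidate mask, BSFCPUMASK(mask) wraps the scan back around
         * to the lowest eligible cpu, giving each call a rotating start.
         */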
1515         while (mask) {
1516                 tmpmask = ~(CPUMASK(cpuid) - 1);
1517                 if (mask & tmpmask)
1518                         cpuid = BSFCPUMASK(mask & tmpmask);
1519                 else
1520                         cpuid = BSFCPUMASK(mask);
1521                 rdd = &dfly_pcpu[cpuid];
1522
1523                 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1524                         goto found;
1525                 mask &= ~CPUMASK(cpuid);
1526         }
1527
1528         /*
1529          * Then cpus which might have a currently running lp
1530          */
1531         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1532         mask = dfly_curprocmask & dfly_rdyprocmask &
1533                lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1534
1535         while (mask) {
1536                 tmpmask = ~(CPUMASK(cpuid) - 1);
1537                 if (mask & tmpmask)
1538                         cpuid = BSFCPUMASK(mask & tmpmask);
1539                 else
1540                         cpuid = BSFCPUMASK(mask);
1541                 rdd = &dfly_pcpu[cpuid];
1542
1543                 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1544                         goto found;
1545                 mask &= ~CPUMASK(cpuid);
1546         }
1547
1548         /*
1549          * If we cannot find a suitable cpu we reload from dfly_scancpu
1550          * and round-robin.  Other cpus will pickup as they release their
1551          * current lwps or become ready.
1552          *
1553          * Avoid a degenerate system lockup case if usched_global_cpumask
1554          * is set to 0 or otherwise does not cover lwp_cpumask.
1555          *
1556          * We only kick the target helper thread in this case; we do not
1557          * set the user resched flag here.
1558          */
1559         cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1560         if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1561                 cpuid = 0;
1562         rdd = &dfly_pcpu[cpuid];
1563 found:
1564         return (rdd);
1565 }
1566
1567 static
1568 void
1569 dfly_need_user_resched_remote(void *dummy)
1570 {
1571         globaldata_t gd = mycpu;
1572         dfly_pcpu_t  dd = &dfly_pcpu[gd->gd_cpuid];
1573
1574         need_user_resched();
1575
1576         /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1577         wakeup_mycpu(&dd->helper_thread);
1578 }
1579
1580 #endif
1581
1582 /*
1583  * dfly_remrunqueue_locked() removes a given process from the run queue
1584  * that it is on, clearing the queue busy bit if it becomes empty.
1585  *
1586  * Note that the user process scheduler is different from the LWKT scheduler.
1587  * The user process scheduler only manages user processes but it uses LWKT
1588  * underneath, and a user process operating in the kernel will often be
1589  * 'released' from our management.
1590  *
1591  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread goes
1592  * to sleep or the lwp is moved to a different runq.
1593  */
1594 static void
1595 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1596 {
1597         struct rq *q;
1598         u_int32_t *which;
1599         u_int8_t pri;
1600
1601         KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1602         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1603         --rdd->runqcount;
1604         /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1605         KKASSERT(rdd->runqcount >= 0);
1606
1607         pri = lp->lwp_rqindex;
1608         switch(lp->lwp_rqtype) {
1609         case RTP_PRIO_NORMAL:
1610                 q = &rdd->queues[pri];
1611                 which = &rdd->queuebits;
1612                 break;
1613         case RTP_PRIO_REALTIME:
1614         case RTP_PRIO_FIFO:
1615                 q = &rdd->rtqueues[pri];
1616                 which = &rdd->rtqueuebits;
1617                 break;
1618         case RTP_PRIO_IDLE:
1619                 q = &rdd->idqueues[pri];
1620                 which = &rdd->idqueuebits;
1621                 break;
1622         default:
1623                 panic("remrunqueue: invalid rtprio type");
1624                 /* NOT REACHED */
1625         }
1626         TAILQ_REMOVE(q, lp, lwp_procq);
1627         if (TAILQ_EMPTY(q)) {
1628                 KASSERT((*which & (1 << pri)) != 0,
1629                         ("remrunqueue: remove from empty queue"));
1630                 *which &= ~(1 << pri);
1631         }
1632 }
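/*
 * Reader's note (sketch, not normative): each queue array is paired with a
 * 32-bit queuebits word in which bit N is set exactly while queue N is
 * non-empty, e.g. an lwp at rqindex 5 toggles bit (1 << 5) on insert/remove.
 * The chooser can then locate the best non-empty queue with a single
 * find-first-set style scan rather than walking all 32 queues.
 */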
1633
1634 /*
1635  * dfly_setrunqueue_locked()
1636  *
1637  * Add a process whose rqtype and rqindex have previously been calculated
1638  * onto the appropriate run queue.   Determine if the addition requires
1639  * a reschedule on a cpu and return the cpuid or -1.
1640  *
1641  * NOTE:          Lower priorities are better priorities.
1642  *
1643  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1644  *                sum of the rough lwp_priority for all running and runnable
1645  *                processes.  Lower priority processes (higher lwp_priority
1646  *                values) actually DO count as more load, not less, because
1647  *                these are the programs which require the most care with
1648  *                regards to cpu selection.
1649  */
1650 static void
1651 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1652 {
1653         struct rq *q;
1654         u_int32_t *which;
1655         int pri;
1656
1657         if (lp->lwp_qcpu != rdd->cpuid) {
1658                 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1659                         atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1660                         atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1661                                    -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1662                 }
1663                 lp->lwp_qcpu = rdd->cpuid;
1664         }
1665
1666         KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1667         atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1668         ++rdd->runqcount;
1669         if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1670                 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1671                 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1672                                (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1673         }
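        /*
         * Worked example (illustrative): with PPQMASK == 3 and PRIMASK == 127
         * an lwp whose lwp_priority happens to be 141 contributes
         * (141 & ~3) & 127 == 12 uload units to this cpu; a more cpu-bound
         * or nice'd lwp with a larger lwp_priority contributes more, which
         * matches the NOTE ON ULOAD above.
         */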
1674
1675         pri = lp->lwp_rqindex;
1676
1677         switch(lp->lwp_rqtype) {
1678         case RTP_PRIO_NORMAL:
1679                 q = &rdd->queues[pri];
1680                 which = &rdd->queuebits;
1681                 break;
1682         case RTP_PRIO_REALTIME:
1683         case RTP_PRIO_FIFO:
1684                 q = &rdd->rtqueues[pri];
1685                 which = &rdd->rtqueuebits;
1686                 break;
1687         case RTP_PRIO_IDLE:
1688                 q = &rdd->idqueues[pri];
1689                 which = &rdd->idqueuebits;
1690                 break;
1691         default:
1692                 panic("setrunqueue: invalid rtprio type");
1693                 /* NOT REACHED */
1694         }
1695
1696         /*
1697          * Add to the correct queue and set the appropriate bit.  If no
1698          * lower priority (i.e. better) processes are in the queue then
1699          * we want a reschedule, calculate the best cpu for the job.
1700          *
1701          * Always run reschedules on the LWP's original cpu.
1702          */
1703         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1704         *which |= 1 << pri;
1705 }
1706
1707 #ifdef SMP
1708
1709 /*
1710  * For SMP systems a user scheduler helper thread is created for each
1711  * cpu and is used to allow one cpu to wakeup another for the purposes of
1712  * scheduling userland threads from setrunqueue().
1713  *
1714  * UP systems do not need the helper since there is only one cpu.
1715  *
1716  * We can't use the idle thread for this because we might block.
1717  * Additionally, doing things this way allows us to HLT idle cpus
1718  * on MP systems.
1719  */
1720 static void
1721 dfly_helper_thread(void *dummy)
1722 {
1723     globaldata_t gd;
1724     dfly_pcpu_t  dd;
1725     struct lwp *nlp;
1726     cpumask_t mask;
1727     int cpuid;
1728
1729     gd = mycpu;
1730     cpuid = gd->gd_cpuid;       /* doesn't change */
1731     mask = gd->gd_cpumask;      /* doesn't change */
1732     dd = &dfly_pcpu[cpuid];
1733
1734     /*
1735      * Since we only want to be woken up when no user processes
1736      * are scheduled on a cpu, run at an ultra low priority.
1737      */
1738     lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1739
1740     tsleep(&dd->helper_thread, 0, "schslp", 0);
1741
1742     for (;;) {
1743         /*
1744          * We use the LWKT deschedule-interlock trick to avoid racing
1745          * dfly_rdyprocmask.  This means we cannot block through to the
1746          * manual lwkt_switch() call we make below.
1747          */
1748         crit_enter_gd(gd);
1749         tsleep_interlock(&dd->helper_thread, 0);
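        /*
         * Sketch of the interlock: tsleep_interlock() registers us on the
         * sleep queue now, dfly_rdyprocmask is set while we are still inside
         * the critical section, and the actual block happens in the
         * tsleep(..., PINTERLOCKED, ...) at the bottom of the loop.  A wakeup
         * issued anywhere in between is not lost; it merely turns that final
         * tsleep() into a no-op.
         */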
1750
1751         /*spin_lock(&dfly_spin);*/
1752         spin_lock(&dd->spin);
1753
1754         atomic_set_cpumask(&dfly_rdyprocmask, mask);
1755         clear_user_resched();   /* This satisfies the reschedule request */
1756         dd->rrcount = 0;        /* Reset the round-robin counter */
1757
1758         if ((dfly_curprocmask & mask) == 0) {
1759                 /*
1760                  * No thread is currently scheduled.
1761                  */
1762                 KKASSERT(dd->uschedcp == NULL);
1763                 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1764                         KTR_COND_LOG(usched_sched_thread_no_process,
1765                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1766                             gd->gd_cpuid,
1767                             nlp->lwp_proc->p_pid,
1768                             nlp->lwp_thread->td_gd->gd_cpuid);
1769
1770                         atomic_set_cpumask(&dfly_curprocmask, mask);
1771                         dd->upri = nlp->lwp_priority;
1772                         dd->uschedcp = nlp;
1773                         dd->rrcount = 0;        /* reset round robin */
1774                         spin_unlock(&dd->spin);
1775                         /*spin_unlock(&dfly_spin);*/
1776                         lwkt_acquire(nlp->lwp_thread);
1777                         lwkt_schedule(nlp->lwp_thread);
1778                 } else {
1779                         spin_unlock(&dd->spin);
1780                         /*spin_unlock(&dfly_spin);*/
1781                 }
1782         } else if (dd->runqcount) {
1783                 /*
1784                  * Possibly find a better process to schedule.
1785                  */
1786                 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1787                 if (nlp) {
1788                         KTR_COND_LOG(usched_sched_thread_process,
1789                             nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1790                             gd->gd_cpuid,
1791                             nlp->lwp_proc->p_pid,
1792                             nlp->lwp_thread->td_gd->gd_cpuid);
1793
1794                         dd->upri = nlp->lwp_priority;
1795                         dd->uschedcp = nlp;
1796                         dd->rrcount = 0;        /* reset round robin */
1797                         spin_unlock(&dd->spin);
1798                         /*spin_unlock(&dfly_spin);*/
1799                         lwkt_acquire(nlp->lwp_thread);
1800                         lwkt_schedule(nlp->lwp_thread);
1801                 } else {
1802                         /*
1803                          * Leave the thread on our run queue.  Another
1804                          * scheduler will try to pull it later.
1805                          */
1806                         spin_unlock(&dd->spin);
1807                         /*spin_unlock(&dfly_spin);*/
1808                 }
1809         } else {
1810                 /*
1811                  * The runq is empty.
1812                  */
1813                 spin_unlock(&dd->spin);
1814                 /*spin_unlock(&dfly_spin);*/
1815         }
1816
1817         /*
1818          * We're descheduled unless someone scheduled us.  Switch away.
1819          * Exiting the critical section will cause splz() to be called
1820          * for us if interrupts and such are pending.
1821          */
1822         crit_exit_gd(gd);
1823         tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1824     }
1825 }
1826
1827 /* sysctl stick_to_level parameter */
1828 static int
1829 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1830 {
1831         int error, new_val;
1832
1833         new_val = usched_dfly_stick_to_level;
1834
1835         error = sysctl_handle_int(oidp, &new_val, 0, req);
1836         if (error != 0 || req->newptr == NULL)
1837                 return (error);
1838         if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1839                 return (EINVAL);
1840         usched_dfly_stick_to_level = new_val;
1841         return (0);
1842 }
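/*
 * Illustrative usage, assuming the sysctl node created below:
 *
 *      sysctl kern.usched_dfly.stick_to_level=1
 *
 * Values outside [0, cpu_topology_levels_number - 1] are rejected with
 * EINVAL; reads simply report the current setting.
 */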
1843
1844 /*
1845  * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
1846  * been cleared by rqinit() and we should not mess with it further.
1847  */
1848 static void
1849 dfly_helper_thread_cpu_init(void)
1850 {
1851         int i;
1852         int j;
1853         int cpuid;
1854         int smt_not_supported = 0;
1855         int cache_coherent_not_supported = 0;
1856
1857         if (bootverbose)
1858                 kprintf("Start scheduler helpers on cpus:\n");
1859
1860         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1861         usched_dfly_sysctl_tree =
1862                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1863                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1864                                 "usched_dfly", CTLFLAG_RD, 0, "");
1865
1866         for (i = 0; i < ncpus; ++i) {
1867                 dfly_pcpu_t dd = &dfly_pcpu[i];
1868                 cpumask_t mask = CPUMASK(i);
1869
1870                 if ((mask & smp_active_mask) == 0)
1871                     continue;
1872
1873                 spin_init(&dd->spin);
1874                 dd->cpunode = get_cpu_node_by_cpuid(i);
1875                 dd->cpuid = i;
1876                 dd->cpumask = CPUMASK(i);
1877                 for (j = 0; j < NQS; j++) {
1878                         TAILQ_INIT(&dd->queues[j]);
1879                         TAILQ_INIT(&dd->rtqueues[j]);
1880                         TAILQ_INIT(&dd->idqueues[j]);
1881                 }
1882                 atomic_clear_cpumask(&dfly_curprocmask, 1);
1883
1884                 if (dd->cpunode == NULL) {
1885                         smt_not_supported = 1;
1886                         cache_coherent_not_supported = 1;
1887                         if (bootverbose)
1888                                 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1889                                          "found for cpu\n", i);
1890                 } else {
1891                         switch (dd->cpunode->type) {
1892                         case THREAD_LEVEL:
1893                                 if (bootverbose)
1894                                         kprintf ("\tcpu%d - HyperThreading "
1895                                                  "available. Core siblings: ",
1896                                                  i);
1897                                 break;
1898                         case CORE_LEVEL:
1899                                 smt_not_supported = 1;
1900
1901                                 if (bootverbose)
1902                                         kprintf ("\tcpu%d - No HT available, "
1903                                                  "multi-core/physical "
1904                                                  "cpu. Physical siblings: ",
1905                                                  i);
1906                                 break;
1907                         case CHIP_LEVEL:
1908                                 smt_not_supported = 1;
1909
1910                                 if (bootverbose)
1911                                         kprintf ("\tcpu%d - No HT available, "
1912                                                  "single-core/physical cpu. "
1913                                                  "Package Siblings: ",
1914                                                  i);
1915                                 break;
1916                         default:
1917                                 /* Let's go for safe defaults here */
1918                                 smt_not_supported = 1;
1919                                 cache_coherent_not_supported = 1;
1920                                 if (bootverbose)
1921                                         kprintf ("\tcpu%d - Unknown cpunode->"
1922                                                  "type=%u. Siblings: ",
1923                                                  i,
1924                                                  (u_int)dd->cpunode->type);
1925                                 break;
1926                         }
1927
1928                         if (bootverbose) {
1929                                 if (dd->cpunode->parent_node != NULL) {
1930                                         CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1931                                                 kprintf("cpu%d ", cpuid);
1932                                         kprintf("\n");
1933                                 } else {
1934                                         kprintf(" no siblings\n");
1935                                 }
1936                         }
1937                 }
1938
1939                 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1940                             0, i, "usched %d", i);
1941
1942                 /*
1943                  * Allow user scheduling on the target cpu.  cpu #0 has already
1944                  * been enabled in rqinit().
1945                  */
1946                 if (i)
1947                     atomic_clear_cpumask(&dfly_curprocmask, mask);
1948                 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1949                 dd->upri = PRIBASE_NULL;
1950
1951         }
1952
1953         /* usched_dfly sysctl configurable parameters */
1954
1955         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1956                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1957                        OID_AUTO, "rrinterval", CTLFLAG_RW,
1958                        &usched_dfly_rrinterval, 0, "");
1959         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1960                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1961                        OID_AUTO, "decay", CTLFLAG_RW,
1962                        &usched_dfly_decay, 0, "Extra decay when not running");
1963         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1964                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1965                        OID_AUTO, "batch_time", CTLFLAG_RW,
1966                        &usched_dfly_batch_time, 0, "Min batch counter value");
1967
1968         /* Add enable/disable option for SMT scheduling if supported */
1969         if (smt_not_supported) {
1970                 usched_dfly_smt = 0;
1971                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1972                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1973                                   OID_AUTO, "smt", CTLFLAG_RD,
1974                                   "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1975         } else {
1976                 usched_dfly_smt = 1;
1977                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1978                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1979                                OID_AUTO, "smt", CTLFLAG_RW,
1980                                &usched_dfly_smt, 0, "Enable SMT scheduling");
1981         }
1982
1983         /*
1984          * Add enable/disable option for cache coherent scheduling
1985          * if supported
1986          */
1987         if (cache_coherent_not_supported) {
1988                 usched_dfly_cache_coherent = 0;
1989                 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1990                                   SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1991                                   OID_AUTO, "cache_coherent", CTLFLAG_RD,
1992                                   "NOT SUPPORTED", 0,
1993                                   "Cache coherence NOT SUPPORTED");
1994         } else {
1995                 usched_dfly_cache_coherent = 1;
1996                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1997                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1998                                OID_AUTO, "cache_coherent", CTLFLAG_RW,
1999                                &usched_dfly_cache_coherent, 0,
2000                                "Enable/Disable cache coherent scheduling");
2001
2002                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2003                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2004                                OID_AUTO, "weight1", CTLFLAG_RW,
2005                                &usched_dfly_weight1, 10,
2006                                "Weight selection for current cpu");
2007
2008                 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2009                                SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2010                                OID_AUTO, "weight2", CTLFLAG_RW,
2011                                &usched_dfly_weight2, 5,
2012                                "Weight selection for wakefrom cpu");
2013
2014                 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2015                                 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2016                                 OID_AUTO, "stick_to_level",
2017                                 CTLTYPE_INT | CTLFLAG_RW,
2018                                 NULL, sizeof usched_dfly_stick_to_level,
2019                                 sysctl_usched_dfly_stick_to_level, "I",
2020                                 "Stick a process to this level. See sysctl "
2021                                 "parameter hw.cpu_topology.level_description");
2022         }
2023 }
2024 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2025         dfly_helper_thread_cpu_init, NULL)
2026
2027 #else /* No SMP options - just add the configurable parameters to sysctl */
2028
2029 static void
2030 sched_sysctl_tree_init(void)
2031 {
2032         sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2033         usched_dfly_sysctl_tree =
2034                 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2035                                 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2036                                 "usched_dfly", CTLFLAG_RD, 0, "");
2037
2038         /* usched_dfly sysctl configurable parameters */
2039         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2040                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2041                        OID_AUTO, "rrinterval", CTLFLAG_RW,
2042                        &usched_dfly_rrinterval, 0, "");
2043         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2044                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2045                        OID_AUTO, "decay", CTLFLAG_RW,
2046                        &usched_dfly_decay, 0, "Extra decay when not running");
2047         SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2048                        SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2049                        OID_AUTO, "batch_time", CTLFLAG_RW,
2050                        &usched_dfly_batch_time, 0, "Min batch counter value");
2051 }
2052 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2053         sched_sysctl_tree_init, NULL)
2054 #endif