2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
41 #include <sys/queue.h>
43 #include <sys/rtprio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
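/*
 * Worked example (illustrative; assumes the elided definitions give
 * MAXPRI == 128, NICEPPQ == 2 and ESTCPUPPQ == 512): PPQ == 128/32 == 4,
 * so each run queue spans 4 priority levels and a normal-class priority
 * maps to queue index (priority & PRIMASK) / PPQ.  ESTCPUMAX is then
 * 512 * 32 == 16384, and ESTCPULIM(20000) clamps back to 16384.
 */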
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_batch lwp_usdata.dfly.batch
97 #define lwp_rqtype lwp_usdata.dfly.rqtype
98 #define lwp_qcpu lwp_usdata.dfly.qcpu
100 struct usched_dfly_pcpu {
101 struct spinlock spin;
102 struct thread helper_thread;
106 struct lwp *uschedcp;
107 struct rq queues[NQS];
108 struct rq rtqueues[NQS];
109 struct rq idqueues[NQS];
111 u_int32_t rtqueuebits;
112 u_int32_t idqueuebits;
121 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123 static void dfly_acquire_curproc(struct lwp *lp);
124 static void dfly_release_curproc(struct lwp *lp);
125 static void dfly_select_curproc(globaldata_t gd);
126 static void dfly_setrunqueue(struct lwp *lp);
127 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
128 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130 static void dfly_recalculate_estcpu(struct lwp *lp);
131 static void dfly_resetpriority(struct lwp *lp);
132 static void dfly_forking(struct lwp *plp, struct lwp *lp);
133 static void dfly_exiting(struct lwp *lp, struct proc *);
134 static void dfly_uload_update(struct lwp *lp);
135 static void dfly_yield(struct lwp *lp);
137 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
138 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
139 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
141 static void dfly_wakeup_random_helper(dfly_pcpu_t notdd);
146 static void dfly_need_user_resched_remote(void *dummy);
148 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
150 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
151 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
153 struct usched usched_dfly = {
155 "dfly", "Original DragonFly Scheduler",
156 NULL, /* default registration */
157 NULL, /* default deregistration */
158 dfly_acquire_curproc,
159 dfly_release_curproc,
162 dfly_recalculate_estcpu,
167 NULL, /* setcpumask not supported */
172 * We have NQS (32) run queues per scheduling class. For the normal
173 * class, there are 128 priorities scaled onto these 32 queues. New
174 * processes are added to the last entry in each queue, and processes
175 * are selected for running by taking them from the head and maintaining
176 * a simple FIFO arrangement. Realtime and Idle priority processes have
177 * an explicit 0-31 priority which maps directly onto their class queue
178 * index. When a queue has something in it, the corresponding bit is
179 * set in the queuebits variable, allowing a single read to determine
180 * the state of all 32 queues and then a ffs() to find the first busy
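/*
 * Example of the queuebits encoding (illustrative): if the normal-class
 * queuebits word is 0x00000012, queues[1] and queues[4] are non-empty and
 * bsfl(0x00000012) == 1 selects queues[1], the best (lowest index) busy
 * queue, without scanning all 32 queues.
 */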
183 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
184 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
186 static volatile int dfly_scancpu;
188 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
189 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
190 static struct sysctl_oid *usched_dfly_sysctl_tree;
192 /* Debug info exposed through debug.* sysctl */
194 static int usched_dfly_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
196 &usched_dfly_debug, 0,
197 "Print debug information for this pid");
199 static int usched_dfly_pid_debug = -1;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
201 &usched_dfly_pid_debug, 0,
202 "Print KTR debug information for this pid");
204 static int usched_dfly_chooser = 0;
205 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
206 &usched_dfly_chooser, 0,
207 "Print KTR debug information for this pid");
209 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
211 static int usched_dfly_smt = 0;
212 static int usched_dfly_cache_coherent = 0;
213 static int usched_dfly_weight1 = 25; /* thread's current cpu */
214 static int usched_dfly_weight2 = 15; /* synchronous peer's current cpu */
215 static int usched_dfly_weight3 = 10; /* number of threads on queue */
216 static int usched_dfly_pull_enable = 1; /* allow pulls */
218 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
219 static int usched_dfly_decay = 8;
220 static int usched_dfly_batch_time = 10;
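/*
 * Rough numbers (assuming ESTCPUFREQ == 50, which is defined elsewhere):
 * usched_dfly_rrinterval works out to (50 + 9) / 10 == 5 scheduler-clock
 * ticks, so the round-robin check in dfly_schedulerclock() fires roughly
 * 10 times per second.
 */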
222 /* KTR debug printings */
224 KTR_INFO_MASTER(usched);
226 #if !defined(KTR_USCHED_DFLY)
227 #define KTR_USCHED_DFLY KTR_ALL
231 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
232 "USCHED_DFLY(dfly_acquire_curproc in user_reseched_wanted "
233 "after release: pid %d, cpuid %d, curr_cpuid %d)",
234 pid_t pid, int cpuid, int curr);
235 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
236 "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
238 pid_t pid, int cpuid, int curr);
239 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
240 "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
241 "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
242 pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
243 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
244 "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
245 "cpuid %d, curr_cpuid %d)",
246 pid_t pid, int cpuid, int curr);
248 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
249 "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
250 "cpuid %d, curr_cpuid %d)",
251 pid_t pid, int cpuid, int curr);
253 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
254 "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
255 "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
256 pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
259 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
260 "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
261 "cpuid %d, verify_mask %lu)",
262 pid_t pid, int cpuid, cpumask_t mask);
263 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
264 "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
265 "cpuid %d, verify_mask %lu)",
266 pid_t pid, int cpuid, cpumask_t mask);
268 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
269 "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
270 "mask %lu, curr_cpuid %d)",
271 pid_t pid, int cpuid, cpumask_t mask, int curr);
272 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
273 "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
274 "cpuid %d, mask %lu, curr_cpuid %d)",
275 pid_t pid, int cpuid, cpumask_t mask, int curr);
276 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
277 "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
278 "cpuid %d, mask %lu, curr_cpuid %d)",
279 pid_t pid, int cpuid, cpumask_t mask, int curr);
280 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
281 "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
282 "mask %lu, found_cpuid %d, curr_cpuid %d)",
283 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
284 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
285 "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
286 "try_cpuid %d, curr_cpuid %d)",
287 pid_t pid, int cpuid, int try_cpuid, int curr);
288 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
289 "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
290 "mask %lu, found_cpuid %d, curr_cpuid %d)",
291 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
295 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
296 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
297 pid_t pid, int old_cpuid, int curr);
300 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
301 "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
302 pid_t pid, int old_cpuid, int curr);
303 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
304 "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
305 "sibling_mask %lu, curr_cpumask %lu)",
306 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
307 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
308 "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
309 "sibling_mask %lu, curr_cpumask: %lu)",
310 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
313 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
314 "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
315 int id, pid_t pid, int cpuid);
316 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
317 "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
318 int id, pid_t pid, int cpuid);
320 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
321 "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
322 int id, cpumask_t tmpmask);
327 * DFLY_ACQUIRE_CURPROC
329 * This function is called when the kernel intends to return to userland.
330 * It is responsible for making the thread the current designated userland
331 * thread for this cpu, blocking if necessary.
333 * The kernel has already depressed our LWKT priority so we must not switch
334 * until we have either assigned or disposed of the thread.
336 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
337 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
338 * occur, this function is called only under very controlled circumstances.
341 dfly_acquire_curproc(struct lwp *lp)
349 * Make sure we aren't sitting on a tsleep queue.
352 crit_enter_quick(td);
353 if (td->td_flags & TDF_TSLEEPQ)
355 dfly_recalculate_estcpu(lp);
358 * If a reschedule was requested give another thread the
361 if (user_resched_wanted()) {
362 clear_user_resched();
363 dfly_release_curproc(lp);
367 * Loop until we are the current user thread
370 dd = &dfly_pcpu[gd->gd_cpuid];
374 * Process any pending events and higher priority threads.
379 * Become the currently scheduled user thread for this cpu
380 * if we can do so trivially.
382 * We can steal another thread's current thread designation
383 * on this cpu, since if we are running, that other thread
384 * must not be, so we can safely deschedule it.
386 if (dd->uschedcp == lp) {
388 * We are already the current lwp (hot path).
390 dd->upri = lp->lwp_priority;
391 } else if ((rdd = dfly_choose_best_queue(lp)) != dd) {
392 lwkt_deschedule(lp->lwp_thread);
393 dfly_setrunqueue_dd(rdd, lp);
396 dd = &dfly_pcpu[gd->gd_cpuid];
397 } else if (dd->uschedcp == NULL) {
399 * We can trivially become the current lwp.
401 atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
403 dd->upri = lp->lwp_priority;
404 KKASSERT(lp->lwp_qcpu == dd->cpuid);
405 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
407 * We can steal the current cpu's lwp designation
408 * away simply by replacing it. The other thread
409 * will stall when it tries to return to userland,
410 * possibly rescheduling elsewhere when it calls
414 dd->upri = lp->lwp_priority;
415 KKASSERT(lp->lwp_qcpu == dd->cpuid);
418 * We cannot become the current lwp, place the lp
419 * on the run-queue of this or another cpu and
420 * deschedule ourselves.
422 * When we are reactivated we will have another
425 * Reload after a switch or setrunqueue/switch possibly
426 * moved us to another cpu.
428 lwkt_deschedule(lp->lwp_thread);
429 dfly_setrunqueue_dd(dd, lp);
432 dd = &dfly_pcpu[gd->gd_cpuid];
434 } while (dd->uschedcp != lp);
437 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
441 * DFLY_RELEASE_CURPROC
443 * This routine detaches the current thread from the userland scheduler,
444 * usually because the thread needs to run or block in the kernel (at
445 * kernel priority) for a while.
447 * This routine is also responsible for selecting a new thread to
448 * make the current thread.
450 * NOTE: This implementation differs from the dummy example in that
451 * dfly_select_curproc() is able to select the current process, whereas
452 * dummy_select_curproc() is not able to select the current process.
453 * This means we have to NULL out uschedcp.
455 * Additionally, note that we may already be on a run queue if releasing
456 * via the lwkt_switch() in dfly_setrunqueue().
459 dfly_release_curproc(struct lwp *lp)
461 globaldata_t gd = mycpu;
462 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
465 * Make sure td_wakefromcpu is defaulted. This will be overwritten
468 lp->lwp_thread->td_wakefromcpu = gd->gd_cpuid;
470 if (dd->uschedcp == lp) {
471 spin_lock(&dd->spin);
472 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
474 dd->uschedcp = NULL; /* don't let lp be selected */
475 dd->upri = PRIBASE_NULL;
476 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
477 spin_unlock(&dd->spin);
478 dfly_select_curproc(gd);
483 * DFLY_SELECT_CURPROC
485 * Select a new current process for this cpu and clear any pending user
486 * reschedule request. The cpu currently has no current process.
488 * This routine is also responsible for equal-priority round-robining,
489 * typically triggered from dfly_schedulerclock(). In our dummy example
490 * all the 'user' threads are LWKT scheduled all at once and we just
491 * call lwkt_switch().
493 * The calling process is not on the queue and cannot be selected.
497 dfly_select_curproc(globaldata_t gd)
499 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
501 int cpuid = gd->gd_cpuid;
505 spin_lock(&dd->spin);
506 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 1);
509 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
510 dd->upri = nlp->lwp_priority;
512 dd->rrcount = 0; /* reset round robin */
513 spin_unlock(&dd->spin);
515 lwkt_acquire(nlp->lwp_thread);
517 lwkt_schedule(nlp->lwp_thread);
519 spin_unlock(&dd->spin);
525 * Place the specified lwp on the user scheduler's run queue. This routine
526 * must be called with the thread descheduled. The lwp must be runnable.
527 * It must not be possible for anyone else to explicitly schedule this thread.
529 * The thread may be the current thread as a special case.
532 dfly_setrunqueue(struct lwp *lp)
537 * First validate the process LWKT state.
539 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
540 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
541 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
542 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
543 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
546 * NOTE: rdd does not necessarily represent the current cpu.
547 * Instead it represents the cpu the thread was last
550 rdd = &dfly_pcpu[lp->lwp_qcpu];
553 * This process is not supposed to be scheduled anywhere or assigned
554 * as the current process anywhere. Assert the condition.
556 KKASSERT(rdd->uschedcp != lp);
560 * If we are not SMP we do not have a scheduler helper to kick
561 * and must directly activate the process if none are scheduled.
563 * This is really only an issue when bootstrapping init since
564 * the caller in all other cases will be a user process, and
565 * even if released (rdd->uschedcp == NULL), that process will
566 * kickstart the scheduler when it returns to user mode from
569 * NOTE: On SMP we can't just set some other cpu's uschedcp.
571 if (rdd->uschedcp == NULL) {
572 spin_lock(&rdd->spin);
573 if (rdd->uschedcp == NULL) {
574 atomic_set_cpumask(&dfly_curprocmask, 1);
576 rdd->upri = lp->lwp_priority;
577 spin_unlock(&rdd->spin);
578 lwkt_schedule(lp->lwp_thread);
581 spin_unlock(&rdd->spin);
587 * Ok, we have to setrunqueue some target cpu and request a reschedule
590 * We have to choose the best target cpu. It might not be the current
591 * target even if the current cpu has no running user thread (for
592 * example, because the current cpu might be a hyperthread and its
593 * sibling has a thread assigned).
595 * If we just forked, it is usually best to run the child on the same
596 * cpu just in case the parent decides to wait for it (thus getting
597 * off that cpu). As long as there is nothing else runnable on the
598 * cpu, that is. If we did this unconditionally a parent forking
599 * multiple children before waiting (e.g. make -j N) leaves other
600 * cpus idle that could be working.
602 if (lp->lwp_forked) {
604 if (dfly_pcpu[lp->lwp_qcpu].runqcount)
605 rdd = dfly_choose_best_queue(lp);
607 rdd = &dfly_pcpu[lp->lwp_qcpu];
608 /* dfly_wakeup_random_helper(rdd); */
610 rdd = dfly_choose_best_queue(lp);
611 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
614 dfly_setrunqueue_dd(rdd, lp);
618 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
624 * We might be moving the lp to another cpu's run queue, and once
625 * on the runqueue (even if it is our cpu's), another cpu can rip
628 * TDF_MIGRATING might already be set if this is part of a
629 * remrunqueue+setrunqueue sequence.
631 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
632 lwkt_giveaway(lp->lwp_thread);
634 rgd = globaldata_find(rdd->cpuid);
637 * We lose control of the lp the moment we release the spinlock
638 * after having placed it on the queue. i.e. another cpu could pick
639 * it up, or it could exit, or its priority could be further
640 * adjusted, or something like that.
642 * WARNING! rdd can point to a foreign cpu!
644 spin_lock(&rdd->spin);
645 dfly_setrunqueue_locked(rdd, lp);
648 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
649 spin_unlock(&rdd->spin);
650 if (rdd->uschedcp == NULL) {
651 wakeup_mycpu(&rdd->helper_thread); /* XXX */
657 spin_unlock(&rdd->spin);
660 atomic_clear_cpumask(&dfly_rdyprocmask, rgd->gd_cpumask);
661 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
662 spin_unlock(&rdd->spin);
663 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
666 spin_unlock(&rdd->spin);
667 wakeup(&rdd->helper_thread);
672 * Request a reschedule if appropriate.
674 spin_lock(&rdd->spin);
675 dfly_setrunqueue_locked(rdd, lp);
676 spin_unlock(&rdd->spin);
677 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
686 * This wakes up a random helper that might have no work to do on its cpu.
687 * The idea is to improve fork/fork-exec/fork-wait/exec and similar
688 * process-spawning sequences by first scheduling the forked process
689 * on the same cpu as the parent, in case the parent is just going to
690 * wait*(). But if the parent does not wait we want another cpu to pick
691 * the forked process up ASAP.
693 * The ipi/helper-scheduling sequence typically takes a lot longer to run
694 * than a return-from-procedure-call and the parent then entering a
695 * wait*(). There's a race here that we want the parent to win ONLY if
696 * it is going to wait*().
698 * If a process sticks around for long enough normal scheduling action
699 * will move it to the right place.
703 dfly_wakeup_random_helper(dfly_pcpu_t notdd)
709 mask = dfly_rdyprocmask & ~dfly_curprocmask & smp_active_mask &
710 usched_global_cpumask & ~notdd->cpumask;
712 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
715 tmpmask = ~(CPUMASK(cpuid) - 1);
717 cpuid = BSFCPUMASK(mask & tmpmask);
719 cpuid = BSFCPUMASK(mask);
720 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
721 wakeup(&dfly_pcpu[cpuid].helper_thread);
728 * This routine is called from a systimer IPI. It MUST be MP-safe and
729 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
734 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
736 globaldata_t gd = mycpu;
737 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
740 * Do we need to round-robin? We round-robin 10 times a second.
741 * This should only occur for cpu-bound batch processes.
743 if (++dd->rrcount >= usched_dfly_rrinterval) {
749 * Adjust estcpu upward using a real time equivalent calculation.
751 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
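	/*
	 * Rough magnitude (assuming ESTCPUFREQ == 50 and ESTCPUMAX == 16384):
	 * each tick adds about 16384/50 + 1 == 328 estcpu, so a fully
	 * cpu-bound thread saturates at ESTCPUMAX after roughly one second
	 * of cpu time.
	 */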
754 * Spinlocks also hold a critical section so there should not be
757 KKASSERT(gd->gd_spinlocks_wr == 0);
759 dfly_resetpriority(lp);
763 * Called from acquire and from kern_synch's one-second timer (one of the
764 * callout helper threads) with a critical section held.
766 * Decay p_estcpu based on the number of ticks we haven't been running
767 * and our p_nice. As the load increases each process observes a larger
768 * number of idle ticks (because other processes are running in them).
769 * This observation leads to a larger correction which tends to make the
770 * system more 'batchy'.
772 * Note that no recalculation occurs for a process which sleeps and wakes
773 * up in the same tick. That is, a system doing thousands of context
774 * switches per second will still only do serious estcpu calculations
775 * ESTCPUFREQ times per second.
779 dfly_recalculate_estcpu(struct lwp *lp)
781 globaldata_t gd = mycpu;
782 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
789 * We have to subtract periodic to get the last schedclock
790 * timeout time, otherwise we would get the upcoming timeout.
791 * Keep in mind that a process can migrate between cpus and
792 * while the scheduler clock should be very close, boundary
793 * conditions could lead to a small negative delta.
795 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
797 if (lp->lwp_slptime > 1) {
799 * Too much time has passed, do a coarse correction.
801 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
802 dfly_resetpriority(lp);
803 lp->lwp_cpbase = cpbase;
805 lp->lwp_batch -= ESTCPUFREQ;
806 if (lp->lwp_batch < 0)
808 } else if (lp->lwp_cpbase != cpbase) {
810 * Adjust estcpu if we are in a different tick. Don't waste
811 * time if we are in the same tick.
813 * First calculate the number of ticks in the measurement
814 * interval. The ttlticks calculation can wind up 0 due to
815 * a bug in the handling of lwp_slptime (as yet not found),
816 * so make sure we do not get a divide by 0 panic.
818 ttlticks = (cpbase - lp->lwp_cpbase) /
819 gd->gd_schedclock.periodic;
822 lp->lwp_cpbase = cpbase;
826 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
829 * Calculate the percentage of one cpu used factoring in ncpus
830 * and the load and adjust estcpu. Handle degenerate cases
831 * by adding ncpus to runqcount.
833 * estcpu is scaled by ESTCPUMAX.
835 * runqcount is the excess number of user processes
836 * that cannot be immediately scheduled to cpus. We want
837 * to count these as running to avoid range compression
838 * in the base calculation (which is the actual percentage
841 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
842 (dd->runqcount + ncpus) / (ncpus * ttlticks);
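	/*
	 * Worked example (illustrative; ESTCPUMAX assumed to be 16384): on a
	 * 4-cpu system with runqcount == 0, a thread that ran for half of
	 * the measured ticks (lwp_cpticks == ttlticks / 2) yields
	 * estcpu = (ttlticks/2 * 16384) * 4 / (4 * ttlticks) == 8192,
	 * i.e. 50% of one cpu.  A non-zero runqcount scales the estimate
	 * upward.
	 */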
845 * If estcpu is > 50% we become more batch-like
846 * If estcpu is <= 50% we become less batch-like
848 * It takes 30 cpu seconds to traverse the entire range.
850 if (estcpu > ESTCPUMAX / 2) {
851 lp->lwp_batch += ttlticks;
852 if (lp->lwp_batch > BATCHMAX)
853 lp->lwp_batch = BATCHMAX;
855 lp->lwp_batch -= ttlticks;
856 if (lp->lwp_batch < 0)
860 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
861 kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
862 lp->lwp_proc->p_pid, lp,
863 estcpu, lp->lwp_estcpu,
865 lp->lwp_cpticks, ttlticks);
869 * Adjust lp->lwp_estcpu. The decay factor determines how
870 * quickly lwp_estcpu collapses to its realtime calculation.
871 * A slower collapse gives us a more accurate number but
872 * can cause a cpu hog to eat too much cpu before the
873 * scheduler decides to downgrade it.
875 * NOTE: p_nice is accounted for in dfly_resetpriority(),
876 * and not here, but we must still ensure that a
877 * cpu-bound nice -20 process does not completely
878 * override a cpu-bound nice +20 process.
880 * NOTE: We must use ESTCPULIM() here to deal with any
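	/*
	 * Illustrative behaviour (assuming the default usched_dfly_decay of
	 * 8 and that the elided divisor is (decay_factor + 1)): the blend
	 * acts like an exponential moving average, new ~= (old * 8 +
	 * estcpu) / 9, so a sudden cpu burst takes several recalculations
	 * to fully register.
	 */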
883 decay_factor = usched_dfly_decay;
884 if (decay_factor < 1)
886 if (decay_factor > 1024)
889 lp->lwp_estcpu = ESTCPULIM(
890 (lp->lwp_estcpu * decay_factor + estcpu) /
893 if (usched_dfly_debug == lp->lwp_proc->p_pid)
894 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
895 dfly_resetpriority(lp);
896 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
902 * Compute the priority of a process when running in user mode.
903 * Arrange to reschedule if the resulting priority is better
904 * than that of the current process.
906 * This routine may be called with any process.
908 * This routine is called by fork1() for initial setup with the process
909 * off the run queue, and also may be called normally with the process on or
913 dfly_resetpriority(struct lwp *lp)
925 * Lock the scheduler (lp) belongs to. This can be on a different
926 * cpu. Handle races. This loop breaks out with the appropriate
932 rdd = &dfly_pcpu[rcpu];
933 spin_lock(&rdd->spin);
934 if (rcpu == lp->lwp_qcpu)
936 spin_unlock(&rdd->spin);
940 * Calculate the new priority and queue type
942 newrqtype = lp->lwp_rtprio.type;
945 case RTP_PRIO_REALTIME:
947 newpriority = PRIBASE_REALTIME +
948 (lp->lwp_rtprio.prio & PRIMASK);
950 case RTP_PRIO_NORMAL:
952 * Detune estcpu based on batchiness. lwp_batch ranges
953 * from 0 to BATCHMAX. Limit estcpu for the sake of
954 * the priority calculation to between 50% and 100%.
956 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
960 * p_nice piece Adds (0-40) * 2 0-80
961 * estcpu Adds 16384 * 4 / 512 0-128
963 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
964 newpriority += estcpu * PPQ / ESTCPUPPQ;
965 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
966 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
967 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
970 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
972 case RTP_PRIO_THREAD:
973 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
976 panic("Bad RTP_PRIO %d", newrqtype);
981 * The newpriority incorporates the queue type so do a simple masked
982 * check to determine if the process has moved to another queue. If
983 * it has, and it is currently on a run queue, then move it.
985 * Since uload is ~PPQMASK masked, no modifications are necessary if
986 * we end up in the same run queue.
988 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
992 * uload can change, calculate the adjustment to reduce
993 * edge cases since choosers scan the cpu topology without
996 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
998 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
999 ((newpriority & ~PPQMASK) & PRIMASK);
1000 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1003 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1004 dfly_remrunqueue_locked(rdd, lp);
1005 lp->lwp_priority = newpriority;
1006 lp->lwp_rqtype = newrqtype;
1007 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1008 dfly_setrunqueue_locked(rdd, lp);
1011 lp->lwp_priority = newpriority;
1012 lp->lwp_rqtype = newrqtype;
1013 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1018 * In the same PPQ, uload cannot change.
1020 lp->lwp_priority = newpriority;
1026 * Determine if we need to reschedule the target cpu. This only
1027 * occurs if the LWP is already on a scheduler queue, which means
1028 * that idle cpu notification has already occurred. At most we
1029 * need only issue a need_user_resched() on the appropriate cpu.
1031 * The LWP may be owned by a CPU different from the current one,
1032 * in which case dd->uschedcp may be modified without an MP lock
1033 * or a spinlock held. The worst that happens is that the code
1034 * below causes a spurious need_user_resched() on the target CPU
1035 * and dd->upri to be wrong for a short period of time, both of
1036 * which are harmless.
1038 * If checkpri is 0 we are adjusting the priority of the current
1039 * process, possibly higher (less desirable), so ignore the upri
1040 * check which will fail in that case.
1043 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1045 (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1047 if (rcpu == mycpu->gd_cpuid) {
1048 spin_unlock(&rdd->spin);
1049 need_user_resched();
1051 atomic_clear_cpumask(&dfly_rdyprocmask,
1053 spin_unlock(&rdd->spin);
1054 lwkt_send_ipiq(globaldata_find(rcpu),
1055 dfly_need_user_resched_remote,
1059 spin_unlock(&rdd->spin);
1060 need_user_resched();
1063 spin_unlock(&rdd->spin);
1066 spin_unlock(&rdd->spin);
1073 dfly_yield(struct lwp *lp)
1076 /* FUTURE (or something similar) */
1077 switch(lp->lwp_rqtype) {
1078 case RTP_PRIO_NORMAL:
1079 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1085 need_user_resched();
1089 * Called from fork1() when a new child process is being created.
1091 * Give the child process an initial estcpu that is more batchy than
1092 * its parent and dock the parent for the fork (but do not
1093 * reschedule the parent). This comprises the main part of our batch
1094 * detection heuristic for both parallel forking and sequential execs.
1096 * XXX lwp should be "spawning" instead of "forking"
1099 dfly_forking(struct lwp *plp, struct lwp *lp)
1102 * Put the child 4 queue slots (out of 32) higher than the parent
1103 * (less desirable than the parent).
1105 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1109 * The batch status of children always starts out centerline
1110 * and will inch-up or inch-down as appropriate. It takes roughly
1111 * ~15 seconds of >50% cpu to hit the limit.
1113 lp->lwp_batch = BATCHMAX / 2;
1116 * Dock the parent a cost for the fork, protecting us from fork
1117 * bombs. If the parent is forking quickly make the child more
1120 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
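	/*
	 * Rough numbers (assuming ESTCPUPPQ == 512): the child starts out
	 * 4 * 512 == 2048 estcpu (4 of the 32 queue slots) worse than the
	 * parent, while each fork costs the parent only 512/16 == 32 estcpu,
	 * so it takes about 16 rapid forks to push the parent down one full
	 * queue slot.
	 */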
1124 * Called when a lwp is being removed from this scheduler, typically
1125 * during lwp_exit().
1128 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1130 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1132 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1133 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1134 atomic_add_int(&dd->uload,
1135 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1140 * This function cannot block in any way
1143 dfly_uload_update(struct lwp *lp)
1145 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1147 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1148 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1149 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1150 atomic_add_int(&dd->uload,
1151 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1154 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1155 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1156 atomic_add_int(&dd->uload,
1157 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1163 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1164 * it selects a user process and returns it. If chklp is non-NULL and chklp
1165 * has a better or equal priority than the process that would otherwise be
1166 * chosen, NULL is returned.
1168 * Until we fix the RUNQ code the chklp test has to be strict or we may
1169 * bounce between processes trying to acquire the current process designation.
1171 * Must be called with dd->spin locked. The spinlock is left intact through
1172 * the entire routine.
1174 * If chklp is NULL this function will dive into other cpus' queues looking
1175 * for work if the current queue is empty.
1179 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1186 u_int32_t *which, *which2;
1192 rtqbits = dd->rtqueuebits;
1193 tsqbits = dd->queuebits;
1194 idqbits = dd->idqueuebits;
1197 pri = bsfl(rtqbits);
1198 q = &dd->rtqueues[pri];
1199 which = &dd->rtqueuebits;
1201 } else if (tsqbits) {
1202 pri = bsfl(tsqbits);
1203 q = &dd->queues[pri];
1204 which = &dd->queuebits;
1206 } else if (idqbits) {
1207 pri = bsfl(idqbits);
1208 q = &dd->idqueues[pri];
1209 which = &dd->idqueuebits;
1213 if (isremote || usched_dfly_pull_enable == 0) {
1215 * Queue is empty, disallow remote->remote recursion and
1216 * do not pull if threads are active.
1221 * Pull a runnable thread from a remote run queue. We have
1222 * to adjust qcpu and uload manually because the lp we return
1223 * might be assigned directly to uschedcp (setrunqueue might
1226 xdd = dfly_choose_worst_queue(dd);
1227 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1228 lp = dfly_chooseproc_locked(xdd, NULL, 1);
1230 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1231 atomic_add_int(&xdd->uload,
1232 -((lp->lwp_priority & ~PPQMASK) &
1235 lp->lwp_qcpu = dd->cpuid;
1236 atomic_add_int(&dd->uload,
1237 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1238 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1240 spin_unlock(&xdd->spin);
1251 lp = TAILQ_FIRST(q);
1252 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1255 * If the passed lwp <chklp> is reasonably close to the selected
1256 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1258 * Note that we must err on the side of <chklp> to avoid bouncing
1259 * between threads in the acquire code.
1262 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1266 KTR_COND_LOG(usched_chooseproc,
1267 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1268 lp->lwp_proc->p_pid,
1269 lp->lwp_thread->td_gd->gd_cpuid,
1272 TAILQ_REMOVE(q, lp, lwp_procq);
1275 *which &= ~(1 << pri);
1276 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1277 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1285 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1287 * Choose a cpu node to schedule lp on, hopefully near its current
1288 * node. We give the current node a modest advantage for obvious reasons.
1290 * We also give the node the thread was woken up FROM a slight advantage
1291 * in order to try to schedule paired threads which synchronize/block waiting
1292 * for each other fairly close to each other. Similarly in a network setting
1293 * this feature will also attempt to place a user process near the kernel
1294 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1295 * algorithm as it heuristically groups synchronizing processes for locality
1296 * of reference in multi-socket systems.
1298 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1300 * When the topology is known, choose a cpu whose group has, in aggregate,
1301 * the lowest weighted load.
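/*
 * Sketch of the weighting (using the default tunables above): for each
 * candidate node the chooser sums the member cpus' uload plus
 * runqcount * usched_dfly_weight3 (10), subtracts usched_dfly_weight1 (25)
 * if the node contains lp's current cpu or usched_dfly_weight2 (15) if it
 * contains the cpu lp was woken from, and descends into the child with the
 * lowest resulting load.
 */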
1305 dfly_choose_best_queue(struct lwp *lp)
1311 dfly_pcpu_t dd1 = &dfly_pcpu[lp->lwp_qcpu];
1312 dfly_pcpu_t dd2 = &dfly_pcpu[lp->lwp_thread->td_wakefromcpu];
1321 * When the topology is unknown choose a random cpu that is hopefully
1324 if (dd1->cpunode == NULL)
1325 return (dfly_choose_queue_simple(dd1, lp));
1328 * When the topology is known, choose a cpu whose group has, in
1329 * aggregate, the lowest weighted load.
1331 cpup = root_cpu_node;
1336 * Degenerate case super-root
1338 if (cpup->child_node && cpup->child_no == 1) {
1339 cpup = cpup->child_node;
1346 if (cpup->child_node == NULL) {
1347 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1352 lowest_load = 0x7FFFFFFF;
1354 for (n = 0; n < cpup->child_no; ++n) {
1356 * Accumulate load information for all cpus
1357 * which are members of this node.
1359 cpun = &cpup->child_node[n];
1360 mask = cpun->members & usched_global_cpumask &
1361 smp_active_mask & lp->lwp_cpumask;
1366 * Compensate if the lp is already accounted for in
1367 * the aggregate uload for this mask set. We want
1368 * to calculate the loads as if lp was not present.
1370 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1371 CPUMASK(lp->lwp_qcpu) & mask) {
1372 load = -((lp->lwp_priority & ~PPQMASK) &
1380 cpuid = BSFCPUMASK(mask);
1381 load += dfly_pcpu[cpuid].uload;
1382 load += dfly_pcpu[cpuid].runqcount *
1383 usched_dfly_weight3;
1384 mask &= ~CPUMASK(cpuid);
1390 * Give a slight advantage to the cpu groups (lp)
1391 * belongs to, and a very slight advantage to the
1392 * cpu groups our synchronous partner belongs to.
1394 if (cpun->members & dd1->cpumask)
1395 load -= usched_dfly_weight1;
1396 else if (cpun->members & dd2->cpumask)
1397 load -= usched_dfly_weight2;
1400 * Calculate the best load
1402 if (cpub == NULL || lowest_load > load ||
1403 (lowest_load == load &&
1404 (cpun->members & dd1->cpumask))
1412 if (usched_dfly_chooser)
1413 kprintf("lp %02d->%02d %s\n",
1414 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1419 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1421 * Choose the worst queue close to dd's cpu node with a non-empty runq.
1423 * This is used by the thread chooser when the current cpu's queues are
1424 * empty to steal a thread from another cpu's queue. We want to offload
1425 * the most heavily-loaded queue.
1429 dfly_choose_worst_queue(dfly_pcpu_t dd)
1443 * When the topology is unknown choose a random cpu that is hopefully
1446 if (dd->cpunode == NULL) {
1451 * When the topology is known, choose a cpu whose group has, in
1452 * aggregate, the highest weighted load.
1454 cpup = root_cpu_node;
1458 * Degenerate case super-root
1460 if (cpup->child_node && cpup->child_no == 1) {
1461 cpup = cpup->child_node;
1468 if (cpup->child_node == NULL) {
1469 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1476 for (n = 0; n < cpup->child_no; ++n) {
1478 * Accumulate load information for all cpus
1479 * which are members of this node.
1481 cpun = &cpup->child_node[n];
1482 mask = cpun->members & usched_global_cpumask &
1489 cpuid = BSFCPUMASK(mask);
1490 load += dfly_pcpu[cpuid].uload;
1491 load += dfly_pcpu[cpuid].runqcount *
1492 usched_dfly_weight3;
1493 mask &= ~CPUMASK(cpuid);
1499 * Give a slight advantage to nearby cpus.
1501 if (cpun->members & dd->cpumask)
1502 load += usched_dfly_weight1;
1505 * The best candidate is the one with the worst
1506 * (highest) load. Prefer candidates that are
1507 * closer to our cpu.
1509 if (cpub == NULL || highest_load < load ||
1510 (highest_load == load &&
1511 (cpun->members & dd->cpumask))
1513 highest_load = load;
1524 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1532 * Fallback to the original heuristic, select random cpu,
1533 * first checking cpus not currently running a user thread.
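	/*
	 * Example of the scan rotation (illustrative): with cpuid == 2,
	 * ~(CPUMASK(2) - 1) == ~0x3 masks off cpus 0 and 1, so BSFCPUMASK()
	 * first considers cpus 2 and up and only wraps back to the low cpus
	 * if that search comes up empty.
	 */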
1536 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1537 mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1538 smp_active_mask & usched_global_cpumask;
1541 tmpmask = ~(CPUMASK(cpuid) - 1);
1543 cpuid = BSFCPUMASK(mask & tmpmask);
1545 cpuid = BSFCPUMASK(mask);
1546 rdd = &dfly_pcpu[cpuid];
1548 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1550 mask &= ~CPUMASK(cpuid);
1554 * Then cpus which might have a currently running lp
1556 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1557 mask = dfly_curprocmask & dfly_rdyprocmask &
1558 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1561 tmpmask = ~(CPUMASK(cpuid) - 1);
1563 cpuid = BSFCPUMASK(mask & tmpmask);
1565 cpuid = BSFCPUMASK(mask);
1566 rdd = &dfly_pcpu[cpuid];
1568 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1570 mask &= ~CPUMASK(cpuid);
1574 * If we cannot find a suitable cpu we reload from dfly_scancpu
1575 * and round-robin. Other cpus will pick up as they release their
1576 * current lwps or become ready.
1578 * Avoid a degenerate system lockup case if usched_global_cpumask
1579 * is set to 0 or otherwise does not cover lwp_cpumask.
1581 * We only kick the target helper thread in this case, we do not
1582 * set the user resched flag because
1584 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1585 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1587 rdd = &dfly_pcpu[cpuid];
1594 dfly_need_user_resched_remote(void *dummy)
1596 globaldata_t gd = mycpu;
1597 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1599 need_user_resched();
1601 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1602 wakeup_mycpu(&dd->helper_thread);
1608 * dfly_remrunqueue_locked() removes a given process from the run queue
1609 * that it is on, clearing the queue busy bit if it becomes empty.
1611 * Note that the user process scheduler is different from the LWKT scheduler.
1612 * The user process scheduler only manages user processes but it uses LWKT
1613 * underneath, and a user process operating in the kernel will often be
1614 * 'released' from our management.
1616 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1617 * to sleep or the lwp is moved to a different runq.
1620 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1626 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1627 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1629 /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1630 KKASSERT(rdd->runqcount >= 0);
1632 pri = lp->lwp_rqindex;
1633 switch(lp->lwp_rqtype) {
1634 case RTP_PRIO_NORMAL:
1635 q = &rdd->queues[pri];
1636 which = &rdd->queuebits;
1638 case RTP_PRIO_REALTIME:
1640 q = &rdd->rtqueues[pri];
1641 which = &rdd->rtqueuebits;
1644 q = &rdd->idqueues[pri];
1645 which = &rdd->idqueuebits;
1648 panic("remrunqueue: invalid rtprio type");
1651 TAILQ_REMOVE(q, lp, lwp_procq);
1652 if (TAILQ_EMPTY(q)) {
1653 KASSERT((*which & (1 << pri)) != 0,
1654 ("remrunqueue: remove from empty queue"));
1655 *which &= ~(1 << pri);
1660 * dfly_setrunqueue_locked()
1662 * Add a process whose rqtype and rqindex have previously been calculated
1663 * onto the appropriate run queue. Determine if the addition requires
1664 * a reschedule on a cpu and return the cpuid or -1.
1666 * NOTE: Lower priorities are better priorities.
1668 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1669 * sum of the rough lwp_priority for all running and runnable
1670 * processes. Lower priority processes (higher lwp_priority
1671 * values) actually DO count as more load, not less, because
1672 * these are the programs which require the most care with
1673 * regards to cpu selection.
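/*
 * Example (illustrative): a cpu-bound nice +20 thread ends up with a larger
 * lwp_priority value than a nice -20 thread, so its
 * (lwp_priority & ~PPQMASK) & PRIMASK contribution to uload is larger; per
 * the note above, the less important cpu hog deliberately counts as more
 * load when the choosers decide how to spread work across cpus.
 */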
1676 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1682 if (lp->lwp_qcpu != rdd->cpuid) {
1683 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1684 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1685 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1686 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1688 lp->lwp_qcpu = rdd->cpuid;
1691 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1692 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1694 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1695 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1696 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1697 (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1700 pri = lp->lwp_rqindex;
1702 switch(lp->lwp_rqtype) {
1703 case RTP_PRIO_NORMAL:
1704 q = &rdd->queues[pri];
1705 which = &rdd->queuebits;
1707 case RTP_PRIO_REALTIME:
1709 q = &rdd->rtqueues[pri];
1710 which = &rdd->rtqueuebits;
1713 q = &rdd->idqueues[pri];
1714 which = &rdd->idqueuebits;
1717 panic("setrunqueue: invalid rtprio type");
1722 * Add to the correct queue and set the appropriate bit. If no
1723 * lower priority (i.e. better) processes are in the queue then
1724 * we want a reschedule, so calculate the best cpu for the job.
1726 * Always run reschedules on the LWP's original cpu.
1728 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1735 * For SMP systems a user scheduler helper thread is created for each
1736 * cpu and is used to allow one cpu to wake up another for the purposes of
1737 * scheduling userland threads from setrunqueue().
1739 * UP systems do not need the helper since there is only one cpu.
1741 * We can't use the idle thread for this because we might block.
1742 * Additionally, doing things this way allows us to HLT idle cpus
1746 dfly_helper_thread(void *dummy)
1755 cpuid = gd->gd_cpuid; /* doesn't change */
1756 mask = gd->gd_cpumask; /* doesn't change */
1757 dd = &dfly_pcpu[cpuid];
1760 * Since we only want to be woken up when no user processes
1761 * are scheduled on a cpu, run at an ultra low priority.
1763 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1765 tsleep(&dd->helper_thread, 0, "schslp", hz);
1769 * We use the LWKT deschedule-interlock trick to avoid racing
1770 * dfly_rdyprocmask. This means we cannot block through to the
1771 * manual lwkt_switch() call we make below.
1774 tsleep_interlock(&dd->helper_thread, 0);
1776 spin_lock(&dd->spin);
1778 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1779 clear_user_resched(); /* This satisfies the reschedule request */
1780 dd->rrcount = 0; /* Reset the round-robin counter */
1782 if ((dfly_curprocmask & mask) == 0) {
1784 * No thread is currently scheduled.
1786 KKASSERT(dd->uschedcp == NULL);
1787 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1788 KTR_COND_LOG(usched_sched_thread_no_process,
1789 nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1791 nlp->lwp_proc->p_pid,
1792 nlp->lwp_thread->td_gd->gd_cpuid);
1794 atomic_set_cpumask(&dfly_curprocmask, mask);
1795 dd->upri = nlp->lwp_priority;
1797 dd->rrcount = 0; /* reset round robin */
1798 spin_unlock(&dd->spin);
1799 lwkt_acquire(nlp->lwp_thread);
1800 lwkt_schedule(nlp->lwp_thread);
1802 spin_unlock(&dd->spin);
1804 } else if (dd->runqcount) {
1806 * Possibly find a better process to schedule.
1808 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1810 KTR_COND_LOG(usched_sched_thread_process,
1811 nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1813 nlp->lwp_proc->p_pid,
1814 nlp->lwp_thread->td_gd->gd_cpuid);
1816 dd->upri = nlp->lwp_priority;
1818 dd->rrcount = 0; /* reset round robin */
1819 spin_unlock(&dd->spin);
1820 lwkt_acquire(nlp->lwp_thread);
1821 lwkt_schedule(nlp->lwp_thread);
1824 * Leave the thread on our run queue. Another
1825 * scheduler will try to pull it later.
1827 spin_unlock(&dd->spin);
1831 * The runq is empty.
1833 spin_unlock(&dd->spin);
1837 * We're descheduled unless someone scheduled us. Switch away.
1838 * Exiting the critical section will cause splz() to be called
1839 * for us if interrupts and such are pending.
1842 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", hz);
1848 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1852 new_val = usched_dfly_stick_to_level;
1854 error = sysctl_handle_int(oidp, &new_val, 0, req);
1855 if (error != 0 || req->newptr == NULL)
1857 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1859 usched_dfly_stick_to_level = new_val;
1865 * Setup our scheduler helpers. Note that curprocmask bit 0 has already
1866 * been cleared by rqinit() and we should not mess with it further.
1869 dfly_helper_thread_cpu_init(void)
1874 int smt_not_supported = 0;
1875 int cache_coherent_not_supported = 0;
1878 kprintf("Start scheduler helpers on cpus:\n");
1880 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1881 usched_dfly_sysctl_tree =
1882 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1883 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1884 "usched_dfly", CTLFLAG_RD, 0, "");
1886 for (i = 0; i < ncpus; ++i) {
1887 dfly_pcpu_t dd = &dfly_pcpu[i];
1888 cpumask_t mask = CPUMASK(i);
1890 if ((mask & smp_active_mask) == 0)
1893 spin_init(&dd->spin);
1894 dd->cpunode = get_cpu_node_by_cpuid(i);
1896 dd->cpumask = CPUMASK(i);
1897 for (j = 0; j < NQS; j++) {
1898 TAILQ_INIT(&dd->queues[j]);
1899 TAILQ_INIT(&dd->rtqueues[j]);
1900 TAILQ_INIT(&dd->idqueues[j]);
1902 atomic_clear_cpumask(&dfly_curprocmask, 1);
1904 if (dd->cpunode == NULL) {
1905 smt_not_supported = 1;
1906 cache_coherent_not_supported = 1;
1908 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1909 "found for cpu\n", i);
1911 switch (dd->cpunode->type) {
1914 kprintf ("\tcpu%d - HyperThreading "
1915 "available. Core siblings: ",
1919 smt_not_supported = 1;
1922 kprintf ("\tcpu%d - No HT available, "
1923 "multi-core/physical "
1924 "cpu. Physical siblings: ",
1928 smt_not_supported = 1;
1931 kprintf ("\tcpu%d - No HT available, "
1932 "single-core/physical cpu. "
1933 "Package Siblings: ",
1937 /* Let's go for safe defaults here */
1938 smt_not_supported = 1;
1939 cache_coherent_not_supported = 1;
1941 kprintf ("\tcpu%d - Unknown cpunode->"
1942 "type=%u. Siblings: ",
1944 (u_int)dd->cpunode->type);
1949 if (dd->cpunode->parent_node != NULL) {
1950 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1951 kprintf("cpu%d ", cpuid);
1954 kprintf(" no siblings\n");
1959 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1960 0, i, "usched %d", i);
1963 * Allow user scheduling on the target cpu. cpu #0 has already
1964 * been enabled in rqinit().
1967 atomic_clear_cpumask(&dfly_curprocmask, mask);
1968 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1969 dd->upri = PRIBASE_NULL;
1973 /* usched_dfly sysctl configurable parameters */
1975 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1976 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1977 OID_AUTO, "rrinterval", CTLFLAG_RW,
1978 &usched_dfly_rrinterval, 0, "");
1979 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1980 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1981 OID_AUTO, "decay", CTLFLAG_RW,
1982 &usched_dfly_decay, 0, "Extra decay when not running");
1983 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1984 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1985 OID_AUTO, "batch_time", CTLFLAG_RW,
1986 &usched_dfly_batch_time, 0, "Min batch counter value");
1988 /* Add enable/disable option for SMT scheduling if supported */
1989 if (smt_not_supported) {
1990 usched_dfly_smt = 0;
1991 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1992 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1993 OID_AUTO, "smt", CTLFLAG_RD,
1994 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1996 usched_dfly_smt = 1;
1997 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1998 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1999 OID_AUTO, "smt", CTLFLAG_RW,
2000 &usched_dfly_smt, 0, "Enable SMT scheduling");
2004 * Add enable/disable option for cache coherent scheduling
2007 if (cache_coherent_not_supported) {
2008 usched_dfly_cache_coherent = 0;
2009 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2010 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2011 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2013 "Cache coherence NOT SUPPORTED");
2015 usched_dfly_cache_coherent = 1;
2016 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2017 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2018 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2019 &usched_dfly_cache_coherent, 0,
2020 "Enable/Disable cache coherent scheduling");
2022 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2023 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2024 OID_AUTO, "weight1", CTLFLAG_RW,
2025 &usched_dfly_weight1, 10,
2026 "Weight selection for current cpu");
2028 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2029 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2030 OID_AUTO, "weight2", CTLFLAG_RW,
2031 &usched_dfly_weight2, 5,
2032 "Weight selection for wakefrom cpu");
2034 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2035 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2036 OID_AUTO, "weight3", CTLFLAG_RW,
2037 &usched_dfly_weight3, 50,
2038 "Weight selection for num threads on queue");
2040 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2041 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2042 OID_AUTO, "pull_enable", CTLFLAG_RW,
2043 &usched_dfly_pull_enable, 1,
2044 "Allow pulls into empty queues");
2048 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2049 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2050 OID_AUTO, "stick_to_level",
2051 CTLTYPE_INT | CTLFLAG_RW,
2052 NULL, sizeof usched_dfly_stick_to_level,
2053 sysctl_usched_dfly_stick_to_level, "I",
2054 "Stick a process to this level. See sysctl"
2055 "paremter hw.cpu_topology.level_description");
2059 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2060 dfly_helper_thread_cpu_init, NULL)
2062 #else /* No SMP options - just add the configurable parameters to sysctl */
2065 sched_sysctl_tree_init(void)
2067 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2068 usched_dfly_sysctl_tree =
2069 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2070 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2071 "usched_dfly", CTLFLAG_RD, 0, "");
2073 /* usched_dfly sysctl configurable parameters */
2074 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2075 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2076 OID_AUTO, "rrinterval", CTLFLAG_RW,
2077 &usched_dfly_rrinterval, 0, "");
2078 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2079 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2080 OID_AUTO, "decay", CTLFLAG_RW,
2081 &usched_dfly_decay, 0, "Extra decay when not running");
2082 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2083 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2084 OID_AUTO, "batch_time", CTLFLAG_RW,
2085 &usched_dfly_batch_time, 0, "Min batch counter value");
2087 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2088 sched_sysctl_tree_init, NULL)