2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
41 #include <sys/queue.h>
43 #include <sys/rtprio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
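/*
 * (Added note, not in the original source: assuming MAXPRI is 128, PPQ
 * works out to 128 / 32 = 4 priorities per queue, so a user priority p
 * maps onto run queue index (p & PRIMASK) / PPQ; e.g. priority 100 lands
 * on queue 25.  This matches the "four priority levels" comment above.)
 */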
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
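/*
 * (Added note, not in the original source: assuming ESTCPUPPQ is 512, as
 * the "16384 * 4 / 512" table in dfly_resetpriority() implies, ESTCPUMAX
 * works out to 512 * 32 = 16384 estcpu units and ESTCPULIM() simply
 * clamps a value to that ceiling.)
 */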
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_estfast lwp_usdata.dfly.estfast
97 #define lwp_uload lwp_usdata.dfly.uload
98 #define lwp_rqtype lwp_usdata.dfly.rqtype
99 #define lwp_qcpu lwp_usdata.dfly.qcpu
101 struct usched_dfly_pcpu {
102 struct spinlock spin;
103 struct thread helper_thread;
108 struct lwp *uschedcp;
109 struct rq queues[NQS];
110 struct rq rtqueues[NQS];
111 struct rq idqueues[NQS];
113 u_int32_t rtqueuebits;
114 u_int32_t idqueuebits;
123 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
125 static void dfly_acquire_curproc(struct lwp *lp);
126 static void dfly_release_curproc(struct lwp *lp);
127 static void dfly_select_curproc(globaldata_t gd);
128 static void dfly_setrunqueue(struct lwp *lp);
129 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
130 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
132 static void dfly_recalculate_estcpu(struct lwp *lp);
133 static void dfly_resetpriority(struct lwp *lp);
134 static void dfly_forking(struct lwp *plp, struct lwp *lp);
135 static void dfly_exiting(struct lwp *lp, struct proc *);
136 static void dfly_uload_update(struct lwp *lp);
137 static void dfly_yield(struct lwp *lp);
139 static void dfly_changeqcpu_locked(struct lwp *lp,
140 dfly_pcpu_t dd, dfly_pcpu_t rdd);
141 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
142 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
143 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_need_user_resched_remote(void *dummy);
149 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
150 struct lwp *chklp, int worst);
151 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
152 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
154 struct usched usched_dfly = {
156 "dfly", "Original DragonFly Scheduler",
157 NULL, /* default registration */
158 NULL, /* default deregistration */
159 dfly_acquire_curproc,
160 dfly_release_curproc,
163 dfly_recalculate_estcpu,
168 NULL, /* setcpumask not supported */
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
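/*
 * Illustrative sketch (added, not from the original source): with the
 * queuebits convention above, finding the first busy normal-class queue
 * on a per-cpu structure looks roughly like this, where bsfl() returns
 * the index of the lowest set bit:
 *
 *	if (dd->queuebits) {
 *		pri = bsfl(dd->queuebits);
 *		q = &dd->queues[pri];
 *		lp = TAILQ_FIRST(q);
 *	}
 *
 * The bit is cleared again in dfly_remrunqueue_locked() when a queue
 * drains; dfly_chooseproc_locked() implements the real selection across
 * the realtime, normal, and idle classes.
 */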
184 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
185 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
187 static volatile int dfly_scancpu;
189 static volatile int dfly_ucount; /* total running on whole system */
190 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
191 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
192 static struct sysctl_oid *usched_dfly_sysctl_tree;
194 /* Debug info exposed through debug.* sysctl */
196 static int usched_dfly_debug = -1;
197 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
198 &usched_dfly_debug, 0,
199 "Print debug information for this pid");
201 static int usched_dfly_pid_debug = -1;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
203 &usched_dfly_pid_debug, 0,
204 "Print KTR debug information for this pid");
206 static int usched_dfly_chooser = 0;
207 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
208 &usched_dfly_chooser, 0,
	"Print cpu selection debug information");
 * Tuning usched_dfly - configurable through kern.usched_dfly.
214 * weight1 - Tries to keep threads on their current cpu. If you
215 * make this value too large the scheduler will not be
216 * able to load-balance large loads.
218 * weight2 - If non-zero, detects thread pairs undergoing synchronous
219 * communications and tries to move them closer together.
220 * Behavior is adjusted by bit 4 of features (0x10).
222 * WARNING! Weight2 is a ridiculously sensitive parameter,
223 * a small value is recommended.
225 * weight3 - Weighting based on the number of recently runnable threads
226 * on the userland scheduling queue (ignoring their loads).
227 * A nominal value here prevents high-priority (low-load)
228 * threads from accumulating on one cpu core when other
229 * cores are available.
 * This value should be left fairly small relative to weight1 and
 * weight4.
 *
234 * weight4 - Weighting based on other cpu queues being available
235 * or running processes with higher lwp_priority's.
237 * This allows a thread to migrate to another nearby cpu if it
238 * is unable to run on the current cpu based on the other cpu
239 * being idle or running a lower priority (higher lwp_priority)
240 * thread. This value should be large enough to override weight1
 * features - These flags can be set or cleared to enable or disable various
 *	      features.
 *
 *	      0x01	Enable idle-cpu pulling			(default)
 *	      0x02	Enable proactive pushing		(default)
 *	      0x04	Enable rebalancing rover		(default)
 *	      0x08	Enable more proactive pushing		(default)
 *	      0x10	(flip weight2 limit on same cpu)	(default)
 *	      0x20	choose best cpu for forked process
 *	      0x40	choose current cpu for forked process
 *	      0x80	choose random cpu for forked process	(default)
 */
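/*
 * Illustrative usage (added, not from the original source): the knobs
 * above are exported under kern.usched_dfly by
 * dfly_helper_thread_cpu_init(), so a feature bit or weight can be
 * changed at runtime, e.g.:
 *
 *	sysctl kern.usched_dfly.features=0x8f
 *	sysctl kern.usched_dfly.weight2=30
 */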
255 static int usched_dfly_smt = 0;
256 static int usched_dfly_cache_coherent = 0;
257 static int usched_dfly_weight1 = 200; /* keep thread on current cpu */
258 static int usched_dfly_weight2 = 120; /* synchronous peer's current cpu */
259 static int usched_dfly_weight3 = 40; /* number of threads on queue */
260 static int usched_dfly_weight4 = 160; /* availability of idle cores */
261 static int usched_dfly_features = 0x8F; /* allow pulls */
static int usched_dfly_swmask = ~PPQMASK;	/* queue mask to force thread switch */
264 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
265 static int usched_dfly_decay = 8;
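/*
 * (Added note, not in the original: usched_dfly_rrinterval is measured in
 * scheduler-clock ticks.  dfly_schedulerclock() runs at ESTCPUFREQ ticks
 * per second, so (ESTCPUFREQ + 9) / 10 gives roughly one round-robin
 * every tenth of a second, matching the "10 times a second" behavior
 * described in dfly_schedulerclock().)
 */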
267 /* KTR debug printings */
269 KTR_INFO_MASTER(usched);
271 #if !defined(KTR_USCHED_DFLY)
272 #define KTR_USCHED_DFLY KTR_ALL
275 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
276 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
277 pid_t pid, int old_cpuid, int curr);
280 * This function is called when the kernel intends to return to userland.
281 * It is responsible for making the thread the current designated userland
282 * thread for this cpu, blocking if necessary.
284 * The kernel has already depressed our LWKT priority so we must not switch
285 * until we have either assigned or disposed of the thread.
287 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
288 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
289 * occur, this function is called only under very controlled circumstances.
292 dfly_acquire_curproc(struct lwp *lp)
303 * Make sure we aren't sitting on a tsleep queue.
306 crit_enter_quick(td);
307 if (td->td_flags & TDF_TSLEEPQ)
309 dfly_recalculate_estcpu(lp);
312 dd = &dfly_pcpu[gd->gd_cpuid];
315 * Process any pending interrupts/ipi's, then handle reschedule
316 * requests. dfly_release_curproc() will try to assign a new
317 * uschedcp that isn't us and otherwise NULL it out.
320 if (user_resched_wanted()) {
321 if (dd->uschedcp == lp)
323 clear_user_resched();
324 dfly_release_curproc(lp);
328 * Loop until we are the current user thread.
330 * NOTE: dd spinlock not held at top of loop.
332 if (dd->uschedcp == lp)
335 while (dd->uschedcp != lp) {
338 spin_lock(&dd->spin);
341 * We are not or are no longer the current lwp and a forced
342 * reschedule was requested. Figure out the best cpu to
343 * run on (our current cpu will be given significant weight).
345 * (if a reschedule was not requested we want to move this
346 * step after the uschedcp tests).
350 (usched_dfly_features & 0x08) &&
351 (rdd = dfly_choose_best_queue(lp)) != dd) {
352 dfly_changeqcpu_locked(lp, dd, rdd);
353 spin_unlock(&dd->spin);
354 lwkt_deschedule(lp->lwp_thread);
355 dfly_setrunqueue_dd(rdd, lp);
358 dd = &dfly_pcpu[gd->gd_cpuid];
364 * Either no reschedule was requested or the best queue was
365 * dd, and no current process has been selected. We can
366 * trivially become the current lwp on the current cpu.
368 if (dd->uschedcp == NULL) {
369 atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
371 dd->upri = lp->lwp_priority;
372 KKASSERT(lp->lwp_qcpu == dd->cpuid);
373 spin_unlock(&dd->spin);
378 * Can we steal the current designated user thread?
380 * If we do the other thread will stall when it tries to
381 * return to userland, possibly rescheduling elsewhere.
383 * It is important to do a masked test to avoid the edge
384 * case where two near-equal-priority threads are constantly
385 * interrupting each other. Since our context is the one
386 * that is active NOW, we WANT to steal the uschedcp
387 * designation and not switch-flap.
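 *
 * (Added note, not in the original: PPQMASK is PPQ - 1, so with PPQ == 4
 * the masked comparison below works at 4-priority queue granularity;
 * e.g. priorities 100 and 102 both mask to 100 and compare as equal.)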
390 (dd->upri & ~PPQMASK) >=
391 (lp->lwp_priority & ~PPQMASK)) {
393 dd->upri = lp->lwp_priority;
394 KKASSERT(lp->lwp_qcpu == dd->cpuid);
395 spin_unlock(&dd->spin);
401 * We are not the current lwp, figure out the best cpu
402 * to run on (our current cpu will be given significant
403 * weight). Loop on cpu change.
405 if ((usched_dfly_features & 0x02) &&
406 force_resched == 0 &&
407 (rdd = dfly_choose_best_queue(lp)) != dd) {
408 dfly_changeqcpu_locked(lp, dd, rdd);
409 spin_unlock(&dd->spin);
410 lwkt_deschedule(lp->lwp_thread);
411 dfly_setrunqueue_dd(rdd, lp);
414 dd = &dfly_pcpu[gd->gd_cpuid];
420 * We cannot become the current lwp, place the lp on the
421 * run-queue of this or another cpu and deschedule ourselves.
423 * When we are reactivated we will have another chance.
425 * Reload after a switch or setrunqueue/switch possibly
426 * moved us to another cpu.
428 spin_unlock(&dd->spin);
429 lwkt_deschedule(lp->lwp_thread);
430 dfly_setrunqueue_dd(dd, lp);
433 dd = &dfly_pcpu[gd->gd_cpuid];
437 * Make sure upri is synchronized, then yield to LWKT threads as
438 * needed before returning. This could result in another reschedule.
443 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
447 * DFLY_RELEASE_CURPROC
449 * This routine detaches the current thread from the userland scheduler,
450 * usually because the thread needs to run or block in the kernel (at
451 * kernel priority) for a while.
453 * This routine is also responsible for selecting a new thread to
454 * make the current thread.
456 * NOTE: This implementation differs from the dummy example in that
457 * dfly_select_curproc() is able to select the current process, whereas
458 * dummy_select_curproc() is not able to select the current process.
459 * This means we have to NULL out uschedcp.
461 * Additionally, note that we may already be on a run queue if releasing
462 * via the lwkt_switch() in dfly_setrunqueue().
465 dfly_release_curproc(struct lwp *lp)
467 globaldata_t gd = mycpu;
468 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
471 * Make sure td_wakefromcpu is defaulted. This will be overwritten
474 if (dd->uschedcp == lp) {
475 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
476 spin_lock(&dd->spin);
477 if (dd->uschedcp == lp) {
478 dd->uschedcp = NULL; /* don't let lp be selected */
479 dd->upri = PRIBASE_NULL;
480 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
481 spin_unlock(&dd->spin);
482 dfly_select_curproc(gd);
484 spin_unlock(&dd->spin);
490 * DFLY_SELECT_CURPROC
492 * Select a new current process for this cpu and clear any pending user
493 * reschedule request. The cpu currently has no current process.
495 * This routine is also responsible for equal-priority round-robining,
496 * typically triggered from dfly_schedulerclock(). In our dummy example
497 * all the 'user' threads are LWKT scheduled all at once and we just
498 * call lwkt_switch().
500 * The calling process is not on the queue and cannot be selected.
504 dfly_select_curproc(globaldata_t gd)
506 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
508 int cpuid = gd->gd_cpuid;
512 spin_lock(&dd->spin);
513 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
516 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
517 dd->upri = nlp->lwp_priority;
519 dd->rrcount = 0; /* reset round robin */
520 spin_unlock(&dd->spin);
522 lwkt_acquire(nlp->lwp_thread);
524 lwkt_schedule(nlp->lwp_thread);
526 spin_unlock(&dd->spin);
532 * Place the specified lwp on the user scheduler's run queue. This routine
533 * must be called with the thread descheduled. The lwp must be runnable.
534 * It must not be possible for anyone else to explicitly schedule this thread.
536 * The thread may be the current thread as a special case.
539 dfly_setrunqueue(struct lwp *lp)
545 * First validate the process LWKT state.
547 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
548 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
549 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
550 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
551 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
554 * NOTE: dd/rdd do not necessarily represent the current cpu.
555 * Instead they may represent the cpu the thread was last
556 * scheduled on or inherited by its parent.
558 dd = &dfly_pcpu[lp->lwp_qcpu];
562 * This process is not supposed to be scheduled anywhere or assigned
563 * as the current process anywhere. Assert the condition.
565 KKASSERT(rdd->uschedcp != lp);
569 * If we are not SMP we do not have a scheduler helper to kick
570 * and must directly activate the process if none are scheduled.
572 * This is really only an issue when bootstrapping init since
573 * the caller in all other cases will be a user process, and
574 * even if released (rdd->uschedcp == NULL), that process will
575 * kickstart the scheduler when it returns to user mode from
578 * NOTE: On SMP we can't just set some other cpu's uschedcp.
580 if (rdd->uschedcp == NULL) {
581 spin_lock(&rdd->spin);
582 if (rdd->uschedcp == NULL) {
583 atomic_set_cpumask(&dfly_curprocmask, 1);
585 rdd->upri = lp->lwp_priority;
586 spin_unlock(&rdd->spin);
587 lwkt_schedule(lp->lwp_thread);
590 spin_unlock(&rdd->spin);
596 * Ok, we have to setrunqueue some target cpu and request a reschedule
599 * We have to choose the best target cpu. It might not be the current
600 * target even if the current cpu has no running user thread (for
601 * example, because the current cpu might be a hyperthread and its
602 * sibling has a thread assigned).
604 * If we just forked it is most optimal to run the child on the same
605 * cpu just in case the parent decides to wait for it (thus getting
606 * off that cpu). As long as there is nothing else runnable on the
607 * cpu, that is. If we did this unconditionally a parent forking
608 * multiple children before waiting (e.g. make -j N) leaves other
609 * cpus idle that could be working.
611 if (lp->lwp_forked) {
613 if (usched_dfly_features & 0x20)
614 rdd = dfly_choose_best_queue(lp);
615 else if (usched_dfly_features & 0x40)
616 rdd = &dfly_pcpu[lp->lwp_qcpu];
617 else if (usched_dfly_features & 0x80)
618 rdd = dfly_choose_queue_simple(rdd, lp);
619 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
620 rdd = dfly_choose_best_queue(lp);
622 rdd = &dfly_pcpu[lp->lwp_qcpu];
624 rdd = dfly_choose_best_queue(lp);
625 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
627 if (lp->lwp_qcpu != rdd->cpuid) {
628 spin_lock(&dd->spin);
629 dfly_changeqcpu_locked(lp, dd, rdd);
630 spin_unlock(&dd->spin);
633 dfly_setrunqueue_dd(rdd, lp);
639 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
640 * spin-locked on-call. rdd does not have to be.
643 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
645 if (lp->lwp_qcpu != rdd->cpuid) {
646 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
647 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
648 atomic_add_int(&dd->uload, -lp->lwp_uload);
649 atomic_add_int(&dd->ucount, -1);
650 atomic_add_int(&dfly_ucount, -1);
652 lp->lwp_qcpu = rdd->cpuid;
659 * Place lp on rdd's runqueue. Nothing is locked on call. This function
660 * also performs all necessary ancillary notification actions.
663 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
669 * We might be moving the lp to another cpu's run queue, and once
 * on the runqueue (even if it is our cpu's), another cpu can rip
 * it out from under us.
 *
673 * TDF_MIGRATING might already be set if this is part of a
674 * remrunqueue+setrunqueue sequence.
676 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
677 lwkt_giveaway(lp->lwp_thread);
679 rgd = globaldata_find(rdd->cpuid);
682 * We lose control of the lp the moment we release the spinlock
683 * after having placed it on the queue. i.e. another cpu could pick
684 * it up, or it could exit, or its priority could be further
685 * adjusted, or something like that.
687 * WARNING! rdd can point to a foreign cpu!
689 spin_lock(&rdd->spin);
690 dfly_setrunqueue_locked(rdd, lp);
693 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
694 spin_unlock(&rdd->spin);
695 if (rdd->uschedcp == NULL) {
696 wakeup_mycpu(&rdd->helper_thread); /* XXX */
702 spin_unlock(&rdd->spin);
705 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
706 spin_unlock(&rdd->spin);
707 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
709 } else if (dfly_rdyprocmask & rgd->gd_cpumask) {
710 atomic_clear_cpumask(&dfly_rdyprocmask,
712 spin_unlock(&rdd->spin);
713 wakeup(&rdd->helper_thread);
715 spin_unlock(&rdd->spin);
720 * Request a reschedule if appropriate.
722 spin_lock(&rdd->spin);
723 dfly_setrunqueue_locked(rdd, lp);
724 spin_unlock(&rdd->spin);
725 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 */
738 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
740 globaldata_t gd = mycpu;
741 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
744 * Spinlocks also hold a critical section so there should not be
747 KKASSERT(gd->gd_spinlocks_wr == 0);
753 * Do we need to round-robin? We round-robin 10 times a second.
754 * This should only occur for cpu-bound batch processes.
756 if (++dd->rrcount >= usched_dfly_rrinterval) {
757 lp->lwp_thread->td_wakefromcpu = -1;
763 * Adjust estcpu upward using a real time equivalent calculation,
764 * and recalculate lp's priority.
766 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
767 dfly_resetpriority(lp);
770 * Rebalance two cpus every 8 ticks, pulling the worst thread
771 * from the worst cpu's queue into a rotating cpu number.
 * This mechanic is needed because the push algorithms can
 * steady-state in a non-optimal configuration.  We need to mix it
775 * up a little, even if it means breaking up a paired thread, so
776 * the push algorithms can rebalance the degenerate conditions.
777 * This portion of the algorithm exists to ensure stability at the
778 * selected weightings.
780 * Because we might be breaking up optimal conditions we do not want
781 * to execute this too quickly, hence we only rebalance approximately
 * ~7-8 times per second.  The pushes, on the other hand, are capable of
 * moving threads to other cpus at a much higher rate.
785 * We choose the most heavily loaded thread from the worst queue
786 * in order to ensure that multiple heavy-weight threads on the same
787 * queue get broken up, and also because these threads are the most
788 * likely to be able to remain in place. Hopefully then any pairings,
789 * if applicable, migrate to where these threads are.
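 *
 * (Added note, not in the original: the check below fires on 8-tick
 * boundaries and rotates the responsible cpu via sched_ticks / 8 % ncpus,
 * so each cpu performs this pull roughly once every 8 * ncpus ticks.)
 */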
792 if ((usched_dfly_features & 0x04) &&
793 ((u_int)sched_ticks & 7) == 0 &&
794 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
801 rdd = dfly_choose_worst_queue(dd);
803 spin_lock(&dd->spin);
804 if (spin_trylock(&rdd->spin)) {
805 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
806 spin_unlock(&rdd->spin);
808 spin_unlock(&dd->spin);
810 spin_unlock(&dd->spin);
816 /* dd->spin held if nlp != NULL */
819 * Either schedule it or add it to our queue.
822 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
823 atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
824 dd->upri = nlp->lwp_priority;
826 dd->rrcount = 0; /* reset round robin */
827 spin_unlock(&dd->spin);
828 lwkt_acquire(nlp->lwp_thread);
829 lwkt_schedule(nlp->lwp_thread);
831 dfly_setrunqueue_locked(dd, nlp);
832 spin_unlock(&dd->spin);
839 * Called from acquire and from kern_synch's one-second timer (one of the
840 * callout helper threads) with a critical section held.
842 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
843 * overall system load.
845 * Note that no recalculation occurs for a process which sleeps and wakes
846 * up in the same tick. That is, a system doing thousands of context
847 * switches per second will still only do serious estcpu calculations
848 * ESTCPUFREQ times per second.
852 dfly_recalculate_estcpu(struct lwp *lp)
854 globaldata_t gd = mycpu;
862 * We have to subtract periodic to get the last schedclock
863 * timeout time, otherwise we would get the upcoming timeout.
864 * Keep in mind that a process can migrate between cpus and
865 * while the scheduler clock should be very close, boundary
866 * conditions could lead to a small negative delta.
868 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
870 if (lp->lwp_slptime > 1) {
872 * Too much time has passed, do a coarse correction.
874 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
875 dfly_resetpriority(lp);
876 lp->lwp_cpbase = cpbase;
879 } else if (lp->lwp_cpbase != cpbase) {
881 * Adjust estcpu if we are in a different tick. Don't waste
882 * time if we are in the same tick.
884 * First calculate the number of ticks in the measurement
885 * interval. The ttlticks calculation can wind up 0 due to
886 * a bug in the handling of lwp_slptime (as yet not found),
887 * so make sure we do not get a divide by 0 panic.
889 ttlticks = (cpbase - lp->lwp_cpbase) /
890 gd->gd_schedclock.periodic;
893 lp->lwp_cpbase = cpbase;
897 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
900 * Calculate the percentage of one cpu being used then
901 * compensate for any system load in excess of ncpus.
903 * For example, if we have 8 cores and 16 running cpu-bound
904 * processes then all things being equal each process will
905 * get 50% of one cpu. We need to pump this value back
906 * up to 100% so the estcpu calculation properly adjusts
907 * the process's dynamic priority.
909 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
911 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
912 ucount = dfly_ucount;
913 if (ucount > ncpus) {
914 estcpu += estcpu * (ucount - ncpus) / ncpus;
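	/*
	 * (Added worked example, matching the comment above: with ncpus = 8
	 * and ucount = 16 cpu-bound processes, pctcpu reports roughly 50%
	 * of one cpu and the line above adds estcpu * (16 - 8) / 8, i.e.
	 * doubles estcpu back up to the equivalent of ~100%.)
	 */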
917 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
918 kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
919 lp->lwp_proc->p_pid, lp,
920 estcpu, lp->lwp_estcpu,
921 lp->lwp_cpticks, ttlticks);
 * Adjust lp->lwp_estcpu.  The decay factor determines how
926 * quickly lwp_estcpu collapses to its realtime calculation.
927 * A slower collapse gives us a more accurate number over
928 * the long term but can create problems with bursty threads
929 * or threads which become cpu hogs.
931 * To solve this problem, newly started lwps and lwps which
932 * are restarting after having been asleep for a while are
933 * given a much, much faster decay in order to quickly
934 * detect whether they become cpu-bound.
936 * NOTE: p_nice is accounted for in dfly_resetpriority(),
937 * and not here, but we must still ensure that a
938 * cpu-bound nice -20 process does not completely
939 * override a cpu-bound nice +20 process.
 * NOTE: We must use ESTCPULIM() here to deal with any
 *	 overflow.
 */
944 decay_factor = usched_dfly_decay;
945 if (decay_factor < 1)
947 if (decay_factor > 1024)
950 if (lp->lwp_estfast < usched_dfly_decay) {
952 lp->lwp_estcpu = ESTCPULIM(
953 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
954 (lp->lwp_estfast + 1));
956 lp->lwp_estcpu = ESTCPULIM(
957 (lp->lwp_estcpu * decay_factor + estcpu) /
961 if (usched_dfly_debug == lp->lwp_proc->p_pid)
962 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
963 dfly_resetpriority(lp);
964 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
970 * Compute the priority of a process when running in user mode.
971 * Arrange to reschedule if the resulting priority is better
972 * than that of the current process.
974 * This routine may be called with any process.
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on
 * or off the run queue.
 */
981 dfly_resetpriority(struct lwp *lp)
 * Lock the scheduler (lp) belongs to.  This can be on a different
 * cpu.  Handle races.  This loop breaks out with the appropriate
 * rdd spinlock held.
 */
1001 rdd = &dfly_pcpu[rcpu];
1002 spin_lock(&rdd->spin);
1003 if (rcpu == lp->lwp_qcpu)
1005 spin_unlock(&rdd->spin);
1009 * Calculate the new priority and queue type
1011 newrqtype = lp->lwp_rtprio.type;
1014 case RTP_PRIO_REALTIME:
1016 newpriority = PRIBASE_REALTIME +
1017 (lp->lwp_rtprio.prio & PRIMASK);
1019 case RTP_PRIO_NORMAL:
1023 estcpu = lp->lwp_estcpu;
 *	p_nice piece		Adds (0-40) * 2		0-80
 *	estcpu			Adds 16384 * 4 / 512	0-128
 */
1029 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1030 newpriority += estcpu * PPQ / ESTCPUPPQ;
1031 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1032 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1033 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
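	/*
	 * (Added worked example under assumed constants MAXPRI 128, PPQ 4,
	 * NICEPPQ 2, ESTCPUPPQ 512, PRIO_MIN -20: a nice 0 process with
	 * estcpu 8192 gets (0 - -20) * 4 / 2 = 40 plus 8192 * 4 / 512 = 64,
	 * i.e. 104, which the scaling step above maps into the 0-127 range
	 * before PRIBASE_NORMAL is added.)
	 */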
1036 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1038 case RTP_PRIO_THREAD:
1039 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1042 panic("Bad RTP_PRIO %d", newrqtype);
1047 * The LWKT scheduler doesn't dive usched structures, give it a hint
1048 * on the relative priority of user threads running in the kernel.
1049 * The LWKT scheduler will always ensure that a user thread running
1050 * in the kernel will get cpu some time, regardless of its upri,
1051 * but can decide not to instantly switch from one kernel or user
1052 * mode user thread to a kernel-mode user thread when it has a less
 * desirable user priority.
 *
 * td_upri has normal sense (higher values are more desirable), so
 * the masked user priority is negated below.
 */
1058 lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1061 * The newpriority incorporates the queue type so do a simple masked
1062 * check to determine if the process has moved to another queue. If
1063 * it has, and it is currently on a run queue, then move it.
1065 * Since uload is ~PPQMASK masked, no modifications are necessary if
1066 * we end up in the same run queue.
1068 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1069 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1070 dfly_remrunqueue_locked(rdd, lp);
1071 lp->lwp_priority = newpriority;
1072 lp->lwp_rqtype = newrqtype;
1073 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1074 dfly_setrunqueue_locked(rdd, lp);
1077 lp->lwp_priority = newpriority;
1078 lp->lwp_rqtype = newrqtype;
1079 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1084 * In the same PPQ, uload cannot change.
1086 lp->lwp_priority = newpriority;
1092 * Adjust effective load
1094 delta_uload = lp->lwp_estcpu / NQS; /* 0-511, 0-100% cpu */
1095 delta_uload -= lp->lwp_uload;
1096 lp->lwp_uload += delta_uload;
1097 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1098 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1101 * Determine if we need to reschedule the target cpu. This only
1102 * occurs if the LWP is already on a scheduler queue, which means
 * that idle cpu notification has already occurred.  At most we
1104 * need only issue a need_user_resched() on the appropriate cpu.
1106 * The LWP may be owned by a CPU different from the current one,
1107 * in which case dd->uschedcp may be modified without an MP lock
1108 * or a spinlock held. The worst that happens is that the code
1109 * below causes a spurious need_user_resched() on the target CPU
 * and dd->upri to be wrong for a short period of time, both of
1111 * which are harmless.
1113 * If checkpri is 0 we are adjusting the priority of the current
 * process, possibly higher (less desirable), so ignore the upri
1115 * check which will fail in that case.
1118 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1120 (rdd->upri & ~PRIMASK) >
1121 (lp->lwp_priority & ~PRIMASK))) {
1123 if (rcpu == mycpu->gd_cpuid) {
1124 spin_unlock(&rdd->spin);
1125 need_user_resched();
1127 atomic_clear_cpumask(&dfly_rdyprocmask,
1129 spin_unlock(&rdd->spin);
1130 lwkt_send_ipiq(globaldata_find(rcpu),
1131 dfly_need_user_resched_remote,
1135 spin_unlock(&rdd->spin);
1136 need_user_resched();
1139 spin_unlock(&rdd->spin);
1142 spin_unlock(&rdd->spin);
1149 dfly_yield(struct lwp *lp)
1152 /* FUTURE (or something similar) */
1153 switch(lp->lwp_rqtype) {
1154 case RTP_PRIO_NORMAL:
1155 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1161 need_user_resched();
1165 * Called from fork1() when a new child process is being created.
 * Give the child process an initial estcpu that is more batch than
1168 * its parent and dock the parent for the fork (but do not
1169 * reschedule the parent).
1173 * XXX lwp should be "spawning" instead of "forking"
1176 dfly_forking(struct lwp *plp, struct lwp *lp)
1179 * Put the child 4 queue slots (out of 32) higher than the parent
 * (less desirable than the parent).
1182 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1184 lp->lwp_estfast = 0;
1187 * Dock the parent a cost for the fork, protecting us from fork
 * bombs.  If the parent is forking quickly make the child more batch.
 */
1191 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
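/*
 * (Added note, not in the original: with ESTCPUPPQ assumed to be 512, the
 * child above starts with 4 * 512 = 2048 extra estcpu units, i.e. four of
 * the 32 queue slots more batch than its parent, while each fork docks the
 * parent only 512 / 16 = 32 units.)
 */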
1195 * Called when a lwp is being removed from this scheduler, typically
1196 * during lwp_exit(). We have to clean out any ULOAD accounting before
 * we can let the lp go.  The dd->spin lock is not needed for uload
 * updates.
 *
 * Scheduler dequeueing has already occurred, no further action in that
 * regard is needed.
 */
1204 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1206 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1208 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1209 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1210 atomic_add_int(&dd->uload, -lp->lwp_uload);
1211 atomic_add_int(&dd->ucount, -1);
1212 atomic_add_int(&dfly_ucount, -1);
1217 * This function cannot block in any way, but spinlocks are ok.
1219 * Update the uload based on the state of the thread (whether it is going
1220 * to sleep or running again). The uload is meant to be a longer-term
 * load and not an instantaneous load.
1224 dfly_uload_update(struct lwp *lp)
1226 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1228 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1229 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1230 spin_lock(&dd->spin);
1231 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1232 atomic_set_int(&lp->lwp_mpflags,
1234 atomic_add_int(&dd->uload, lp->lwp_uload);
1235 atomic_add_int(&dd->ucount, 1);
1236 atomic_add_int(&dfly_ucount, 1);
1238 spin_unlock(&dd->spin);
1240 } else if (lp->lwp_slptime > 0) {
1241 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1242 spin_lock(&dd->spin);
1243 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1244 atomic_clear_int(&lp->lwp_mpflags,
1246 atomic_add_int(&dd->uload, -lp->lwp_uload);
1247 atomic_add_int(&dd->ucount, -1);
1248 atomic_add_int(&dfly_ucount, -1);
1250 spin_unlock(&dd->spin);
1256 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1257 * it selects a user process and returns it. If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
1259 * chosen, NULL is returned.
1261 * Until we fix the RUNQ code the chklp test has to be strict or we may
1262 * bounce between processes trying to acquire the current process designation.
1264 * Must be called with rdd->spin locked. The spinlock is left intact through
1265 * the entire routine. dd->spin does not have to be locked.
1267 * If worst is non-zero this function finds the worst thread instead of the
1268 * best thread (used by the schedulerclock-based rover).
1272 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1273 struct lwp *chklp, int worst)
1277 u_int32_t *which, *which2;
1283 rtqbits = rdd->rtqueuebits;
1284 tsqbits = rdd->queuebits;
1285 idqbits = rdd->idqueuebits;
1289 pri = bsrl(idqbits);
1290 q = &rdd->idqueues[pri];
1291 which = &rdd->idqueuebits;
1293 } else if (tsqbits) {
1294 pri = bsrl(tsqbits);
1295 q = &rdd->queues[pri];
1296 which = &rdd->queuebits;
1298 } else if (rtqbits) {
1299 pri = bsrl(rtqbits);
1300 q = &rdd->rtqueues[pri];
1301 which = &rdd->rtqueuebits;
1306 lp = TAILQ_LAST(q, rq);
1309 pri = bsfl(rtqbits);
1310 q = &rdd->rtqueues[pri];
1311 which = &rdd->rtqueuebits;
1313 } else if (tsqbits) {
1314 pri = bsfl(tsqbits);
1315 q = &rdd->queues[pri];
1316 which = &rdd->queuebits;
1318 } else if (idqbits) {
1319 pri = bsfl(idqbits);
1320 q = &rdd->idqueues[pri];
1321 which = &rdd->idqueuebits;
1326 lp = TAILQ_FIRST(q);
1328 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1331 * If the passed lwp <chklp> is reasonably close to the selected
1332 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1334 * Note that we must error on the side of <chklp> to avoid bouncing
1335 * between threads in the acquire code.
1338 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1342 KTR_COND_LOG(usched_chooseproc,
1343 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1344 lp->lwp_proc->p_pid,
1345 lp->lwp_thread->td_gd->gd_cpuid,
1348 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1349 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1350 TAILQ_REMOVE(q, lp, lwp_procq);
1353 *which &= ~(1 << pri);
1356 * If we are choosing a process from rdd with the intent to
 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
 * is still held.
 */
1361 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1362 atomic_add_int(&rdd->uload, -lp->lwp_uload);
1363 atomic_add_int(&rdd->ucount, -1);
1364 atomic_add_int(&dfly_ucount, -1);
1366 lp->lwp_qcpu = dd->cpuid;
1367 atomic_add_int(&dd->uload, lp->lwp_uload);
1368 atomic_add_int(&dd->ucount, 1);
1369 atomic_add_int(&dfly_ucount, 1);
1370 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1378 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
 * Choose a cpu node to schedule lp on, hopefully nearby its current
 * node.
 *
1383 * We give the current node a modest advantage for obvious reasons.
1385 * We also give the node the thread was woken up FROM a slight advantage
1386 * in order to try to schedule paired threads which synchronize/block waiting
1387 * for each other fairly close to each other. Similarly in a network setting
1388 * this feature will also attempt to place a user process near the kernel
1389 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1390 * algorithm as it heuristically groups synchronizing processes for locality
1391 * of reference in multi-socket systems.
 * We check against running processes and give a big advantage if there
 * are none running.
 *
1396 * The caller will normally dfly_setrunqueue() lp on the returned queue.
 * When the topology is known choose a cpu whose group has, in aggregate,
 * the lowest weighted load.
 */
1403 dfly_choose_best_queue(struct lwp *lp)
1410 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1420 * When the topology is unknown choose a random cpu that is hopefully
1423 if (dd->cpunode == NULL)
1424 return (dfly_choose_queue_simple(dd, lp));
1429 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1430 wakemask = dfly_pcpu[wakecpu].cpumask;
 * When the topology is known choose a cpu whose group has, in
 * aggregate, the lowest weighted load.
 */
1438 cpup = root_cpu_node;
1443 * Degenerate case super-root
1445 if (cpup->child_node && cpup->child_no == 1) {
1446 cpup = cpup->child_node;
1453 if (cpup->child_node == NULL) {
1454 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1459 lowest_load = 0x7FFFFFFF;
1461 for (n = 0; n < cpup->child_no; ++n) {
1463 * Accumulate load information for all cpus
1464 * which are members of this node.
1466 cpun = &cpup->child_node[n];
1467 mask = cpun->members & usched_global_cpumask &
1468 smp_active_mask & lp->lwp_cpumask;
1476 cpuid = BSFCPUMASK(mask);
1477 rdd = &dfly_pcpu[cpuid];
1479 load += rdd->ucount * usched_dfly_weight3;
1481 if (rdd->uschedcp == NULL &&
1482 rdd->runqcount == 0) {
1483 load -= usched_dfly_weight4;
1484 } else if (rdd->upri > lp->lwp_priority + PPQ) {
1485 load -= usched_dfly_weight4 / 2;
1487 mask &= ~CPUMASK(cpuid);
1492 * Compensate if the lp is already accounted for in
1493 * the aggregate uload for this mask set. We want
1494 * to calculate the loads as if lp were not present,
1495 * otherwise the calculation is bogus.
1497 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1498 (dd->cpumask & cpun->members)) {
1499 load -= lp->lwp_uload;
1500 load -= usched_dfly_weight3;
1506 * Advantage the cpu group (lp) is already on.
1508 if (cpun->members & dd->cpumask)
1509 load -= usched_dfly_weight1;
1512 * Advantage the cpu group we want to pair (lp) to,
1513 * but don't let it go to the exact same cpu as
1514 * the wakecpu target.
1516 * We do this by checking whether cpun is a
1517 * terminal node or not. All cpun's at the same
1518 * level will either all be terminal or all not
1521 * If it is and we match we disadvantage the load.
1522 * If it is and we don't match we advantage the load.
1524 * Also note that we are effectively disadvantaging
 * all-but-one by the same amount, so it won't affect
1526 * the weight1 factor for the all-but-one nodes.
1528 if (cpun->members & wakemask) {
1529 if (cpun->child_node != NULL) {
1531 load -= usched_dfly_weight2;
1533 if (usched_dfly_features & 0x10)
1534 load += usched_dfly_weight2;
1536 load -= usched_dfly_weight2;
1541 * Calculate the best load
1543 if (cpub == NULL || lowest_load > load ||
1544 (lowest_load == load &&
1545 (cpun->members & dd->cpumask))
1553 if (usched_dfly_chooser)
1554 kprintf("lp %02d->%02d %s\n",
1555 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1560 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1562 * Choose the worst queue close to dd's cpu node with a non-empty runq
1563 * that is NOT dd. Also require that the moving of the highest-load thread
1564 * from rdd to dd does not cause the uload's to cross each other.
1566 * This is used by the thread chooser when the current cpu's queues are
1567 * empty to steal a thread from another cpu's queue. We want to offload
1568 * the most heavily-loaded queue.
1572 dfly_choose_worst_queue(dfly_pcpu_t dd)
1590 * When the topology is unknown choose a random cpu that is hopefully
1593 if (dd->cpunode == NULL) {
 * When the topology is known choose a cpu whose group has, in
 * aggregate, the highest weighted load.
 */
1601 cpup = root_cpu_node;
1605 * Degenerate case super-root
1607 if (cpup->child_node && cpup->child_no == 1) {
1608 cpup = cpup->child_node;
1615 if (cpup->child_node == NULL) {
1616 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1623 for (n = 0; n < cpup->child_no; ++n) {
1625 * Accumulate load information for all cpus
1626 * which are members of this node.
1628 cpun = &cpup->child_node[n];
1629 mask = cpun->members & usched_global_cpumask &
1637 cpuid = BSFCPUMASK(mask);
1638 rdd = &dfly_pcpu[cpuid];
1640 load += rdd->ucount * usched_dfly_weight3;
1641 if (rdd->uschedcp == NULL &&
1642 rdd->runqcount == 0 &&
1643 globaldata_find(cpuid)->gd_tdrunqcount == 0
1645 load -= usched_dfly_weight4;
1646 } else if (rdd->upri > dd->upri + PPQ) {
1647 load -= usched_dfly_weight4 / 2;
1649 mask &= ~CPUMASK(cpuid);
 * Prefer candidates which are somewhat closer to our cpu.
 */
1658 if (dd->cpumask & cpun->members)
1659 load += usched_dfly_weight1;
 * The best candidate is the one with the worst
 * load.
 */
1665 if (cpub == NULL || highest_load < load) {
1666 highest_load = load;
1674 * We never return our own node (dd), and only return a remote
 * node if its load is significantly worse than ours (i.e. where
1676 * stealing a thread would be considered reasonable).
1678 * This also helps us avoid breaking paired threads apart which
1679 * can have disastrous effects on performance.
1686 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1688 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1690 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1693 if (rdd->uload - hpri < dd->uload + hpri)
1701 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
 * Fall back to the original heuristic and select a random cpu,
1710 * first checking cpus not currently running a user thread.
1713 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1714 mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1715 smp_active_mask & usched_global_cpumask;
1718 tmpmask = ~(CPUMASK(cpuid) - 1);
1720 cpuid = BSFCPUMASK(mask & tmpmask);
1722 cpuid = BSFCPUMASK(mask);
1723 rdd = &dfly_pcpu[cpuid];
1725 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1727 mask &= ~CPUMASK(cpuid);
1731 * Then cpus which might have a currently running lp
1733 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1734 mask = dfly_curprocmask & dfly_rdyprocmask &
1735 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1738 tmpmask = ~(CPUMASK(cpuid) - 1);
1740 cpuid = BSFCPUMASK(mask & tmpmask);
1742 cpuid = BSFCPUMASK(mask);
1743 rdd = &dfly_pcpu[cpuid];
1745 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1747 mask &= ~CPUMASK(cpuid);
1751 * If we cannot find a suitable cpu we reload from dfly_scancpu
 * and round-robin.  Other cpus will pick up as they release their
1753 * current lwps or become ready.
1755 * Avoid a degenerate system lockup case if usched_global_cpumask
1756 * is set to 0 or otherwise does not cover lwp_cpumask.
1758 * We only kick the target helper thread in this case, we do not
1759 * set the user resched flag because
1761 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1762 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1764 rdd = &dfly_pcpu[cpuid];
1771 dfly_need_user_resched_remote(void *dummy)
1773 globaldata_t gd = mycpu;
1774 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1776 need_user_resched();
1778 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1779 wakeup_mycpu(&dd->helper_thread);
1785 * dfly_remrunqueue_locked() removes a given process from the run queue
1786 * that it is on, clearing the queue busy bit if it becomes empty.
 * Note that the user process scheduler is different from the LWKT scheduler.
1789 * The user process scheduler only manages user processes but it uses LWKT
1790 * underneath, and a user process operating in the kernel will often be
1791 * 'released' from our management.
1793 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1794 * to sleep or the lwp is moved to a different runq.
1797 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1803 KKASSERT(rdd->runqcount >= 0);
1805 pri = lp->lwp_rqindex;
1807 switch(lp->lwp_rqtype) {
1808 case RTP_PRIO_NORMAL:
1809 q = &rdd->queues[pri];
1810 which = &rdd->queuebits;
1812 case RTP_PRIO_REALTIME:
1814 q = &rdd->rtqueues[pri];
1815 which = &rdd->rtqueuebits;
1818 q = &rdd->idqueues[pri];
1819 which = &rdd->idqueuebits;
1822 panic("remrunqueue: invalid rtprio type");
1825 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1826 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1827 TAILQ_REMOVE(q, lp, lwp_procq);
1829 if (TAILQ_EMPTY(q)) {
1830 KASSERT((*which & (1 << pri)) != 0,
1831 ("remrunqueue: remove from empty queue"));
1832 *which &= ~(1 << pri);
1837 * dfly_setrunqueue_locked()
 * Add a process whose rqtype and rqindex had previously been calculated
1840 * onto the appropriate run queue. Determine if the addition requires
1841 * a reschedule on a cpu and return the cpuid or -1.
1843 * NOTE: Lower priorities are better priorities.
1845 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1846 * sum of the rough lwp_priority for all running and runnable
1847 * processes. Lower priority processes (higher lwp_priority
1848 * values) actually DO count as more load, not less, because
1849 * these are the programs which require the most care with
1850 * regards to cpu selection.
1853 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1859 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1861 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1862 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1863 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1864 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1865 atomic_add_int(&dfly_ucount, 1);
1868 pri = lp->lwp_rqindex;
1870 switch(lp->lwp_rqtype) {
1871 case RTP_PRIO_NORMAL:
1872 q = &rdd->queues[pri];
1873 which = &rdd->queuebits;
1875 case RTP_PRIO_REALTIME:
1877 q = &rdd->rtqueues[pri];
1878 which = &rdd->rtqueuebits;
1881 q = &rdd->idqueues[pri];
1882 which = &rdd->idqueuebits;
panic("setrunqueue: invalid rtprio type");
1890 * Add to the correct queue and set the appropriate bit. If no
1891 * lower priority (i.e. better) processes are in the queue then
1892 * we want a reschedule, calculate the best cpu for the job.
1894 * Always run reschedules on the LWPs original cpu.
1896 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1897 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1899 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1906 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wake up another for the purposes of
1908 * scheduling userland threads from setrunqueue().
1910 * UP systems do not need the helper since there is only one cpu.
1912 * We can't use the idle thread for this because we might block.
1913 * Additionally, doing things this way allows us to HLT idle cpus
1917 dfly_helper_thread(void *dummy)
1927 cpuid = gd->gd_cpuid; /* doesn't change */
1928 mask = gd->gd_cpumask; /* doesn't change */
1929 dd = &dfly_pcpu[cpuid];
 * Since we only want to be woken up when no user processes
1933 * are scheduled on a cpu, run at an ultra low priority.
1935 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1937 tsleep(&dd->helper_thread, 0, "schslp", 0);
1941 * We use the LWKT deschedule-interlock trick to avoid racing
1942 * dfly_rdyprocmask. This means we cannot block through to the
1943 * manual lwkt_switch() call we make below.
1946 tsleep_interlock(&dd->helper_thread, 0);
1948 spin_lock(&dd->spin);
1950 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1951 clear_user_resched(); /* This satisfied the reschedule request */
1952 dd->rrcount = 0; /* Reset the round-robin counter */
1954 if (dd->runqcount || dd->uschedcp != NULL) {
1956 * Threads are available. A thread may or may not be
 * currently scheduled.  Get the best thread already queued
 * to this cpu.
 */
1960 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1962 atomic_set_cpumask(&dfly_curprocmask, mask);
1963 dd->upri = nlp->lwp_priority;
1965 dd->rrcount = 0; /* reset round robin */
1966 spin_unlock(&dd->spin);
1967 lwkt_acquire(nlp->lwp_thread);
1968 lwkt_schedule(nlp->lwp_thread);
1971 * This situation should not occur because we had
1972 * at least one thread available.
1974 spin_unlock(&dd->spin);
1976 } else if (usched_dfly_features & 0x01) {
1978 * This cpu is devoid of runnable threads, steal a thread
1979 * from another cpu. Since we're stealing, might as well
1980 * load balance at the same time.
1982 * We choose the highest-loaded thread from the worst queue.
1984 * NOTE! This function only returns a non-NULL rdd when
1985 * another cpu's queue is obviously overloaded. We
1986 * do not want to perform the type of rebalancing
1987 * the schedclock does here because it would result
1988 * in insane process pulling when 'steady' state is
1989 * partially unbalanced (e.g. 6 runnables and only
1992 rdd = dfly_choose_worst_queue(dd);
1993 if (rdd && spin_trylock(&rdd->spin)) {
1994 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
1995 spin_unlock(&rdd->spin);
2000 atomic_set_cpumask(&dfly_curprocmask, mask);
2001 dd->upri = nlp->lwp_priority;
2003 dd->rrcount = 0; /* reset round robin */
2004 spin_unlock(&dd->spin);
2005 lwkt_acquire(nlp->lwp_thread);
2006 lwkt_schedule(nlp->lwp_thread);
2009 * Leave the thread on our run queue. Another
2010 * scheduler will try to pull it later.
2012 spin_unlock(&dd->spin);
2016 * devoid of runnable threads and not allowed to steal
2019 spin_unlock(&dd->spin);
2023 * We're descheduled unless someone scheduled us. Switch away.
2024 * Exiting the critical section will cause splz() to be called
2025 * for us if interrupts and such are pending.
2028 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2034 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2038 new_val = usched_dfly_stick_to_level;
2040 error = sysctl_handle_int(oidp, &new_val, 0, req);
2041 if (error != 0 || req->newptr == NULL)
2043 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2045 usched_dfly_stick_to_level = new_val;
 * Set up our scheduler helpers.  Note that curprocmask bit 0 has already
2052 * been cleared by rqinit() and we should not mess with it further.
2055 dfly_helper_thread_cpu_init(void)
2060 int smt_not_supported = 0;
2061 int cache_coherent_not_supported = 0;
2064 kprintf("Start scheduler helpers on cpus:\n");
2066 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2067 usched_dfly_sysctl_tree =
2068 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2069 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2070 "usched_dfly", CTLFLAG_RD, 0, "");
2072 for (i = 0; i < ncpus; ++i) {
2073 dfly_pcpu_t dd = &dfly_pcpu[i];
2074 cpumask_t mask = CPUMASK(i);
2076 if ((mask & smp_active_mask) == 0)
2079 spin_init(&dd->spin);
2080 dd->cpunode = get_cpu_node_by_cpuid(i);
2082 dd->cpumask = CPUMASK(i);
2083 for (j = 0; j < NQS; j++) {
2084 TAILQ_INIT(&dd->queues[j]);
2085 TAILQ_INIT(&dd->rtqueues[j]);
2086 TAILQ_INIT(&dd->idqueues[j]);
2088 atomic_clear_cpumask(&dfly_curprocmask, 1);
2090 if (dd->cpunode == NULL) {
2091 smt_not_supported = 1;
2092 cache_coherent_not_supported = 1;
2094 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2095 "found for cpu\n", i);
2097 switch (dd->cpunode->type) {
2100 kprintf ("\tcpu%d - HyperThreading "
2101 "available. Core siblings: ",
2105 smt_not_supported = 1;
2108 kprintf ("\tcpu%d - No HT available, "
2109 "multi-core/physical "
2110 "cpu. Physical siblings: ",
2114 smt_not_supported = 1;
2117 kprintf ("\tcpu%d - No HT available, "
2118 "single-core/physical cpu. "
2119 "Package Siblings: ",
2123 /* Let's go for safe defaults here */
2124 smt_not_supported = 1;
2125 cache_coherent_not_supported = 1;
2127 kprintf ("\tcpu%d - Unknown cpunode->"
2128 "type=%u. Siblings: ",
2130 (u_int)dd->cpunode->type);
2135 if (dd->cpunode->parent_node != NULL) {
2136 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2137 kprintf("cpu%d ", cpuid);
2140 kprintf(" no siblings\n");
2145 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2146 0, i, "usched %d", i);
2149 * Allow user scheduling on the target cpu. cpu #0 has already
2150 * been enabled in rqinit().
2153 atomic_clear_cpumask(&dfly_curprocmask, mask);
2154 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2155 dd->upri = PRIBASE_NULL;
2159 /* usched_dfly sysctl configurable parameters */
2161 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2162 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2163 OID_AUTO, "rrinterval", CTLFLAG_RW,
2164 &usched_dfly_rrinterval, 0, "");
2165 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2166 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2167 OID_AUTO, "decay", CTLFLAG_RW,
2168 &usched_dfly_decay, 0, "Extra decay when not running");
2170 /* Add enable/disable option for SMT scheduling if supported */
2171 if (smt_not_supported) {
2172 usched_dfly_smt = 0;
2173 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2174 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2175 OID_AUTO, "smt", CTLFLAG_RD,
2176 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2178 usched_dfly_smt = 1;
2179 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2180 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2181 OID_AUTO, "smt", CTLFLAG_RW,
2182 &usched_dfly_smt, 0, "Enable SMT scheduling");
2186 * Add enable/disable option for cache coherent scheduling
2189 if (cache_coherent_not_supported) {
2190 usched_dfly_cache_coherent = 0;
2191 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2192 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2193 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2195 "Cache coherence NOT SUPPORTED");
2197 usched_dfly_cache_coherent = 1;
2198 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2199 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2200 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2201 &usched_dfly_cache_coherent, 0,
2202 "Enable/Disable cache coherent scheduling");
2204 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2205 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2206 OID_AUTO, "weight1", CTLFLAG_RW,
2207 &usched_dfly_weight1, 10,
2208 "Weight selection for current cpu");
2210 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2211 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2212 OID_AUTO, "weight2", CTLFLAG_RW,
2213 &usched_dfly_weight2, 5,
2214 "Weight selection for wakefrom cpu");
2216 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2217 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2218 OID_AUTO, "weight3", CTLFLAG_RW,
2219 &usched_dfly_weight3, 50,
2220 "Weight selection for num threads on queue");
2222 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2223 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2224 OID_AUTO, "weight4", CTLFLAG_RW,
2225 &usched_dfly_weight4, 50,
2226 "Availability of other idle cpus");
2228 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2229 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2230 OID_AUTO, "features", CTLFLAG_RW,
2231 &usched_dfly_features, 15,
2232 "Allow pulls into empty queues");
2234 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2235 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2236 OID_AUTO, "swmask", CTLFLAG_RW,
2237 &usched_dfly_swmask, ~PPQMASK,
2238 "Queue mask to force thread switch");
2242 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2243 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2244 OID_AUTO, "stick_to_level",
2245 CTLTYPE_INT | CTLFLAG_RW,
2246 NULL, sizeof usched_dfly_stick_to_level,
2247 sysctl_usched_dfly_stick_to_level, "I",
		       "Stick a process to this level.  See sysctl "
		       "parameter hw.cpu_topology.level_description");
2253 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2254 dfly_helper_thread_cpu_init, NULL)
2256 #else /* No SMP options - just add the configurable parameters to sysctl */
2259 sched_sysctl_tree_init(void)
2261 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2262 usched_dfly_sysctl_tree =
2263 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2264 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2265 "usched_dfly", CTLFLAG_RD, 0, "");
2267 /* usched_dfly sysctl configurable parameters */
2268 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2269 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2270 OID_AUTO, "rrinterval", CTLFLAG_RW,
2271 &usched_dfly_rrinterval, 0, "");
2272 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2273 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2274 OID_AUTO, "decay", CTLFLAG_RW,
2275 &usched_dfly_decay, 0, "Extra decay when not running");
2277 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2278 sched_sysctl_tree_init, NULL)