2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
41 #include <sys/queue.h>
43 #include <sys/rtprio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
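/*
 * Worked example (illustrative only; assumes MAXPRI is 128, as implied by
 * the "32 run queues x four priority levels" comment above):
 *
 *	PPQ     = 128 / 32 = 4 priority levels per queue
 *	PRIMASK = 127, PPQMASK = 3
 *
 * A normal-class thread with lwp_priority = PRIBASE_NORMAL + 53 is indexed
 * into run queue (53 & PRIMASK) / PPQ = 13.  Priority comparisons elsewhere
 * in this file mask off the PPQMASK low bits, e.g.
 *
 *	(dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)
 *
 * so two threads whose priorities fall within the same 4-level queue are
 * treated as equal and do not continuously preempt each other.
 */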
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_estfast lwp_usdata.dfly.estfast
97 #define lwp_rqtype lwp_usdata.dfly.rqtype
98 #define lwp_qcpu lwp_usdata.dfly.qcpu
100 struct usched_dfly_pcpu {
101 struct spinlock spin;
102 struct thread helper_thread;
107 struct lwp *uschedcp;
108 struct rq queues[NQS];
109 struct rq rtqueues[NQS];
110 struct rq idqueues[NQS];
112 u_int32_t rtqueuebits;
113 u_int32_t idqueuebits;
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
138 static void dfly_changeqcpu_locked(struct lwp *lp,
139 dfly_pcpu_t dd, dfly_pcpu_t rdd);
140 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
141 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
142 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_need_user_resched_remote(void *dummy);
148 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
149 struct lwp *chklp, int worst);
150 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
151 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
153 struct usched usched_dfly = {
155 "dfly", "Original DragonFly Scheduler",
156 NULL, /* default registration */
157 NULL, /* default deregistration */
158 dfly_acquire_curproc,
159 dfly_release_curproc,
162 dfly_recalculate_estcpu,
167 NULL, /* setcpumask not supported */
172 * We have NQS (32) run queues per scheduling class. For the normal
173 * class, there are 128 priorities scaled onto these 32 queues. New
174 * processes are added to the last entry in each queue, and processes
175 * are selected for running by taking them from the head and maintaining
176 * a simple FIFO arrangement. Realtime and Idle priority processes have
177 * an explicit 0-31 priority which maps directly onto their class queue
178 * index. When a queue has something in it, the corresponding bit is
179 * set in the queuebits variable, allowing a single read to determine
180 * the state of all 32 queues and then a ffs() to find the first busy
183 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
184 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
186 static volatile int dfly_scancpu;
188 static volatile int dfly_ucount; /* total running on whole system */
189 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
190 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
191 static struct sysctl_oid *usched_dfly_sysctl_tree;
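/*
 * Illustrative note on the queuebits encoding described above: each of the
 * three *queuebits words has bit N set when run queue N is non-empty.  A
 * queuebits value of 0x00000404 (bits 2 and 10 set), for example, means
 * queues 2 and 10 hold runnable threads; dfly_chooseproc_locked() below uses
 * bsfl() to take the lowest set bit (queue 2, the best priority) and, when
 * hunting for a victim to steal, bsrl() to take the highest set bit
 * (queue 10, the worst).
 */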
193 /* Debug info exposed through debug.* sysctl */
195 static int usched_dfly_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
197 &usched_dfly_debug, 0,
198 "Print debug information for this pid");
200 static int usched_dfly_pid_debug = -1;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
202 &usched_dfly_pid_debug, 0,
203 "Print KTR debug information for this pid");
205 static int usched_dfly_chooser = 0;
206 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
207 &usched_dfly_chooser, 0,
208 "Print KTR debug information for this pid");
211 * Tuning usched_dfly - configurable through kern.usched_dfly.
213 * weight1 - Tries to keep threads on their current cpu. If you
214 * make this value too large the scheduler will not be
215 * able to load-balance large loads.
217 * weight2 - If non-zero, detects thread pairs undergoing synchronous
218 * communications and tries to move them closer together.
219 * Behavior is adjusted by bit 4 of features (0x10).
221 * WARNING! Weight2 is a ridiculously sensitive parameter,
222 * a small value is recommended.
224 * weight3 - Weighting based on the number of recently runnable threads
225 * on the userland scheduling queue (ignoring their loads).
226 * A nominal value here prevents high-priority (low-load)
227 * threads from accumulating on one cpu core when other
228 * cores are available.
230 * This value should be left fairly small relative to weight1
233 * weight4 - Weighting based on other cpu queues being available
234 * or running processes with higher lwp_priority's.
236 * This allows a thread to migrate to another nearby cpu if it
237 * is unable to run on the current cpu based on the other cpu
238 * being idle or running a lower priority (higher lwp_priority)
239 * thread. This value should be large enough to override weight1
241 * features - These flags can be set or cleared to enable or disable various
244 * 0x01 Enable idle-cpu pulling (default)
245 * 0x02 Enable proactive pushing (default)
246 * 0x04 Enable rebalancing rover (default)
247 * 0x08 Enable more proactive pushing (default)
248 * 0x10 (flip weight2 limit on same cpu) (default)
249 * 0x20 choose best cpu for forked process
250 * 0x40 choose current cpu for forked process
251 * 0x80 choose random cpu for forked process (default)
254 static int usched_dfly_smt = 0;
255 static int usched_dfly_cache_coherent = 0;
256 static int usched_dfly_weight1 = 50; /* keep thread on current cpu */
257 static int usched_dfly_weight2 = 30; /* synchronous peer's current cpu */
258 static int usched_dfly_weight3 = 10; /* number of threads on queue */
259 static int usched_dfly_weight4 = 40; /* availability of idle cores */
260 static int usched_dfly_features = 0x8F; /* allow pulls */
262 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
263 static int usched_dfly_decay = 8;
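/*
 * Example run-time adjustment (hypothetical values): the tunables above are
 * attached under kern.usched_dfly by the sysctl setup code at the end of
 * this file, so they can be changed on a live system, e.g.:
 *
 *	sysctl kern.usched_dfly.weight2=15
 *	sysctl kern.usched_dfly.features=0x8b	(default 0x8f minus the 0x04
 *						 rebalancing rover)
 *
 * With the default rrinterval of (ESTCPUFREQ + 9) / 10 scheduler ticks,
 * dfly_schedulerclock() round-robins equal-priority threads roughly ten
 * times per second, as its comment notes.
 */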
265 /* KTR debug printings */
267 KTR_INFO_MASTER(usched);
269 #if !defined(KTR_USCHED_DFLY)
270 #define KTR_USCHED_DFLY KTR_ALL
273 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
274 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
275 pid_t pid, int old_cpuid, int curr);
278 * This function is called when the kernel intends to return to userland.
279 * It is responsible for making the thread the current designated userland
280 * thread for this cpu, blocking if necessary.
282 * The kernel has already depressed our LWKT priority so we must not switch
283 * until we have either assigned or disposed of the thread.
285 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
286 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
287 * occur, this function is called only under very controlled circumstances.
290 dfly_acquire_curproc(struct lwp *lp)
301 * Make sure we aren't sitting on a tsleep queue.
304 crit_enter_quick(td);
305 if (td->td_flags & TDF_TSLEEPQ)
307 dfly_recalculate_estcpu(lp);
310 dd = &dfly_pcpu[gd->gd_cpuid];
313 * Process any pending interrupts/ipi's, then handle reschedule
314 * requests. dfly_release_curproc() will try to assign a new
315 * uschedcp that isn't us and otherwise NULL it out.
318 if (user_resched_wanted()) {
319 if (dd->uschedcp == lp)
321 clear_user_resched();
322 dfly_release_curproc(lp);
326 * Loop until we are the current user thread.
328 * NOTE: dd spinlock not held at top of loop.
330 if (dd->uschedcp == lp)
333 while (dd->uschedcp != lp) {
336 spin_lock(&dd->spin);
339 * We are not or are no longer the current lwp and a forced
340 * reschedule was requested. Figure out the best cpu to
341 * run on (our current cpu will be given significant weight).
343 * (if a reschedule was not requested we want to move this
344 * step after the uschedcp tests).
348 (usched_dfly_features & 0x08) &&
349 (rdd = dfly_choose_best_queue(lp)) != dd) {
350 dfly_changeqcpu_locked(lp, dd, rdd);
351 spin_unlock(&dd->spin);
352 lwkt_deschedule(lp->lwp_thread);
353 dfly_setrunqueue_dd(rdd, lp);
356 dd = &dfly_pcpu[gd->gd_cpuid];
362 * Either no reschedule was requested or the best queue was
363 * dd, and no current process has been selected. We can
364 * trivially become the current lwp on the current cpu.
366 if (dd->uschedcp == NULL) {
367 atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
369 dd->upri = lp->lwp_priority;
370 KKASSERT(lp->lwp_qcpu == dd->cpuid);
371 spin_unlock(&dd->spin);
376 * Can we steal the current designated user thread?
378 * If we do the other thread will stall when it tries to
379 * return to userland, possibly rescheduling elsewhere.
381 * It is important to do a masked test to avoid the edge
382 * case where two near-equal-priority threads are constantly
383 * interrupting each other.
386 (dd->upri & ~PPQMASK) >
387 (lp->lwp_priority & ~PPQMASK)) {
389 dd->upri = lp->lwp_priority;
390 KKASSERT(lp->lwp_qcpu == dd->cpuid);
391 spin_unlock(&dd->spin);
397 * We are not the current lwp, figure out the best cpu
398 * to run on (our current cpu will be given significant
399 * weight). Loop on cpu change.
401 if ((usched_dfly_features & 0x02) &&
402 force_resched == 0 &&
403 (rdd = dfly_choose_best_queue(lp)) != dd) {
404 dfly_changeqcpu_locked(lp, dd, rdd);
405 spin_unlock(&dd->spin);
406 lwkt_deschedule(lp->lwp_thread);
407 dfly_setrunqueue_dd(rdd, lp);
410 dd = &dfly_pcpu[gd->gd_cpuid];
416 * We cannot become the current lwp, place the lp on the
417 * run-queue of this or another cpu and deschedule ourselves.
419 * When we are reactivated we will have another chance.
421 * Reload after a switch or setrunqueue/switch possibly
422 * moved us to another cpu.
424 spin_unlock(&dd->spin);
425 lwkt_deschedule(lp->lwp_thread);
426 dfly_setrunqueue_dd(dd, lp);
429 dd = &dfly_pcpu[gd->gd_cpuid];
433 * Make sure upri is synchronized, then yield to LWKT threads as
434 * needed before returning. This could result in another reschedule.
439 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
443 * DFLY_RELEASE_CURPROC
445 * This routine detaches the current thread from the userland scheduler,
446 * usually because the thread needs to run or block in the kernel (at
447 * kernel priority) for a while.
449 * This routine is also responsible for selecting a new thread to
450 * make the current thread.
452 * NOTE: This implementation differs from the dummy example in that
453 * dfly_select_curproc() is able to select the current process, whereas
454 * dummy_select_curproc() is not able to select the current process.
455 * This means we have to NULL out uschedcp.
457 * Additionally, note that we may already be on a run queue if releasing
458 * via the lwkt_switch() in dfly_setrunqueue().
461 dfly_release_curproc(struct lwp *lp)
463 globaldata_t gd = mycpu;
464 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
467 * Make sure td_wakefromcpu is defaulted. This will be overwritten
470 if (dd->uschedcp == lp) {
471 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
472 spin_lock(&dd->spin);
473 if (dd->uschedcp == lp) {
474 dd->uschedcp = NULL; /* don't let lp be selected */
475 dd->upri = PRIBASE_NULL;
476 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
477 spin_unlock(&dd->spin);
478 dfly_select_curproc(gd);
480 spin_unlock(&dd->spin);
486 * DFLY_SELECT_CURPROC
488 * Select a new current process for this cpu and clear any pending user
489 * reschedule request. The cpu currently has no current process.
491 * This routine is also responsible for equal-priority round-robining,
492 * typically triggered from dfly_schedulerclock(). In our dummy example
493 * all the 'user' threads are LWKT scheduled all at once and we just
494 * call lwkt_switch().
496 * The calling process is not on the queue and cannot be selected.
500 dfly_select_curproc(globaldata_t gd)
502 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
504 int cpuid = gd->gd_cpuid;
508 spin_lock(&dd->spin);
509 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
512 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
513 dd->upri = nlp->lwp_priority;
515 dd->rrcount = 0; /* reset round robin */
516 spin_unlock(&dd->spin);
518 lwkt_acquire(nlp->lwp_thread);
520 lwkt_schedule(nlp->lwp_thread);
522 spin_unlock(&dd->spin);
528 * Place the specified lwp on the user scheduler's run queue. This routine
529 * must be called with the thread descheduled. The lwp must be runnable.
530 * It must not be possible for anyone else to explicitly schedule this thread.
532 * The thread may be the current thread as a special case.
535 dfly_setrunqueue(struct lwp *lp)
541 * First validate the process LWKT state.
543 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
544 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
545 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
546 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
547 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
550 * NOTE: dd/rdd do not necessarily represent the current cpu.
551 * Instead they may represent the cpu the thread was last
552 * scheduled on or inherited by its parent.
554 dd = &dfly_pcpu[lp->lwp_qcpu];
558 * This process is not supposed to be scheduled anywhere or assigned
559 * as the current process anywhere. Assert the condition.
561 KKASSERT(rdd->uschedcp != lp);
565 * If we are not SMP we do not have a scheduler helper to kick
566 * and must directly activate the process if none are scheduled.
568 * This is really only an issue when bootstrapping init since
569 * the caller in all other cases will be a user process, and
570 * even if released (rdd->uschedcp == NULL), that process will
571 * kickstart the scheduler when it returns to user mode from
574 * NOTE: On SMP we can't just set some other cpu's uschedcp.
576 if (rdd->uschedcp == NULL) {
577 spin_lock(&rdd->spin);
578 if (rdd->uschedcp == NULL) {
579 atomic_set_cpumask(&dfly_curprocmask, 1);
581 rdd->upri = lp->lwp_priority;
582 spin_unlock(&rdd->spin);
583 lwkt_schedule(lp->lwp_thread);
586 spin_unlock(&rdd->spin);
592 * Ok, we have to setrunqueue some target cpu and request a reschedule
595 * We have to choose the best target cpu. It might not be the current
596 * target even if the current cpu has no running user thread (for
597 * example, because the current cpu might be a hyperthread and its
598 * sibling has a thread assigned).
600 * If we just forked it is most optimal to run the child on the same
601 * cpu just in case the parent decides to wait for it (thus getting
602 * off that cpu). As long as there is nothing else runnable on the
603 * cpu, that is. If we did this unconditionally a parent forking
604 * multiple children before waiting (e.g. make -j N) leaves other
605 * cpus idle that could be working.
607 if (lp->lwp_forked) {
609 if (usched_dfly_features & 0x20)
610 rdd = dfly_choose_best_queue(lp);
611 else if (usched_dfly_features & 0x40)
612 rdd = &dfly_pcpu[lp->lwp_qcpu];
613 else if (usched_dfly_features & 0x80)
614 rdd = dfly_choose_queue_simple(rdd, lp);
615 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
616 rdd = dfly_choose_best_queue(lp);
618 rdd = &dfly_pcpu[lp->lwp_qcpu];
620 rdd = dfly_choose_best_queue(lp);
621 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
623 if (lp->lwp_qcpu != rdd->cpuid) {
624 spin_lock(&dd->spin);
625 dfly_changeqcpu_locked(lp, dd, rdd);
626 spin_unlock(&dd->spin);
629 dfly_setrunqueue_dd(rdd, lp);
635 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
636 * spin-locked on-call. rdd does not have to be.
639 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
641 if (lp->lwp_qcpu != rdd->cpuid) {
642 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
643 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
644 atomic_add_int(&dd->uload,
645 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
646 atomic_add_int(&dd->ucount, -1);
647 atomic_add_int(&dfly_ucount, -1);
649 lp->lwp_qcpu = rdd->cpuid;
656 * Place lp on rdd's runqueue. Nothing is locked on call. This function
657 * also performs all necessary ancillary notification actions.
660 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
666 * We might be moving the lp to another cpu's run queue, and once
667 * on the runqueue (even if it is our cpu's), another cpu can rip
670 * TDF_MIGRATING might already be set if this is part of a
671 * remrunqueue+setrunqueue sequence.
673 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
674 lwkt_giveaway(lp->lwp_thread);
676 rgd = globaldata_find(rdd->cpuid);
679 * We lose control of the lp the moment we release the spinlock
680 * after having placed it on the queue. i.e. another cpu could pick
681 * it up, or it could exit, or its priority could be further
682 * adjusted, or something like that.
684 * WARNING! rdd can point to a foreign cpu!
686 spin_lock(&rdd->spin);
687 dfly_setrunqueue_locked(rdd, lp);
690 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
691 spin_unlock(&rdd->spin);
692 if (rdd->uschedcp == NULL) {
693 wakeup_mycpu(&rdd->helper_thread); /* XXX */
699 spin_unlock(&rdd->spin);
702 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
703 spin_unlock(&rdd->spin);
704 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
706 } else if (dfly_rdyprocmask & rgd->gd_cpumask) {
707 atomic_clear_cpumask(&dfly_rdyprocmask,
709 spin_unlock(&rdd->spin);
710 wakeup(&rdd->helper_thread);
712 spin_unlock(&rdd->spin);
717 * Request a reschedule if appropriate.
719 spin_lock(&rdd->spin);
720 dfly_setrunqueue_locked(rdd, lp);
721 spin_unlock(&rdd->spin);
722 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
729 * This routine is called from a systimer IPI. It MUST be MP-safe and
730 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
735 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
737 globaldata_t gd = mycpu;
738 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
741 * Spinlocks also hold a critical section so there should not be
744 KKASSERT(gd->gd_spinlocks_wr == 0);
750 * Do we need to round-robin? We round-robin 10 times a second.
751 * This should only occur for cpu-bound batch processes.
753 if (++dd->rrcount >= usched_dfly_rrinterval) {
754 lp->lwp_thread->td_wakefromcpu = -1;
760 * Adjust estcpu upward using a real time equivalent calculation,
761 * and recalculate lp's priority.
763 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
764 dfly_resetpriority(lp);
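/*
 * (Illustrative) This clock fires ESTCPUFREQ times per second and each tick
 * adds ESTCPUMAX / ESTCPUFREQ + 1, so a thread that holds the cpu for a
 * full second accumulates roughly ESTCPUMAX worth of estcpu here;
 * dfly_recalculate_estcpu() later averages this back down toward the
 * thread's measured cpu percentage.
 */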
767 * Rebalance cpus on each scheduler tick. Each cpu in turn will
768 * calculate the worst queue and, if sufficiently loaded, will
769 * pull a process from that queue into our current queue.
771 * To try to avoid always moving the same thread. XXX
774 if ((usched_dfly_features & 0x04) &&
775 ((uint16_t)sched_ticks % ncpus) == gd->gd_cpuid) {
783 * We have to choose the worst thread in the worst queue
784 * because it likely finished its batch on that cpu and is
785 * now waiting for cpu again.
787 rdd = dfly_choose_worst_queue(dd);
789 spin_lock(&dd->spin);
790 if (spin_trylock(&rdd->spin)) {
791 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
792 spin_unlock(&rdd->spin);
794 spin_unlock(&dd->spin);
796 spin_unlock(&dd->spin);
802 /* dd->spin held if nlp != NULL */
805 * Either schedule it or add it to our queue.
808 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
809 atomic_set_cpumask(&dfly_curprocmask, dd->cpumask);
810 dd->upri = nlp->lwp_priority;
812 dd->rrcount = 0; /* reset round robin */
813 spin_unlock(&dd->spin);
814 lwkt_acquire(nlp->lwp_thread);
815 lwkt_schedule(nlp->lwp_thread);
817 dfly_setrunqueue_locked(dd, nlp);
818 spin_unlock(&dd->spin);
825 * Called from acquire and from kern_synch's one-second timer (one of the
826 * callout helper threads) with a critical section held.
828 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
829 * overall system load.
831 * Note that no recalculation occurs for a process which sleeps and wakes
832 * up in the same tick. That is, a system doing thousands of context
833 * switches per second will still only do serious estcpu calculations
834 * ESTCPUFREQ times per second.
838 dfly_recalculate_estcpu(struct lwp *lp)
840 globaldata_t gd = mycpu;
848 * We have to subtract periodic to get the last schedclock
849 * timeout time, otherwise we would get the upcoming timeout.
850 * Keep in mind that a process can migrate between cpus and
851 * while the scheduler clock should be very close, boundary
852 * conditions could lead to a small negative delta.
854 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
856 if (lp->lwp_slptime > 1) {
858 * Too much time has passed, do a coarse correction.
860 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
861 dfly_resetpriority(lp);
862 lp->lwp_cpbase = cpbase;
865 } else if (lp->lwp_cpbase != cpbase) {
867 * Adjust estcpu if we are in a different tick. Don't waste
868 * time if we are in the same tick.
870 * First calculate the number of ticks in the measurement
871 * interval. The ttlticks calculation can wind up 0 due to
872 * a bug in the handling of lwp_slptime (as yet not found),
873 * so make sure we do not get a divide by 0 panic.
875 ttlticks = (cpbase - lp->lwp_cpbase) /
876 gd->gd_schedclock.periodic;
879 lp->lwp_cpbase = cpbase;
883 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
886 * Calculate the percentage of one cpu being used then
887 * compensate for any system load in excess of ncpus.
889 * For example, if we have 8 cores and 16 running cpu-bound
890 * processes then all things being equal each process will
891 * get 50% of one cpu. We need to pump this value back
892 * up to 100% so the estcpu calculation properly adjusts
893 * the process's dynamic priority.
895 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
897 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
898 ucount = dfly_ucount;
899 if (ucount > ncpus) {
900 estcpu += estcpu * (ucount - ncpus) / ncpus;
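/*
 * (Illustrative) With the 8-core / 16-runnable example from the comment
 * above, ucount - ncpus equals ncpus, so estcpu is doubled here: a thread
 * measured at 50% of one cpu is treated as if it were cpu-bound at 100%.
 */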
903 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
904 kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
905 lp->lwp_proc->p_pid, lp,
906 estcpu, lp->lwp_estcpu,
907 lp->lwp_cpticks, ttlticks);
911 * Adjust lp->lwp_estcpu. The decay factor determines how
912 * quickly lwp_estcpu collapses to its realtime calculation.
913 * A slower collapse gives us a more accurate number over
914 * the long term but can create problems with bursty threads
915 * or threads which become cpu hogs.
917 * To solve this problem, newly started lwps and lwps which
918 * are restarting after having been asleep for a while are
919 * given a much, much faster decay in order to quickly
920 * detect whether they become cpu-bound.
922 * NOTE: p_nice is accounted for in dfly_resetpriority(),
923 * and not here, but we must still ensure that a
924 * cpu-bound nice -20 process does not completely
925 * override a cpu-bound nice +20 process.
927 * NOTE: We must use ESTCPULIM() here to deal with any
930 decay_factor = usched_dfly_decay;
931 if (decay_factor < 1)
933 if (decay_factor > 1024)
936 if (lp->lwp_estfast < usched_dfly_decay) {
938 lp->lwp_estcpu = ESTCPULIM(
939 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
940 (lp->lwp_estfast + 1));
942 lp->lwp_estcpu = ESTCPULIM(
943 (lp->lwp_estcpu * decay_factor + estcpu) /
947 if (usched_dfly_debug == lp->lwp_proc->p_pid)
948 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
949 dfly_resetpriority(lp);
950 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
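/*
 * Worked example of the averaging above (illustrative, assuming the elided
 * divisor mirrors the (lwp_estfast + 1) form, i.e. (decay_factor + 1)):
 * with the default decay of 8, an old lwp_estcpu of 4096 and an
 * instantaneous estcpu of 8192 yield (4096 * 8 + 8192) / 9 ~= 4551, so the
 * long-term value drifts slowly toward the real-time measurement, while a
 * freshly woken thread with a small lwp_estfast converges much faster.
 */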
956 * Compute the priority of a process when running in user mode.
957 * Arrange to reschedule if the resulting priority is better
958 * than that of the current process.
960 * This routine may be called with any process.
962 * This routine is called by fork1() for initial setup with the process
963 * off the run queue, and also may be called normally with the process on or
967 dfly_resetpriority(struct lwp *lp)
979 * Lock the scheduler (lp) belongs to. This can be on a different
980 * cpu. Handle races. This loop breaks out with the appropriate
986 rdd = &dfly_pcpu[rcpu];
987 spin_lock(&rdd->spin);
988 if (rcpu == lp->lwp_qcpu)
990 spin_unlock(&rdd->spin);
994 * Calculate the new priority and queue type
996 newrqtype = lp->lwp_rtprio.type;
999 case RTP_PRIO_REALTIME:
1001 newpriority = PRIBASE_REALTIME +
1002 (lp->lwp_rtprio.prio & PRIMASK);
1004 case RTP_PRIO_NORMAL:
1008 estcpu = lp->lwp_estcpu;
1011 * p_nice piece		Adds (0-40) * 2		0-80
1012 * estcpu			Adds 16384 * 4 / 512	0-128
1014 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1015 newpriority += estcpu * PPQ / ESTCPUPPQ;
1016 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1017 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1018 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1021 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1023 case RTP_PRIO_THREAD:
1024 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1027 panic("Bad RTP_PRIO %d", newrqtype);
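/*
 * Worked example of the RTP_PRIO_NORMAL case above (illustrative, using the
 * values implied by the in-line comment: NICEPPQ 2, ESTCPUPPQ 512,
 * ESTCPUMAX 16384, MAXPRI 128): a nice 0 thread with estcpu 8192 gets
 *
 *	(0 - PRIO_MIN) * PPQ / NICEPPQ	= 20 * 2	= 40
 *	8192 * PPQ / ESTCPUPPQ		= 8192 / 128	= 64
 *
 * for a raw value of 104, which the scale factor MAXPRI / (PRIO_RANGE *
 * PPQ / NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ) = 128 / 210 reduces to 63,
 * giving newpriority = PRIBASE_NORMAL + 63 and run queue index 63 / PPQ = 15.
 */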
1032 * The newpriority incorporates the queue type so do a simple masked
1033 * check to determine if the process has moved to another queue. If
1034 * it has, and it is currently on a run queue, then move it.
1036 * Since uload is ~PPQMASK masked, no modifications are necessary if
1037 * we end up in the same run queue.
1039 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1043 * uload can change, calculate the adjustment to reduce
1044 * edge cases since choosers scan the cpu topology without
1047 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1049 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
1050 ((newpriority & ~PPQMASK) & PRIMASK);
1051 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1053 /* no change in ucount */
1055 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1056 dfly_remrunqueue_locked(rdd, lp);
1057 lp->lwp_priority = newpriority;
1058 lp->lwp_rqtype = newrqtype;
1059 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1060 dfly_setrunqueue_locked(rdd, lp);
1063 lp->lwp_priority = newpriority;
1064 lp->lwp_rqtype = newrqtype;
1065 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1070 * In the same PPQ, uload cannot change.
1072 lp->lwp_priority = newpriority;
1078 * Determine if we need to reschedule the target cpu. This only
1079 * occurs if the LWP is already on a scheduler queue, which means
1080 * that idle cpu notification has already occurred. At most we
1081 * need only issue a need_user_resched() on the appropriate cpu.
1083 * The LWP may be owned by a CPU different from the current one,
1084 * in which case dd->uschedcp may be modified without an MP lock
1085 * or a spinlock held. The worst that happens is that the code
1086 * below causes a spurious need_user_resched() on the target CPU
1087 * and dd->pri to be wrong for a short period of time, both of
1088 * which are harmless.
1090 * If checkpri is 0 we are adjusting the priority of the current
1091 * process, possibly higher (less desirable), so ignore the upri
1092 * check which will fail in that case.
1095 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1097 (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1099 if (rcpu == mycpu->gd_cpuid) {
1100 spin_unlock(&rdd->spin);
1101 need_user_resched();
1103 atomic_clear_cpumask(&dfly_rdyprocmask,
1105 spin_unlock(&rdd->spin);
1106 lwkt_send_ipiq(globaldata_find(rcpu),
1107 dfly_need_user_resched_remote,
1111 spin_unlock(&rdd->spin);
1112 need_user_resched();
1115 spin_unlock(&rdd->spin);
1118 spin_unlock(&rdd->spin);
1125 dfly_yield(struct lwp *lp)
1128 /* FUTURE (or something similar) */
1129 switch(lp->lwp_rqtype) {
1130 case RTP_PRIO_NORMAL:
1131 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1137 need_user_resched();
1141 * Called from fork1() when a new child process is being created.
1143 * Give the child process an initial estcpu that is more batch than
1144 * its parent and dock the parent for the fork (but do not
1145 * reschedule the parent).
1149 * XXX lwp should be "spawning" instead of "forking"
1152 dfly_forking(struct lwp *plp, struct lwp *lp)
1155 * Put the child 4 queue slots (out of 32) higher than the parent
1156 * (less desirable than the parent).
1158 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1160 lp->lwp_estfast = 0;
1163 * Dock the parent a cost for the fork, protecting us from fork
1164 * bombs. If the parent is forking quickly make the child more
1167 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
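/*
 * (Illustrative) ESTCPUPPQ estcpu units correspond to one run queue step in
 * the priority calculation (before the final scaling in dfly_resetpriority()),
 * so the ESTCPUPPQ * 4 added to the child is the "4 queue slots" mentioned
 * above, while the parent's ESTCPUPPQ / 16 surcharge costs it roughly one
 * queue slot per 16 rapid forks.
 */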
1171 * Called when a lwp is being removed from this scheduler, typically
1172 * during lwp_exit(). We have to clean out any ULOAD accounting before
1173 * we can let the lp go. The dd->spin lock is not needed for uload
1176 * Scheduler dequeueing has already occurred, no further action in that
1180 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1182 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1184 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1185 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1186 atomic_add_int(&dd->uload,
1187 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1188 atomic_add_int(&dd->ucount, -1);
1189 atomic_add_int(&dfly_ucount, -1);
1194 * This function cannot block in any way, but spinlocks are ok.
1196 * Update the uload based on the state of the thread (whether it is going
1197 * to sleep or running again). The uload is meant to be a longer-term
1198 * load and not an instantaneous load.
1201 dfly_uload_update(struct lwp *lp)
1203 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1205 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1206 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1207 spin_lock(&dd->spin);
1208 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1209 atomic_set_int(&lp->lwp_mpflags,
1211 atomic_add_int(&dd->uload,
1212 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1213 atomic_add_int(&dd->ucount, 1);
1214 atomic_add_int(&dfly_ucount, 1);
1216 spin_unlock(&dd->spin);
1218 } else if (lp->lwp_slptime > 0) {
1219 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1220 spin_lock(&dd->spin);
1221 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1222 atomic_clear_int(&lp->lwp_mpflags,
1224 atomic_add_int(&dd->uload,
1225 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1226 atomic_add_int(&dd->ucount, -1);
1227 atomic_add_int(&dfly_ucount, -1);
1229 spin_unlock(&dd->spin);
1235 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1236 * it selects a user process and returns it. If chklp is non-NULL and chklp
1237 * has a better or equal priority than the process that would otherwise be
1238 * chosen, NULL is returned.
1240 * Until we fix the RUNQ code the chklp test has to be strict or we may
1241 * bounce between processes trying to acquire the current process designation.
1243 * Must be called with rdd->spin locked. The spinlock is left intact through
1244 * the entire routine. dd->spin does not have to be locked.
1246 * If worst is non-zero this function finds the worst thread instead of the
1247 * best thread (used by the schedulerclock-based rover).
1251 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1252 struct lwp *chklp, int worst)
1256 u_int32_t *which, *which2;
1262 rtqbits = rdd->rtqueuebits;
1263 tsqbits = rdd->queuebits;
1264 idqbits = rdd->idqueuebits;
1268 pri = bsrl(idqbits);
1269 q = &rdd->idqueues[pri];
1270 which = &rdd->idqueuebits;
1272 } else if (tsqbits) {
1273 pri = bsrl(tsqbits);
1274 q = &rdd->queues[pri];
1275 which = &rdd->queuebits;
1277 } else if (rtqbits) {
1278 pri = bsrl(rtqbits);
1279 q = &rdd->rtqueues[pri];
1280 which = &rdd->rtqueuebits;
1285 lp = TAILQ_LAST(q, rq);
1288 pri = bsfl(rtqbits);
1289 q = &rdd->rtqueues[pri];
1290 which = &rdd->rtqueuebits;
1292 } else if (tsqbits) {
1293 pri = bsfl(tsqbits);
1294 q = &rdd->queues[pri];
1295 which = &rdd->queuebits;
1297 } else if (idqbits) {
1298 pri = bsfl(idqbits);
1299 q = &rdd->idqueues[pri];
1300 which = &rdd->idqueuebits;
1305 lp = TAILQ_FIRST(q);
1307 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1310 * If the passed lwp <chklp> is reasonably close to the selected
1311 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1313 * Note that we must error on the side of <chklp> to avoid bouncing
1314 * between threads in the acquire code.
1317 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1321 KTR_COND_LOG(usched_chooseproc,
1322 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1323 lp->lwp_proc->p_pid,
1324 lp->lwp_thread->td_gd->gd_cpuid,
1327 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1328 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1329 TAILQ_REMOVE(q, lp, lwp_procq);
1332 *which &= ~(1 << pri);
1335 * If we are choosing a process from rdd with the intent to
1336 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1340 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1341 atomic_add_int(&rdd->uload,
1342 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1343 atomic_add_int(&rdd->ucount, -1);
1344 atomic_add_int(&dfly_ucount, -1);
1346 lp->lwp_qcpu = dd->cpuid;
1347 atomic_add_int(&dd->uload,
1348 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1349 atomic_add_int(&dd->ucount, 1);
1350 atomic_add_int(&dfly_ucount, 1);
1351 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1359 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1361 * Choose a cpu node to schedule lp on, hopefully nearby its current
1364 * We give the current node a modest advantage for obvious reasons.
1366 * We also give the node the thread was woken up FROM a slight advantage
1367 * in order to try to schedule paired threads which synchronize/block waiting
1368 * for each other fairly close to each other. Similarly in a network setting
1369 * this feature will also attempt to place a user process near the kernel
1370 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1371 * algorithm as it heuristically groups synchronizing processes for locality
1372 * of reference in multi-socket systems.
1374 * We check against running processes and give a big advantage if there
1377 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1379 * When the topology is known choose a cpu whose group has, in aggregate,
1380 * the lowest weighted load.
1384 dfly_choose_best_queue(struct lwp *lp)
1391 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1401 * When the topology is unknown choose a random cpu that is hopefully
1404 if (dd->cpunode == NULL)
1405 return (dfly_choose_queue_simple(dd, lp));
1410 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1411 wakemask = dfly_pcpu[wakecpu].cpumask;
1416 * When the topology is known choose a cpu whose group has, in
1417 * aggregate, the lowest weighted load.
1419 cpup = root_cpu_node;
1424 * Degenerate case super-root
1426 if (cpup->child_node && cpup->child_no == 1) {
1427 cpup = cpup->child_node;
1434 if (cpup->child_node == NULL) {
1435 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1440 lowest_load = 0x7FFFFFFF;
1442 for (n = 0; n < cpup->child_no; ++n) {
1444 * Accumulate load information for all cpus
1445 * which are members of this node.
1447 cpun = &cpup->child_node[n];
1448 mask = cpun->members & usched_global_cpumask &
1449 smp_active_mask & lp->lwp_cpumask;
1457 cpuid = BSFCPUMASK(mask);
1458 rdd = &dfly_pcpu[cpuid];
1460 load += rdd->ucount * usched_dfly_weight3;
1462 if (rdd->uschedcp == NULL &&
1463 rdd->runqcount == 0) {
1464 load -= usched_dfly_weight4;
1465 } else if (rdd->upri > lp->lwp_priority + PPQ) {
1466 load -= usched_dfly_weight4 / 2;
1468 mask &= ~CPUMASK(cpuid);
1473 * Compensate if the lp is already accounted for in
1474 * the aggregate uload for this mask set. We want
1475 * to calculate the loads as if lp were not present,
1476 * otherwise the calculation is bogus.
1478 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1479 (dd->cpumask & cpun->members)) {
1480 load -= ((lp->lwp_priority & ~PPQMASK) &
1482 load -= usched_dfly_weight3;
1488 * Advantage the cpu group (lp) is already on.
1490 if (cpun->members & dd->cpumask)
1491 load -= usched_dfly_weight1;
1494 * Advantage the cpu group we want to pair (lp) to,
1495 * but don't let it go to the exact same cpu as
1496 * the wakecpu target.
1498 * We do this by checking whether cpun is a
1499 * terminal node or not. All cpun's at the same
1500 * level will either all be terminal or all not
1503 * If it is and we match we disadvantage the load.
1504 * If it is and we don't match we advantage the load.
1506 * Also note that we are effectively disadvantaging
1507 * all-but-one by the same amount, so it won't affect
1508 * the weight1 factor for the all-but-one nodes.
1510 if (cpun->members & wakemask) {
1511 if (cpun->child_node != NULL) {
1513 load -= usched_dfly_weight2;
1515 if (usched_dfly_features & 0x10)
1516 load += usched_dfly_weight2;
1518 load -= usched_dfly_weight2;
1523 * Calculate the best load
1525 if (cpub == NULL || lowest_load > load ||
1526 (lowest_load == load &&
1527 (cpun->members & dd->cpumask))
1535 if (usched_dfly_chooser)
1536 kprintf("lp %02d->%02d %s\n",
1537 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
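/*
 * (Illustrative, simplified) With the default weights, the per-node sums
 * computed above work out roughly as follows: an otherwise idle remote cpu
 * is credited -weight4 (-40), the thread's current cpu is credited -weight1
 * (-50), and every runnable thread already queued on a candidate adds
 * weight3 (10) plus its masked lwp_priority to uload.  A thread therefore
 * tends to stay put unless its home cpu is already carrying queued load,
 * and weight2 (30) nudges it toward, but (with feature 0x10) not onto, the
 * cpu that last woke it.
 */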
1542 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1544 * Choose the worst queue close to dd's cpu node with a non-empty runq
1545 * that is NOT dd. Also require that the moving of the highest-load thread
1546 * from rdd to dd does not cause the uloads to cross each other.
1548 * This is used by the thread chooser when the current cpu's queues are
1549 * empty to steal a thread from another cpu's queue. We want to offload
1550 * the most heavily-loaded queue.
1554 dfly_choose_worst_queue(dfly_pcpu_t dd)
1570 * When the topology is unknown choose a random cpu that is hopefully
1573 if (dd->cpunode == NULL) {
1578 * When the topology is known choose a cpu whose group has, in
1579 * aggregate, the lowest weighted load.
1581 cpup = root_cpu_node;
1585 * Degenerate case super-root
1587 if (cpup->child_node && cpup->child_no == 1) {
1588 cpup = cpup->child_node;
1595 if (cpup->child_node == NULL) {
1596 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1603 for (n = 0; n < cpup->child_no; ++n) {
1605 * Accumulate load information for all cpus
1606 * which are members of this node.
1608 cpun = &cpup->child_node[n];
1609 mask = cpun->members & usched_global_cpumask &
1617 cpuid = BSFCPUMASK(mask);
1618 rdd = &dfly_pcpu[cpuid];
1620 load += rdd->ucount * usched_dfly_weight3;
1621 if (rdd->uschedcp == NULL &&
1622 rdd->runqcount == 0 &&
1623 globaldata_find(cpuid)->gd_tdrunqcount == 0
1625 load -= usched_dfly_weight4;
1626 } else if (rdd->upri > dd->upri + PPQ) {
1627 load -= usched_dfly_weight4 / 2;
1629 mask &= ~CPUMASK(cpuid);
1635 * Prefer candidates which are somewhat closer to
1638 if (dd->cpumask & cpun->members)
1639 load += usched_dfly_weight1;
1642 * The best candidate is the one with the worst
1645 if (cpub == NULL || highest_load < load) {
1646 highest_load = load;
1654 * We never return our own node (dd), and only return a remote
1655 * node if its load is significantly worse than ours (i.e. where
1656 * stealing a thread would be considered reasonable).
1658 * This also helps us avoid breaking paired threads apart which
1659 * can have disastrous effects on performance.
1665 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1667 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1669 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1672 if (rdd->uload - hpri < dd->uload + hpri)
1679 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1687 * Fallback to the original heuristic, select random cpu,
1688 * first checking cpus not currently running a user thread.
1691 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1692 mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1693 smp_active_mask & usched_global_cpumask;
1696 tmpmask = ~(CPUMASK(cpuid) - 1);
1698 cpuid = BSFCPUMASK(mask & tmpmask);
1700 cpuid = BSFCPUMASK(mask);
1701 rdd = &dfly_pcpu[cpuid];
1703 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1705 mask &= ~CPUMASK(cpuid);
1709 * Then cpus which might have a currently running lp
1711 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1712 mask = dfly_curprocmask & dfly_rdyprocmask &
1713 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1716 tmpmask = ~(CPUMASK(cpuid) - 1);
1718 cpuid = BSFCPUMASK(mask & tmpmask);
1720 cpuid = BSFCPUMASK(mask);
1721 rdd = &dfly_pcpu[cpuid];
1723 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1725 mask &= ~CPUMASK(cpuid);
1729 * If we cannot find a suitable cpu we reload from dfly_scancpu
1730 * and round-robin. Other cpus will pick up as they release their
1731 * current lwps or become ready.
1733 * Avoid a degenerate system lockup case if usched_global_cpumask
1734 * is set to 0 or otherwise does not cover lwp_cpumask.
1736 * We only kick the target helper thread in this case, we do not
1737 * set the user resched flag because
1739 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1740 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1742 rdd = &dfly_pcpu[cpuid];
1749 dfly_need_user_resched_remote(void *dummy)
1751 globaldata_t gd = mycpu;
1752 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1754 need_user_resched();
1756 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1757 wakeup_mycpu(&dd->helper_thread);
1763 * dfly_remrunqueue_locked() removes a given process from the run queue
1764 * that it is on, clearing the queue busy bit if it becomes empty.
1766 * Note that the user process scheduler is different from the LWKT scheduler.
1767 * The user process scheduler only manages user processes but it uses LWKT
1768 * underneath, and a user process operating in the kernel will often be
1769 * 'released' from our management.
1771 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1772 * to sleep or the lwp is moved to a different runq.
1775 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1781 KKASSERT(rdd->runqcount >= 0);
1783 pri = lp->lwp_rqindex;
1785 switch(lp->lwp_rqtype) {
1786 case RTP_PRIO_NORMAL:
1787 q = &rdd->queues[pri];
1788 which = &rdd->queuebits;
1790 case RTP_PRIO_REALTIME:
1792 q = &rdd->rtqueues[pri];
1793 which = &rdd->rtqueuebits;
1796 q = &rdd->idqueues[pri];
1797 which = &rdd->idqueuebits;
1800 panic("remrunqueue: invalid rtprio type");
1803 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1804 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1805 TAILQ_REMOVE(q, lp, lwp_procq);
1807 if (TAILQ_EMPTY(q)) {
1808 KASSERT((*which & (1 << pri)) != 0,
1809 ("remrunqueue: remove from empty queue"));
1810 *which &= ~(1 << pri);
1815 * dfly_setrunqueue_locked()
1817 * Add a process whose rqtype and rqindex had previously been calculated
1818 * onto the appropriate run queue. Determine if the addition requires
1819 * a reschedule on a cpu and return the cpuid or -1.
1821 * NOTE: Lower priorities are better priorities.
1823 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1824 * sum of the rough lwp_priority for all running and runnable
1825 * processes. Lower priority processes (higher lwp_priority
1826 * values) actually DO count as more load, not less, because
1827 * these are the programs which require the most care with
1828 * regards to cpu selection.
1831 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1837 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1839 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1840 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1841 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1842 (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1843 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1844 atomic_add_int(&dfly_ucount, 1);
1847 pri = lp->lwp_rqindex;
1849 switch(lp->lwp_rqtype) {
1850 case RTP_PRIO_NORMAL:
1851 q = &rdd->queues[pri];
1852 which = &rdd->queuebits;
1854 case RTP_PRIO_REALTIME:
1856 q = &rdd->rtqueues[pri];
1857 which = &rdd->rtqueuebits;
1860 q = &rdd->idqueues[pri];
1861 which = &rdd->idqueuebits;
1864 panic("remrunqueue: invalid rtprio type");
1869 * Add to the correct queue and set the appropriate bit. If no
1870 * lower priority (i.e. better) processes are in the queue then
1871 * we want a reschedule, calculate the best cpu for the job.
1873 * Always run reschedules on the LWP's original cpu.
1875 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1876 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1878 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1885 * For SMP systems a user scheduler helper thread is created for each
1886 * cpu and is used to allow one cpu to wakeup another for the purposes of
1887 * scheduling userland threads from setrunqueue().
1889 * UP systems do not need the helper since there is only one cpu.
1891 * We can't use the idle thread for this because we might block.
1892 * Additionally, doing things this way allows us to HLT idle cpus
1896 dfly_helper_thread(void *dummy)
1906 cpuid = gd->gd_cpuid; /* doesn't change */
1907 mask = gd->gd_cpumask; /* doesn't change */
1908 dd = &dfly_pcpu[cpuid];
1911 * Since we only want to be woken up when no user processes
1912 * are scheduled on a cpu, run at an ultra low priority.
1914 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1916 tsleep(&dd->helper_thread, 0, "schslp", 0);
1920 * We use the LWKT deschedule-interlock trick to avoid racing
1921 * dfly_rdyprocmask. This means we cannot block through to the
1922 * manual lwkt_switch() call we make below.
1925 tsleep_interlock(&dd->helper_thread, 0);
1927 spin_lock(&dd->spin);
1929 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1930 clear_user_resched(); /* This satisfied the reschedule request */
1931 dd->rrcount = 0; /* Reset the round-robin counter */
1933 if (dd->runqcount || dd->uschedcp != NULL) {
1935 * Threads are available. A thread may or may not be
1936 * currently scheduled. Get the best thread already queued
1939 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
1941 atomic_set_cpumask(&dfly_curprocmask, mask);
1942 dd->upri = nlp->lwp_priority;
1944 dd->rrcount = 0; /* reset round robin */
1945 spin_unlock(&dd->spin);
1946 lwkt_acquire(nlp->lwp_thread);
1947 lwkt_schedule(nlp->lwp_thread);
1950 * This situation should not occur because we had
1951 * at least one thread available.
1953 spin_unlock(&dd->spin);
1955 } else if (usched_dfly_features & 0x01) {
1957 * This cpu is devoid of runnable threads, steal a thread
1958 * from another cpu. Since we're stealing, might as well
1959 * load balance at the same time.
1961 * NOTE! This function only returns a non-NULL rdd when
1962 * another cpu's queue is obviously overloaded. We
1963 * do not want to perform the type of rebalancing
1964 * the schedclock does here because it would result
1965 * in insane process pulling when 'steady' state is
1966 * partially unbalanced (e.g. 6 runnables and only
1969 rdd = dfly_choose_worst_queue(dd);
1970 if (rdd && spin_trylock(&rdd->spin)) {
1971 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 0);
1972 spin_unlock(&rdd->spin);
1977 atomic_set_cpumask(&dfly_curprocmask, mask);
1978 dd->upri = nlp->lwp_priority;
1980 dd->rrcount = 0; /* reset round robin */
1981 spin_unlock(&dd->spin);
1982 lwkt_acquire(nlp->lwp_thread);
1983 lwkt_schedule(nlp->lwp_thread);
1986 * Leave the thread on our run queue. Another
1987 * scheduler will try to pull it later.
1989 spin_unlock(&dd->spin);
1993 * devoid of runnable threads and not allowed to steal
1996 spin_unlock(&dd->spin);
2000 * We're descheduled unless someone scheduled us. Switch away.
2001 * Exiting the critical section will cause splz() to be called
2002 * for us if interrupts and such are pending.
2005 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2011 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2015 new_val = usched_dfly_stick_to_level;
2017 error = sysctl_handle_int(oidp, &new_val, 0, req);
2018 if (error != 0 || req->newptr == NULL)
2020 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2022 usched_dfly_stick_to_level = new_val;
2028 * Setup our scheduler helpers. Note that curprocmask bit 0 has already
2029 * been cleared by rqinit() and we should not mess with it further.
2032 dfly_helper_thread_cpu_init(void)
2037 int smt_not_supported = 0;
2038 int cache_coherent_not_supported = 0;
2041 kprintf("Start scheduler helpers on cpus:\n");
2043 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2044 usched_dfly_sysctl_tree =
2045 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2046 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2047 "usched_dfly", CTLFLAG_RD, 0, "");
2049 for (i = 0; i < ncpus; ++i) {
2050 dfly_pcpu_t dd = &dfly_pcpu[i];
2051 cpumask_t mask = CPUMASK(i);
2053 if ((mask & smp_active_mask) == 0)
2056 spin_init(&dd->spin);
2057 dd->cpunode = get_cpu_node_by_cpuid(i);
2059 dd->cpumask = CPUMASK(i);
2060 for (j = 0; j < NQS; j++) {
2061 TAILQ_INIT(&dd->queues[j]);
2062 TAILQ_INIT(&dd->rtqueues[j]);
2063 TAILQ_INIT(&dd->idqueues[j]);
2065 atomic_clear_cpumask(&dfly_curprocmask, 1);
2067 if (dd->cpunode == NULL) {
2068 smt_not_supported = 1;
2069 cache_coherent_not_supported = 1;
2071 kprintf ("\tcpu%d - WARNING: No CPU NODE "
2072 "found for cpu\n", i);
2074 switch (dd->cpunode->type) {
2077 kprintf ("\tcpu%d - HyperThreading "
2078 "available. Core siblings: ",
2082 smt_not_supported = 1;
2085 kprintf ("\tcpu%d - No HT available, "
2086 "multi-core/physical "
2087 "cpu. Physical siblings: ",
2091 smt_not_supported = 1;
2094 kprintf ("\tcpu%d - No HT available, "
2095 "single-core/physical cpu. "
2096 "Package Siblings: ",
2100 /* Let's go for safe defaults here */
2101 smt_not_supported = 1;
2102 cache_coherent_not_supported = 1;
2104 kprintf ("\tcpu%d - Unknown cpunode->"
2105 "type=%u. Siblings: ",
2107 (u_int)dd->cpunode->type);
2112 if (dd->cpunode->parent_node != NULL) {
2113 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
2114 kprintf("cpu%d ", cpuid);
2117 kprintf(" no siblings\n");
2122 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2123 0, i, "usched %d", i);
2126 * Allow user scheduling on the target cpu. cpu #0 has already
2127 * been enabled in rqinit().
2130 atomic_clear_cpumask(&dfly_curprocmask, mask);
2131 atomic_set_cpumask(&dfly_rdyprocmask, mask);
2132 dd->upri = PRIBASE_NULL;
2136 /* usched_dfly sysctl configurable parameters */
2138 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2139 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2140 OID_AUTO, "rrinterval", CTLFLAG_RW,
2141 &usched_dfly_rrinterval, 0, "");
2142 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2143 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2144 OID_AUTO, "decay", CTLFLAG_RW,
2145 &usched_dfly_decay, 0, "Extra decay when not running");
2147 /* Add enable/disable option for SMT scheduling if supported */
2148 if (smt_not_supported) {
2149 usched_dfly_smt = 0;
2150 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2151 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2152 OID_AUTO, "smt", CTLFLAG_RD,
2153 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2155 usched_dfly_smt = 1;
2156 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2157 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2158 OID_AUTO, "smt", CTLFLAG_RW,
2159 &usched_dfly_smt, 0, "Enable SMT scheduling");
2163 * Add enable/disable option for cache coherent scheduling
2166 if (cache_coherent_not_supported) {
2167 usched_dfly_cache_coherent = 0;
2168 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2169 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2170 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2172 "Cache coherence NOT SUPPORTED");
2174 usched_dfly_cache_coherent = 1;
2175 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2176 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2177 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2178 &usched_dfly_cache_coherent, 0,
2179 "Enable/Disable cache coherent scheduling");
2181 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2182 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2183 OID_AUTO, "weight1", CTLFLAG_RW,
2184 &usched_dfly_weight1, 10,
2185 "Weight selection for current cpu");
2187 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2188 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2189 OID_AUTO, "weight2", CTLFLAG_RW,
2190 &usched_dfly_weight2, 5,
2191 "Weight selection for wakefrom cpu");
2193 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2194 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2195 OID_AUTO, "weight3", CTLFLAG_RW,
2196 &usched_dfly_weight3, 50,
2197 "Weight selection for num threads on queue");
2199 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2200 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2201 OID_AUTO, "weight4", CTLFLAG_RW,
2202 &usched_dfly_weight4, 50,
2203 "Availability of other idle cpus");
2205 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2206 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2207 OID_AUTO, "features", CTLFLAG_RW,
2208 &usched_dfly_features, 15,
2209 "Allow pulls into empty queues");
2213 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2214 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2215 OID_AUTO, "stick_to_level",
2216 CTLTYPE_INT | CTLFLAG_RW,
2217 NULL, sizeof usched_dfly_stick_to_level,
2218 sysctl_usched_dfly_stick_to_level, "I",
2219 "Stick a process to this level. See sysctl"
2220 "paremter hw.cpu_topology.level_description");
2224 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2225 dfly_helper_thread_cpu_init, NULL)
2227 #else /* No SMP options - just add the configurable parameters to sysctl */
2230 sched_sysctl_tree_init(void)
2232 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2233 usched_dfly_sysctl_tree =
2234 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2235 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2236 "usched_dfly", CTLFLAG_RD, 0, "");
2238 /* usched_dfly sysctl configurable parameters */
2239 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2240 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2241 OID_AUTO, "rrinterval", CTLFLAG_RW,
2242 &usched_dfly_rrinterval, 0, "");
2243 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2244 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2245 OID_AUTO, "decay", CTLFLAG_RW,
2246 &usched_dfly_decay, 0, "Extra decay when not running");
2248 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2249 sched_sysctl_tree_init, NULL)