2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
41 #include <sys/queue.h>
43 #include <sys/rtprio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
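/*
 * Worked example (a sketch, assuming MAXPRI is 128, matching the 128
 * normal-class priorities described in the run-queue comment below):
 * PPQ = 128 / 32 = 4 priority levels per queue, so a normal-class lwp
 * whose masked priority is 57 lands on run queue index
 * (57 & PRIMASK) / PPQ = 57 / 4 = 14, exactly as computed in
 * dfly_resetpriority():
 *
 *	lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
 */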
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_batch lwp_usdata.dfly.batch
97 #define lwp_rqtype lwp_usdata.dfly.rqtype
98 #define lwp_qcpu lwp_usdata.dfly.qcpu
100 struct usched_dfly_pcpu {
101 struct spinlock spin;
102 struct thread helper_thread;
106 struct lwp *uschedcp;
107 struct rq queues[NQS];
108 struct rq rtqueues[NQS];
109 struct rq idqueues[NQS];
111 u_int32_t rtqueuebits;
112 u_int32_t idqueuebits;
121 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123 static void dfly_acquire_curproc(struct lwp *lp);
124 static void dfly_release_curproc(struct lwp *lp);
125 static void dfly_select_curproc(globaldata_t gd);
126 static void dfly_setrunqueue(struct lwp *lp);
127 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
129 static void dfly_recalculate_estcpu(struct lwp *lp);
130 static void dfly_resetpriority(struct lwp *lp);
131 static void dfly_forking(struct lwp *plp, struct lwp *lp);
132 static void dfly_exiting(struct lwp *lp, struct proc *);
133 static void dfly_uload_update(struct lwp *lp);
134 static void dfly_yield(struct lwp *lp);
136 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
137 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
138 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
140 static void dfly_wakeup_random_helper(dfly_pcpu_t notdd);
145 static void dfly_need_user_resched_remote(void *dummy);
147 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp,
149 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
150 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
152 struct usched usched_dfly = {
154 "dfly", "Original DragonFly Scheduler",
155 NULL, /* default registration */
156 NULL, /* default deregistration */
157 dfly_acquire_curproc,
158 dfly_release_curproc,
161 dfly_recalculate_estcpu,
166 NULL, /* setcpumask not supported */
171 * We have NQS (32) run queues per scheduling class. For the normal
172 * class, there are 128 priorities scaled onto these 32 queues. New
173 * processes are added to the last entry in each queue, and processes
174 * are selected for running by taking them from the head and maintaining
175 * a simple FIFO arrangement. Realtime and Idle priority processes have
176 * an explicit 0-31 priority which maps directly onto their class queue
177 * index. When a queue has something in it, the corresponding bit is
178 * set in the queuebits variable, allowing a single read to determine
179 * the state of all 32 queues and then an ffs() to find the first busy queue.
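/*
 * Illustrative sketch of the queuebits scan described above; this mirrors
 * what dfly_chooseproc_locked() does with bsfl() further below and is not
 * a separate helper in this file.  One bit is set per non-empty queue,
 * bsfl() finds the first busy queue, and the FIFO head of that queue is
 * the next candidate:
 *
 *	u_int32_t qbits = dd->queuebits;
 *	if (qbits) {
 *		int pri = bsfl(qbits);
 *		struct rq *q = &dd->queues[pri];
 *		struct lwp *lp = TAILQ_FIRST(q);
 *		...
 *	}
 */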
182 static cpumask_t dfly_curprocmask = -1; /* currently running a user process */
183 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
185 static volatile int dfly_scancpu;
186 /*static struct spinlock dfly_spin = SPINLOCK_INITIALIZER(dfly_spin);*/
188 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
189 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
190 static struct sysctl_oid *usched_dfly_sysctl_tree;
192 /* Debug info exposed through debug.* sysctl */
194 static int usched_dfly_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
196 &usched_dfly_debug, 0,
197 "Print debug information for this pid");
199 static int usched_dfly_pid_debug = -1;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
201 &usched_dfly_pid_debug, 0,
202 "Print KTR debug information for this pid");
204 static int usched_dfly_chooser = 0;
205 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
206 &usched_dfly_chooser, 0,
207 "Print KTR debug information for this pid");
209 /* Tuning usched_dfly - configurable through kern.usched_dfly.* */
211 static int usched_dfly_smt = 0;
212 static int usched_dfly_cache_coherent = 0;
213 static int usched_dfly_weight1 = 10;
214 static int usched_dfly_weight2 = 5;
215 static int usched_dfly_stick_to_level = 0;
217 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
218 static int usched_dfly_decay = 8;
219 static int usched_dfly_batch_time = 10;
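/*
 * A quick sanity check on the tunables above (a sketch; ESTCPUFREQ is the
 * scheduler clock frequency defined elsewhere): rrinterval is
 * (ESTCPUFREQ + 9) / 10 scheduler ticks, i.e. roughly one tenth of a
 * second.  For example, with a hypothetical ESTCPUFREQ of 50 this gives
 * (50 + 9) / 10 = 5 ticks, which is what produces the "round-robin 10
 * times a second" behaviour noted in dfly_schedulerclock() below.
 */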
221 /* KTR debug printings */
223 KTR_INFO_MASTER(usched);
225 #if !defined(KTR_USCHED_DFLY)
226 #define KTR_USCHED_DFLY KTR_ALL
230 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_urw, 0,
231 "USCHED_DFLY(dfly_acquire_curproc in user_reseched_wanted "
232 "after release: pid %d, cpuid %d, curr_cpuid %d)",
233 pid_t pid, int cpuid, int curr);
234 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_before_loop, 0,
235 "USCHED_DFLY(dfly_acquire_curproc before loop: pid %d, cpuid %d, "
237 pid_t pid, int cpuid, int curr);
238 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_not, 0,
239 "USCHED_DFLY(dfly_acquire_curproc couldn't acquire after "
240 "dfly_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
241 pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
242 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_acquire_curproc_switch, 0,
243 "USCHED_DFLY(dfly_acquire_curproc after lwkt_switch: pid %d, "
244 "cpuid %d, curr_cpuid %d)",
245 pid_t pid, int cpuid, int curr);
247 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_release_curproc, 0,
248 "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
249 "cpuid %d, curr_cpuid %d)",
250 pid_t pid, int cpuid, int curr);
252 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_select_curproc, 0,
253 "USCHED_DFLY(dfly_release_curproc before select: pid %d, "
254 "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
255 pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
258 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_false, 0,
259 "USCHED_DFLY(batchy_looser_pri_test false: pid %d, "
260 "cpuid %d, verify_mask %lu)",
261 pid_t pid, int cpuid, cpumask_t mask);
262 KTR_INFO(KTR_USCHED_DFLY, usched, batchy_test_true, 0,
263 "USCHED_DFLY(batchy_looser_pri_test true: pid %d, "
264 "cpuid %d, verify_mask %lu)",
265 pid_t pid, int cpuid, cpumask_t mask);
267 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_smt, 0,
268 "USCHED_DFLY(dfly_setrunqueue free cpus smt: pid %d, cpuid %d, "
269 "mask %lu, curr_cpuid %d)",
270 pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_fc_non_smt, 0,
272 "USCHED_DFLY(dfly_setrunqueue free cpus check non_smt: pid %d, "
273 "cpuid %d, mask %lu, curr_cpuid %d)",
274 pid_t pid, int cpuid, cpumask_t mask, int curr);
275 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_rc, 0,
276 "USCHED_DFLY(dfly_setrunqueue running cpus check: pid %d, "
277 "cpuid %d, mask %lu, curr_cpuid %d)",
278 pid_t pid, int cpuid, cpumask_t mask, int curr);
279 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found, 0,
280 "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
281 "mask %lu, found_cpuid %d, curr_cpuid %d)",
282 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
283 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_not_found, 0,
284 "USCHED_DFLY(dfly_setrunqueue not found cpu: pid %d, cpuid %d, "
285 "try_cpuid %d, curr_cpuid %d)",
286 pid_t pid, int cpuid, int try_cpuid, int curr);
287 KTR_INFO(KTR_USCHED_DFLY, usched, dfly_setrunqueue_found_best_cpuid, 0,
288 "USCHED_DFLY(dfly_setrunqueue found cpu: pid %d, cpuid %d, "
289 "mask %lu, found_cpuid %d, curr_cpuid %d)",
290 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
294 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
295 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
296 pid_t pid, int old_cpuid, int curr);
299 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc, 0,
300 "USCHED_DFLY(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
301 pid_t pid, int old_cpuid, int curr);
302 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_not_good, 0,
303 "USCHED_DFLY(chooseproc_cc not good: pid %d, old_cpumask %lu, "
304 "sibling_mask %lu, curr_cpumask %lu)",
305 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
306 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc_cc_elected, 0,
307 "USCHED_DFLY(chooseproc_cc elected: pid %d, old_cpumask %lu, "
308 "sibling_mask %lu, curr_cpumask: %lu)",
309 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
312 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process, 0,
313 "USCHED_DFLY(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
314 int id, pid_t pid, int cpuid);
315 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_process, 0,
316 "USCHED_DFLY(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
317 int id, pid_t pid, int cpuid);
319 KTR_INFO(KTR_USCHED_DFLY, usched, sched_thread_no_process_found, 0,
320 "USCHED_DFLY(sched_thread %d no process found; tmpmask %lu)",
321 int id, cpumask_t tmpmask);
326 * DFLY_ACQUIRE_CURPROC
328 * This function is called when the kernel intends to return to userland.
329 * It is responsible for making the thread the current designated userland
330 * thread for this cpu, blocking if necessary.
332 * The kernel has already depressed our LWKT priority so we must not switch
333 * until we have either assigned or disposed of the thread.
335 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
336 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
337 * occur, this function is called only under very controlled circumstances.
340 dfly_acquire_curproc(struct lwp *lp)
347 * Make sure we aren't sitting on a tsleep queue.
350 crit_enter_quick(td);
351 if (td->td_flags & TDF_TSLEEPQ)
353 dfly_recalculate_estcpu(lp);
356 * If a reschedule was requested give another thread the
359 if (user_resched_wanted()) {
360 clear_user_resched();
361 dfly_release_curproc(lp);
365 * Loop until we are the current user thread
368 dd = &dfly_pcpu[gd->gd_cpuid];
372 * Process any pending events and higher priority threads.
377 * Become the currently scheduled user thread for this cpu
378 * if we can do so trivially.
380 * We can steal another thread's current-thread designation
381 * on this cpu since, if we are running, that other thread
382 * must not be, so we can safely deschedule it.
384 if (dd->uschedcp == lp) {
386 * We are already the current lwp (hot path).
388 dd->upri = lp->lwp_priority;
389 } else if (dd->uschedcp == NULL) {
391 * We can trivially become the current lwp.
393 atomic_set_cpumask(&dfly_curprocmask, gd->gd_cpumask);
395 dd->upri = lp->lwp_priority;
396 KKASSERT(lp->lwp_qcpu == dd->cpuid);
397 } else if (dd->uschedcp && dd->upri > lp->lwp_priority) {
399 * We can steal the current cpu's lwp designation
400 * away simply by replacing it. The other thread
401 * will stall when it tries to return to userland,
402 * possibly rescheduling elsewhere when it calls
406 dd->upri = lp->lwp_priority;
407 KKASSERT(lp->lwp_qcpu == dd->cpuid);
410 * We cannot become the current lwp, place the lp
411 * on the run-queue of this or another cpu and
412 * deschedule ourselves.
414 * When we are reactivated we will have another
417 lwkt_deschedule(lp->lwp_thread);
418 dfly_setrunqueue(lp);
421 * Reload after a switch or setrunqueue/switch possibly
422 * moved us to another cpu.
426 dd = &dfly_pcpu[gd->gd_cpuid];
428 } while (dd->uschedcp != lp);
431 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
435 * DFLY_RELEASE_CURPROC
437 * This routine detaches the current thread from the userland scheduler,
438 * usually because the thread needs to run or block in the kernel (at
439 * kernel priority) for a while.
441 * This routine is also responsible for selecting a new thread to
442 * make the current thread.
444 * NOTE: This implementation differs from the dummy example in that
445 * dfly_select_curproc() is able to select the current process, whereas
446 * dummy_select_curproc() is not able to select the current process.
447 * This means we have to NULL out uschedcp.
449 * Additionally, note that we may already be on a run queue if releasing
450 * via the lwkt_switch() in dfly_setrunqueue().
454 dfly_release_curproc(struct lwp *lp)
456 globaldata_t gd = mycpu;
457 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
460 * Make sure td_wakefromcpu is defaulted. This will be overwritten
463 lp->lwp_thread->td_wakefromcpu = gd->gd_cpuid;
465 if (dd->uschedcp == lp) {
467 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
469 dd->uschedcp = NULL; /* don't let lp be selected */
470 dd->upri = PRIBASE_NULL;
471 atomic_clear_cpumask(&dfly_curprocmask, gd->gd_cpumask);
472 dfly_select_curproc(gd);
478 * DFLY_SELECT_CURPROC
480 * Select a new current process for this cpu and clear any pending user
481 * reschedule request. The cpu currently has no current process.
483 * This routine is also responsible for equal-priority round-robining,
484 * typically triggered from dfly_schedulerclock(). In our dummy example
485 * all the 'user' threads are LWKT scheduled all at once and we just
486 * call lwkt_switch().
488 * The calling process is not on the queue and cannot be selected.
492 dfly_select_curproc(globaldata_t gd)
494 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
496 int cpuid = gd->gd_cpuid;
500 /*spin_lock(&dfly_spin);*/
501 spin_lock(&dd->spin);
502 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
505 atomic_set_cpumask(&dfly_curprocmask, CPUMASK(cpuid));
506 dd->upri = nlp->lwp_priority;
508 dd->rrcount = 0; /* reset round robin */
509 spin_unlock(&dd->spin);
510 /*spin_unlock(&dfly_spin);*/
512 lwkt_acquire(nlp->lwp_thread);
514 lwkt_schedule(nlp->lwp_thread);
516 spin_unlock(&dd->spin);
517 /*spin_unlock(&dfly_spin);*/
523 * Place the specified lwp on the user scheduler's run queue. This routine
524 * must be called with the thread descheduled. The lwp must be runnable.
525 * It must not be possible for anyone else to explicitly schedule this thread.
527 * The thread may be the current thread as a special case.
530 dfly_setrunqueue(struct lwp *lp)
538 * First validate the process LWKT state.
541 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
542 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
543 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
544 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
545 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
548 * NOTE: rdd does not necessarily represent the current cpu.
549 * Instead it represents the cpu the thread was last
552 rdd = &dfly_pcpu[lp->lwp_qcpu];
555 * This process is not supposed to be scheduled anywhere or assigned
556 * as the current process anywhere. Assert the condition.
558 KKASSERT(rdd->uschedcp != lp);
562 * If we are not SMP we do not have a scheduler helper to kick
563 * and must directly activate the process if none are scheduled.
565 * This is really only an issue when bootstrapping init since
566 * the caller in all other cases will be a user process, and
567 * even if released (rdd->uschedcp == NULL), that process will
568 * kickstart the scheduler when it returns to user mode from
571 * NOTE: On SMP we can't just set some other cpu's uschedcp.
573 if (rdd->uschedcp == NULL) {
574 spin_lock(&rdd->spin);
575 if (rdd->uschedcp == NULL) {
576 atomic_set_cpumask(&dfly_curprocmask, 1);
578 rdd->upri = lp->lwp_priority;
579 spin_unlock(&rdd->spin);
580 lwkt_schedule(lp->lwp_thread);
584 spin_unlock(&rdd->spin);
590 * XXX fixme. Could be part of a remrunqueue/setrunqueue
591 * operation when the priority is recalculated, so TDF_MIGRATING
592 * may already be set.
594 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
595 lwkt_giveaway(lp->lwp_thread);
600 * Ok, we have to setrunqueue some target cpu and request a reschedule
603 * We have to choose the best target cpu. It might not be the current
604 * target even if the current cpu has no running user thread (for
605 * example, because the current cpu might be a hyperthread and its
606 * sibling has a thread assigned).
608 * If we just forked it is usually best to run the child on the same
609 * cpu just in case the parent decides to wait for it (thus getting
610 * off that cpu). As long as there is nothing else runnable on the
611 * cpu, that is. If we did this unconditionally a parent forking
612 * multiple children before waiting (e.g. make -j N) leaves other
613 * cpus idle that could be working.
615 /*spin_lock(&dfly_spin);*/
616 if (lp->lwp_forked) {
618 if (dfly_pcpu[lp->lwp_qcpu].runqcount)
619 rdd = dfly_choose_best_queue(lp);
621 rdd = &dfly_pcpu[lp->lwp_qcpu];
622 /* dfly_wakeup_random_helper(rdd); */
624 rdd = dfly_choose_best_queue(lp);
626 rgd = globaldata_find(rdd->cpuid);
629 * We lose control of lp the moment we release the spinlock after
630 * having placed lp on the queue. i.e. another cpu could pick it
631 * up and it could exit, or its priority could be further adjusted,
632 * or something like that.
634 * WARNING! rdd can point to a foreign cpu!
636 spin_lock(&rdd->spin);
637 dfly_setrunqueue_locked(rdd, lp);
638 /*spin_unlock(&dfly_spin);*/
641 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
642 spin_unlock(&rdd->spin);
643 if (rdd->uschedcp == NULL) {
644 wakeup_mycpu(&rdd->helper_thread); /* XXX */
650 spin_unlock(&rdd->spin);
653 atomic_clear_cpumask(&dfly_rdyprocmask, rgd->gd_cpumask);
654 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
655 spin_unlock(&rdd->spin);
656 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote,
659 spin_unlock(&rdd->spin);
660 wakeup(&rdd->helper_thread);
665 * Request a reschedule if appropriate.
667 spin_lock(&rdd->spin);
668 dfly_setrunqueue_locked(rdd, lp);
669 spin_unlock(&rdd->spin);
670 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
680 * This wakes up a random helper that might have no work on its cpu to do.
681 * The idea is to improve fork/fork-exec/fork-wait/exec and similar
682 * process-spawning sequences by first scheduling the forked process
683 * on the same cpu as the parent, in case the parent is just going to
684 * wait*(). But if the parent does not wait we want another cpu to pick
685 * the forked process up ASAP.
687 * The ipi/helper-scheduling sequence typically takes a lot longer to run
688 * than a return-from-procedure-call and the parent then entering a
689 * wait*(). There's a race here that we want the parent to win ONLY if
690 * it is going to wait*().
692 * If a process sticks around for long enough normal scheduling action
693 * will move it to the right place.
697 dfly_wakeup_random_helper(dfly_pcpu_t notdd)
703 mask = dfly_rdyprocmask & ~dfly_curprocmask & smp_active_mask &
704 usched_global_cpumask & ~notdd->cpumask;
706 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
709 tmpmask = ~(CPUMASK(cpuid) - 1);
711 cpuid = BSFCPUMASK(mask & tmpmask);
713 cpuid = BSFCPUMASK(mask);
714 atomic_clear_cpumask(&dfly_rdyprocmask, CPUMASK(cpuid));
715 wakeup(&dfly_pcpu[cpuid].helper_thread);
722 * This routine is called from a systimer IPI. It MUST be MP-safe and
723 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
728 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
730 globaldata_t gd = mycpu;
731 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
734 * Do we need to round-robin? We round-robin 10 times a second.
735 * This should only occur for cpu-bound batch processes.
737 if (++dd->rrcount >= usched_dfly_rrinterval) {
743 * Adjust estcpu upward using a real time equivalent calculation.
745 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
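	/*
	 * A sketch of the effect of the adjustment above: each scheduler
	 * tick adds ESTCPUMAX / ESTCPUFREQ + 1 to estcpu, and there are
	 * ESTCPUFREQ ticks per second, so a thread that runs flat out
	 * accumulates roughly ESTCPUMAX worth of estcpu in about one
	 * second of cpu time before ESTCPULIM() clamps it.
	 */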
748 * Spinlocks also hold a critical section so there should not be
751 KKASSERT(gd->gd_spinlocks_wr == 0);
753 dfly_resetpriority(lp);
757 * Called from acquire and from kern_synch's one-second timer (one of the
758 * callout helper threads) with a critical section held.
760 * Decay p_estcpu based on the number of ticks we haven't been running
761 * and our p_nice. As the load increases each process observes a larger
762 * number of idle ticks (because other processes are running in them).
763 * This observation leads to a larger correction which tends to make the
764 * system more 'batchy'.
766 * Note that no recalculation occurs for a process which sleeps and wakes
767 * up in the same tick. That is, a system doing thousands of context
768 * switches per second will still only do serious estcpu calculations
769 * ESTCPUFREQ times per second.
773 dfly_recalculate_estcpu(struct lwp *lp)
775 globaldata_t gd = mycpu;
776 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
783 * We have to subtract periodic to get the last schedclock
784 * timeout time, otherwise we would get the upcoming timeout.
785 * Keep in mind that a process can migrate between cpus and
786 * while the scheduler clock should be very close, boundary
787 * conditions could lead to a small negative delta.
789 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
791 if (lp->lwp_slptime > 1) {
793 * Too much time has passed, do a coarse correction.
795 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
796 dfly_resetpriority(lp);
797 lp->lwp_cpbase = cpbase;
799 lp->lwp_batch -= ESTCPUFREQ;
800 if (lp->lwp_batch < 0)
802 } else if (lp->lwp_cpbase != cpbase) {
804 * Adjust estcpu if we are in a different tick. Don't waste
805 * time if we are in the same tick.
807 * First calculate the number of ticks in the measurement
808 * interval. The ttlticks calculation can wind up 0 due to
809 * a bug in the handling of lwp_slptime (as yet not found),
810 * so make sure we do not get a divide by 0 panic.
812 ttlticks = (cpbase - lp->lwp_cpbase) /
813 gd->gd_schedclock.periodic;
816 lp->lwp_cpbase = cpbase;
820 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
823 * Calculate the percentage of one cpu used factoring in ncpus
824 * and the load and adjust estcpu. Handle degenerate cases
825 * by adding 1 to runqcount.
827 * estcpu is scaled by ESTCPUMAX.
829 * runqcount is the excess number of user processes
830 * that cannot be immediately scheduled to cpus. We want
831 * to count these as running to avoid range compression
832 * in the base calculation (which is the actual percentage
835 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
836 (dd->runqcount + ncpus) / (ncpus * ttlticks);
839 * If estcpu is > 50% we become more batch-like
840 * If estcpu is <= 50% we become less batch-like
842 * It takes 30 cpu seconds to traverse the entire range.
844 if (estcpu > ESTCPUMAX / 2) {
845 lp->lwp_batch += ttlticks;
846 if (lp->lwp_batch > BATCHMAX)
847 lp->lwp_batch = BATCHMAX;
849 lp->lwp_batch -= ttlticks;
850 if (lp->lwp_batch < 0)
854 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
855 kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
856 lp->lwp_proc->p_pid, lp,
857 estcpu, lp->lwp_estcpu,
859 lp->lwp_cpticks, ttlticks);
863 * Adjust lp->lwp_estcpu. The decay factor determines how
864 * quickly lwp_estcpu collapses to its realtime calculation.
865 * A slower collapse gives us a more accurate number but
866 * can cause a cpu hog to eat too much cpu before the
867 * scheduler decides to downgrade it.
869 * NOTE: p_nice is accounted for in dfly_resetpriority(),
870 * and not here, but we must still ensure that a
871 * cpu-bound nice -20 process does not completely
872 * override a cpu-bound nice +20 process.
874 * NOTE: We must use ESTCPULIM() here to deal with any
877 decay_factor = usched_dfly_decay;
878 if (decay_factor < 1)
880 if (decay_factor > 1024)
883 lp->lwp_estcpu = ESTCPULIM(
884 (lp->lwp_estcpu * decay_factor + estcpu) /
887 if (usched_dfly_debug == lp->lwp_proc->p_pid)
888 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
889 dfly_resetpriority(lp);
890 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
896 * Compute the priority of a process when running in user mode.
897 * Arrange to reschedule if the resulting priority is better
898 * than that of the current process.
900 * This routine may be called with any process.
902 * This routine is called by fork1() for initial setup with the process
903 * off the run queue, and also may be called normally with the process on or
907 dfly_resetpriority(struct lwp *lp)
919 * Lock the scheduler that lp belongs to. This can be on a different
920 * cpu. Handle races. This loop breaks out with the appropriate
925 rdd = &dfly_pcpu[rcpu];
926 spin_lock(&rdd->spin);
927 if (rcpu == lp->lwp_qcpu)
929 spin_unlock(&rdd->spin);
933 * Calculate the new priority and queue type
935 newrqtype = lp->lwp_rtprio.type;
938 case RTP_PRIO_REALTIME:
940 newpriority = PRIBASE_REALTIME +
941 (lp->lwp_rtprio.prio & PRIMASK);
943 case RTP_PRIO_NORMAL:
945 * Detune estcpu based on batchiness. lwp_batch ranges
946 * from 0 to BATCHMAX. Limit estcpu for the sake of
947 * the priority calculation to between 50% and 100%.
949 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
953 * p_nice piece Adds (0-40) * 2 0-80
954 * estcpu Adds 16384 * 4 / 512 0-128
956 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
957 newpriority += estcpu * PPQ / ESTCPUPPQ;
958 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
959 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
960 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
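	/*
	 * Worked example of the calculation above (a sketch; it assumes the
	 * values implied by the table higher up: PPQ = 4, NICEPPQ = 2,
	 * ESTCPUPPQ = 512, ESTCPUMAX = 16384, MAXPRI = 128, PRIO_MIN = -20,
	 * PRIO_RANGE = 41).  For nice = 0 and a detuned estcpu of 8192 (50%):
	 *
	 *	nice piece   = (0 - PRIO_MIN) * 4 / 2       = 40
	 *	estcpu piece = 8192 * 4 / 512               = 64
	 *	scaled       = (40 + 64) * 128 / (82 + 128) = 63
	 *	newpriority  = PRIBASE_NORMAL + 63          = 191
	 *
	 * which lands the lwp on normal-class run queue 63 / PPQ = 15.
	 */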
963 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
965 case RTP_PRIO_THREAD:
966 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
969 panic("Bad RTP_PRIO %d", newrqtype);
974 * The newpriority incorporates the queue type so do a simple masked
975 * check to determine if the process has moved to another queue. If
976 * it has, and it is currently on a run queue, then move it.
978 * Since uload is ~PPQMASK masked, no modifications are necessary if
979 * we end up in the same run queue.
981 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
985 * uload can change, calculate the adjustment to reduce
986 * edge cases since choosers scan the cpu topology without
989 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
991 -((lp->lwp_priority & ~PPQMASK) & PRIMASK) +
992 ((newpriority & ~PPQMASK) & PRIMASK);
993 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
996 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
997 dfly_remrunqueue_locked(rdd, lp);
998 lp->lwp_priority = newpriority;
999 lp->lwp_rqtype = newrqtype;
1000 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1001 dfly_setrunqueue_locked(rdd, lp);
1004 lp->lwp_priority = newpriority;
1005 lp->lwp_rqtype = newrqtype;
1006 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1011 * In the same PPQ, uload cannot change.
1013 lp->lwp_priority = newpriority;
1019 * Determine if we need to reschedule the target cpu. This only
1020 * occurs if the LWP is already on a scheduler queue, which means
1021 * that idle cpu notification has already occurred. At most we
1022 * need only issue a need_user_resched() on the appropriate cpu.
1024 * The LWP may be owned by a CPU different from the current one,
1025 * in which case dd->uschedcp may be modified without an MP lock
1026 * or a spinlock held. The worst that happens is that the code
1027 * below causes a spurious need_user_resched() on the target CPU
1028 * and dd->upri to be wrong for a short period of time, both of
1029 * which are harmless.
1031 * If checkpri is 0 we are adjusting the priority of the current
1032 * process, possibly higher (less desirable), so ignore the upri
1033 * check which will fail in that case.
1036 if ((dfly_rdyprocmask & CPUMASK(rcpu)) &&
1038 (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1040 if (rcpu == mycpu->gd_cpuid) {
1041 spin_unlock(&rdd->spin);
1042 need_user_resched();
1044 atomic_clear_cpumask(&dfly_rdyprocmask,
1046 spin_unlock(&rdd->spin);
1047 lwkt_send_ipiq(globaldata_find(rcpu),
1048 dfly_need_user_resched_remote,
1052 spin_unlock(&rdd->spin);
1053 need_user_resched();
1056 spin_unlock(&rdd->spin);
1059 spin_unlock(&rdd->spin);
1066 dfly_yield(struct lwp *lp)
1069 /* FUTURE (or something similar) */
1070 switch(lp->lwp_rqtype) {
1071 case RTP_PRIO_NORMAL:
1072 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1078 need_user_resched();
1082 * Called from fork1() when a new child process is being created.
1084 * Give the child process an initial estcpu that is more batchy than
1085 * its parent and dock the parent for the fork (but do not
1086 * reschedule the parent). This comprises the main part of our batch
1087 * detection heuristic for both parallel forking and sequential execs.
1089 * XXX lwp should be "spawning" instead of "forking"
1092 dfly_forking(struct lwp *plp, struct lwp *lp)
1095 * Put the child 4 queue slots (out of 32) higher than the parent
1096 * (less desirable than the parent).
1098 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1102 * The batch status of children always starts out centerline
1103 * and will inch-up or inch-down as appropriate. It takes roughly
1104 * ~15 seconds of >50% cpu to hit the limit.
1106 lp->lwp_batch = BATCHMAX / 2;
1109 * Dock the parent a cost for the fork, protecting us from fork
1110 * bombs. If the parent is forking quickly make the child more
1113 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
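/*
 * A quick sketch of the numbers above (assuming ESTCPUPPQ = 512 as implied
 * by the estcpu table near the top of the file): the child starts out
 * ESTCPUPPQ * 4 = 2048 estcpu units (four run queues) worse than the
 * parent, while the parent is docked only ESTCPUPPQ / 16 = 32 units per
 * fork.  It therefore takes on the order of 16 rapid forks to push the
 * parent one full queue down and roughly 64 to cost it four queues, which
 * damps fork bombs without penalizing an occasional fork.
 */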
1117 * Called when a lwp is being removed from this scheduler, typically
1118 * during lwp_exit().
1121 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1123 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1125 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1126 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1127 atomic_add_int(&dd->uload,
1128 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1133 dfly_uload_update(struct lwp *lp)
1135 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1137 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1138 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1139 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1140 atomic_add_int(&dd->uload,
1141 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1144 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1145 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1146 atomic_add_int(&dd->uload,
1147 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1153 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1154 * it selects a user process and returns it. If chklp is non-NULL and chklp
1155 * has a better or equal priority than the process that would otherwise be
1156 * chosen, NULL is returned.
1158 * Until we fix the RUNQ code the chklp test has to be strict or we may
1159 * bounce between processes trying to acquire the current process designation.
1161 * Must be called with dfly_spin exclusive held. The spinlock is
1162 * left intact through the entire routine.
1164 * If chklp is NULL this function will dive into other cpus' queues looking
1165 * for work if the current queue is empty.
1169 dfly_chooseproc_locked(dfly_pcpu_t dd, struct lwp *chklp, int isremote)
1176 u_int32_t *which, *which2;
1182 rtqbits = dd->rtqueuebits;
1183 tsqbits = dd->queuebits;
1184 idqbits = dd->idqueuebits;
1187 pri = bsfl(rtqbits);
1188 q = &dd->rtqueues[pri];
1189 which = &dd->rtqueuebits;
1191 } else if (tsqbits) {
1192 pri = bsfl(tsqbits);
1193 q = &dd->queues[pri];
1194 which = &dd->queuebits;
1196 } else if (idqbits) {
1197 pri = bsfl(idqbits);
1198 q = &dd->idqueues[pri];
1199 which = &dd->idqueuebits;
1205 * Disallow remote->remote recursion
1210 * Pull a runnable thread from a remote run queue. We have
1211 * to adjust qcpu and uload manually because the lp we return
1212 * might be assigned directly to uschedcp (setrunqueue might
1215 xdd = dfly_choose_worst_queue(dd);
1216 if (xdd && xdd != dd && spin_trylock(&xdd->spin)) {
1217 lp = dfly_chooseproc_locked(xdd, NULL, 1);
1219 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1220 atomic_add_int(&xdd->uload,
1221 -((lp->lwp_priority & ~PPQMASK) &
1224 lp->lwp_qcpu = dd->cpuid;
1225 atomic_add_int(&dd->uload,
1226 ((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1227 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1229 spin_unlock(&xdd->spin);
1240 lp = TAILQ_FIRST(q);
1241 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1244 * If the passed lwp <chklp> is reasonably close to the selected
1245 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1247 * Note that we must err on the side of <chklp> to avoid bouncing
1248 * between threads in the acquire code.
1251 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1255 KTR_COND_LOG(usched_chooseproc,
1256 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1257 lp->lwp_proc->p_pid,
1258 lp->lwp_thread->td_gd->gd_cpuid,
1261 TAILQ_REMOVE(q, lp, lwp_procq);
1264 *which &= ~(1 << pri);
1265 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1266 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1274 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1276 * Choose a cpu node to schedule lp on, hopefully nearby its current
1277 * node. We give the current node a modest advantage for obvious reasons.
1279 * We also give the node the thread was woken up FROM a slight advantage
1280 * in order to try to schedule paired threads which synchronize/block waiting
1281 * for each other fairly close to each other. Similarly in a network setting
1282 * this feature will also attempt to place a user process near the kernel
1283 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1284 * algorithm as it heuristically groups synchronizing processes for locality
1285 * of reference in multi-socket systems.
1287 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1289 * When the topology is known choose a cpu whose group has, in aggregate,
1290 * the lowest weighted load.
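/*
 * A sketch of the weighting used below, assuming the default tunables
 * (usched_dfly_weight1 = 10, usched_dfly_weight2 = 5): at each topology
 * level the aggregate uload of a candidate group is reduced by
 * PPQ * level * weight1 / 10 (= PPQ * level) if it contains lp's current
 * cpu, or by PPQ * level * weight2 / 10 (= PPQ * level / 2) if it contains
 * the cpu lp was woken from, so staying put wins ties and the wakeup
 * source is the second choice.
 */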
1294 dfly_choose_best_queue(struct lwp *lp)
1300 dfly_pcpu_t dd1 = &dfly_pcpu[lp->lwp_qcpu];
1301 dfly_pcpu_t dd2 = &dfly_pcpu[lp->lwp_thread->td_wakefromcpu];
1310 * When the topology is unknown choose a random cpu that is hopefully
1313 if (dd1->cpunode == NULL)
1314 return (dfly_choose_queue_simple(dd1, lp));
1317 * When the topology is known choose a cpu whose group has, in
1318 * aggregate, the lowest weighted load.
1320 cpup = root_cpu_node;
1322 level = cpu_topology_levels_number;
1326 * Degenerate case super-root
1328 if (cpup->child_node && cpup->child_no == 1) {
1329 cpup = cpup->child_node;
1337 if (cpup->child_node == NULL) {
1338 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1343 lowest_load = 0x7FFFFFFF;
1345 for (n = 0; n < cpup->child_no; ++n) {
1347 * Accumulate load information for all cpus
1348 * which are members of this node.
1350 cpun = &cpup->child_node[n];
1351 mask = cpun->members & usched_global_cpumask &
1352 smp_active_mask & lp->lwp_cpumask;
1357 cpuid = BSFCPUMASK(mask);
1358 load += dfly_pcpu[cpuid].uload;
1359 mask &= ~CPUMASK(cpuid);
1363 * Give a slight advantage to nearby cpus.
1365 if (cpun->members & dd1->cpumask)
1366 load -= PPQ * level * usched_dfly_weight1 / 10;
1367 else if (cpun->members & dd2->cpumask)
1368 load -= PPQ * level * usched_dfly_weight2 / 10;
1371 * Calculate the best load
1373 if (cpub == NULL || lowest_load > load ||
1374 (lowest_load == load &&
1375 (cpun->members & dd1->cpumask))
1384 if (usched_dfly_chooser)
1385 kprintf("lp %02d->%02d %s\n",
1386 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1391 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1393 * Choose the worst queue close to dd's cpu node with a non-empty runq.
1395 * This is used by the thread chooser when the current cpu's queues are
1396 * empty to steal a thread from another cpu's queue. We want to offload
1397 * the most heavily-loaded queue.
1401 dfly_choose_worst_queue(dfly_pcpu_t dd)
1416 * When the topology is unknown choose a random cpu that is hopefully
1419 if (dd->cpunode == NULL) {
1424 * When the topology is known choose a cpu whose group has, in
1425 * aggregate, the highest weighted load.
1427 cpup = root_cpu_node;
1429 level = cpu_topology_levels_number;
1432 * Degenerate case super-root
1434 if (cpup->child_node && cpup->child_no == 1) {
1435 cpup = cpup->child_node;
1443 if (cpup->child_node == NULL) {
1444 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1451 for (n = 0; n < cpup->child_no; ++n) {
1453 * Accumulate load information for all cpus
1454 * which are members of this node.
1456 cpun = &cpup->child_node[n];
1457 mask = cpun->members & usched_global_cpumask &
1464 cpuid = BSFCPUMASK(mask);
1465 load += dfly_pcpu[cpuid].uload;
1466 if (dfly_pcpu[cpuid].uload)
1468 mask &= ~CPUMASK(cpuid);
1472 * Give a slight advantage to nearby cpus.
1474 if (cpun->members & dd->cpumask)
1475 load += PPQ * level;
1478 * The best candidate is the one with the worst
1479 * (highest) load. Prefer candidates that are
1480 * closer to our cpu.
1483 (cpub == NULL || highest_load < load ||
1484 (highest_load == load &&
1485 (cpun->members & dd->cpumask)))
1487 highest_load = load;
1499 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1507 * Fall back to the original heuristic: select a random cpu,
1508 * first checking cpus not currently running a user thread.
1511 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1512 mask = ~dfly_curprocmask & dfly_rdyprocmask & lp->lwp_cpumask &
1513 smp_active_mask & usched_global_cpumask;
1516 tmpmask = ~(CPUMASK(cpuid) - 1);
1518 cpuid = BSFCPUMASK(mask & tmpmask);
1520 cpuid = BSFCPUMASK(mask);
1521 rdd = &dfly_pcpu[cpuid];
1523 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1525 mask &= ~CPUMASK(cpuid);
1529 * Then cpus which might have a currently running lp
1531 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1532 mask = dfly_curprocmask & dfly_rdyprocmask &
1533 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
1536 tmpmask = ~(CPUMASK(cpuid) - 1);
1538 cpuid = BSFCPUMASK(mask & tmpmask);
1540 cpuid = BSFCPUMASK(mask);
1541 rdd = &dfly_pcpu[cpuid];
1543 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1545 mask &= ~CPUMASK(cpuid);
1549 * If we cannot find a suitable cpu we reload from dfly_scancpu
1550 * and round-robin. Other cpus will pick up as they release their
1551 * current lwps or become ready.
1553 * Avoid a degenerate system lockup case if usched_global_cpumask
1554 * is set to 0 or otherwise does not cover lwp_cpumask.
1556 * We only kick the target helper thread in this case, we do not
1557 * set the user resched flag because
1559 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1560 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
1562 rdd = &dfly_pcpu[cpuid];
1569 dfly_need_user_resched_remote(void *dummy)
1571 globaldata_t gd = mycpu;
1572 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1574 need_user_resched();
1576 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1577 wakeup_mycpu(&dd->helper_thread);
1583 * dfly_remrunqueue_locked() removes a given process from the run queue
1584 * that it is on, clearing the queue busy bit if it becomes empty.
1586 * Note that the user process scheduler is different from the LWKT scheduler.
1587 * The user process scheduler only manages user processes but it uses LWKT
1588 * underneath, and a user process operating in the kernel will often be
1589 * 'released' from our management.
1591 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1592 * to sleep or the lwp is moved to a different runq.
1595 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1601 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1602 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1604 /*rdd->uload -= (lp->lwp_priority & ~PPQMASK) & PRIMASK;*/
1605 KKASSERT(rdd->runqcount >= 0);
1607 pri = lp->lwp_rqindex;
1608 switch(lp->lwp_rqtype) {
1609 case RTP_PRIO_NORMAL:
1610 q = &rdd->queues[pri];
1611 which = &rdd->queuebits;
1613 case RTP_PRIO_REALTIME:
1615 q = &rdd->rtqueues[pri];
1616 which = &rdd->rtqueuebits;
1619 q = &rdd->idqueues[pri];
1620 which = &rdd->idqueuebits;
1623 panic("remrunqueue: invalid rtprio type");
1626 TAILQ_REMOVE(q, lp, lwp_procq);
1627 if (TAILQ_EMPTY(q)) {
1628 KASSERT((*which & (1 << pri)) != 0,
1629 ("remrunqueue: remove from empty queue"));
1630 *which &= ~(1 << pri);
1635 * dfly_setrunqueue_locked()
1637 * Add a process whose rqtype and rqindex have previously been calculated
1638 * onto the appropriate run queue. Determine if the addition requires
1639 * a reschedule on a cpu and return the cpuid or -1.
1641 * NOTE: Lower priorities are better priorities.
1643 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1644 * sum of the rough lwp_priority for all running and runnable
1645 * processes. Lower priority processes (higher lwp_priority
1646 * values) actually DO count as more load, not less, because
1647 * these are the programs which require the most care with
1648 * regards to cpu selection.
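/*
 * Illustrative sketch of the uload bookkeeping described above (this is
 * the same expression used in dfly_setrunqueue_locked() and friends, not
 * a new helper): when an lwp starts counting against a cpu its masked
 * priority is added to that cpu's aggregate, and the same amount is
 * subtracted again when the lwp leaves:
 *
 *	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
 *	atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
 *		       (lp->lwp_priority & ~PPQMASK) & PRIMASK);
 */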
1651 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1657 if (lp->lwp_qcpu != rdd->cpuid) {
1658 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1659 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1660 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1661 -((lp->lwp_priority & ~PPQMASK) & PRIMASK));
1663 lp->lwp_qcpu = rdd->cpuid;
1666 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1667 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1669 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1670 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1671 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload,
1672 (lp->lwp_priority & ~PPQMASK) & PRIMASK);
1675 pri = lp->lwp_rqindex;
1677 switch(lp->lwp_rqtype) {
1678 case RTP_PRIO_NORMAL:
1679 q = &rdd->queues[pri];
1680 which = &rdd->queuebits;
1682 case RTP_PRIO_REALTIME:
1684 q = &rdd->rtqueues[pri];
1685 which = &rdd->rtqueuebits;
1688 q = &rdd->idqueues[pri];
1689 which = &rdd->idqueuebits;
1692 panic("setrunqueue: invalid rtprio type");
1697 * Add to the correct queue and set the appropriate bit. If no
1698 * lower priority (i.e. better) processes are in the queue then
1699 * we want a reschedule, calculate the best cpu for the job.
1701 * Always run reschedules on the LWP's original cpu.
1703 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1710 * For SMP systems a user scheduler helper thread is created for each
1711 * cpu and is used to allow one cpu to wake up another for the purposes of
1712 * scheduling userland threads from setrunqueue().
1714 * UP systems do not need the helper since there is only one cpu.
1716 * We can't use the idle thread for this because we might block.
1717 * Additionally, doing things this way allows us to HLT idle cpus
1721 dfly_helper_thread(void *dummy)
1730 cpuid = gd->gd_cpuid; /* doesn't change */
1731 mask = gd->gd_cpumask; /* doesn't change */
1732 dd = &dfly_pcpu[cpuid];
1735 * Since we only want to be woken up when no user processes
1736 * are scheduled on a cpu, run at an ultra low priority.
1738 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1740 tsleep(&dd->helper_thread, 0, "schslp", 0);
1744 * We use the LWKT deschedule-interlock trick to avoid racing
1745 * dfly_rdyprocmask. This means we cannot block through to the
1746 * manual lwkt_switch() call we make below.
1749 tsleep_interlock(&dd->helper_thread, 0);
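	/*
	 * The deschedule-interlock pattern used in this loop, spelled out
	 * (a sketch of the real calls already present above and below, not
	 * new code):
	 *
	 *	tsleep_interlock(&dd->helper_thread, 0);
	 *	atomic_set_cpumask(&dfly_rdyprocmask, mask);
	 *	...	(look for work while holding dd->spin)
	 *	tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
	 *
	 * A wakeup() delivered after tsleep_interlock() but before the
	 * final tsleep() is not lost; the PINTERLOCKED tsleep simply
	 * returns instead of blocking.
	 */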
1751 /*spin_lock(&dfly_spin);*/
1752 spin_lock(&dd->spin);
1754 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1755 clear_user_resched(); /* This satisfies the reschedule request */
1756 dd->rrcount = 0; /* Reset the round-robin counter */
1758 if ((dfly_curprocmask & mask) == 0) {
1760 * No thread is currently scheduled.
1762 KKASSERT(dd->uschedcp == NULL);
1763 if ((nlp = dfly_chooseproc_locked(dd, NULL, 0)) != NULL) {
1764 KTR_COND_LOG(usched_sched_thread_no_process,
1765 nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1767 nlp->lwp_proc->p_pid,
1768 nlp->lwp_thread->td_gd->gd_cpuid);
1770 atomic_set_cpumask(&dfly_curprocmask, mask);
1771 dd->upri = nlp->lwp_priority;
1773 dd->rrcount = 0; /* reset round robin */
1774 spin_unlock(&dd->spin);
1775 /*spin_unlock(&dfly_spin);*/
1776 lwkt_acquire(nlp->lwp_thread);
1777 lwkt_schedule(nlp->lwp_thread);
1779 spin_unlock(&dd->spin);
1780 /*spin_unlock(&dfly_spin);*/
1782 } else if (dd->runqcount) {
1784 * Possibly find a better process to schedule.
1786 nlp = dfly_chooseproc_locked(dd, dd->uschedcp, 0);
1788 KTR_COND_LOG(usched_sched_thread_process,
1789 nlp->lwp_proc->p_pid == usched_dfly_pid_debug,
1791 nlp->lwp_proc->p_pid,
1792 nlp->lwp_thread->td_gd->gd_cpuid);
1794 dd->upri = nlp->lwp_priority;
1796 dd->rrcount = 0; /* reset round robin */
1797 spin_unlock(&dd->spin);
1798 /*spin_unlock(&dfly_spin);*/
1799 lwkt_acquire(nlp->lwp_thread);
1800 lwkt_schedule(nlp->lwp_thread);
1803 * Leave the thread on our run queue. Another
1804 * scheduler will try to pull it later.
1806 spin_unlock(&dd->spin);
1807 /*spin_unlock(&dfly_spin);*/
1811 * The runq is empty.
1813 spin_unlock(&dd->spin);
1814 /*spin_unlock(&dfly_spin);*/
1818 * We're descheduled unless someone scheduled us. Switch away.
1819 * Exiting the critical section will cause splz() to be called
1820 * for us if interrupts and such are pending.
1823 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1827 /* sysctl stick_to_level parameter */
1829 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
1833 new_val = usched_dfly_stick_to_level;
1835 error = sysctl_handle_int(oidp, &new_val, 0, req);
1836 if (error != 0 || req->newptr == NULL)
1838 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1840 usched_dfly_stick_to_level = new_val;
1845 * Set up our scheduler helpers. Note that curprocmask bit 0 has already
1846 * been cleared by rqinit() and we should not mess with it further.
1849 dfly_helper_thread_cpu_init(void)
1854 int smt_not_supported = 0;
1855 int cache_coherent_not_supported = 0;
1858 kprintf("Start scheduler helpers on cpus:\n");
1860 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
1861 usched_dfly_sysctl_tree =
1862 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
1863 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1864 "usched_dfly", CTLFLAG_RD, 0, "");
1866 for (i = 0; i < ncpus; ++i) {
1867 dfly_pcpu_t dd = &dfly_pcpu[i];
1868 cpumask_t mask = CPUMASK(i);
1870 if ((mask & smp_active_mask) == 0)
1873 spin_init(&dd->spin);
1874 dd->cpunode = get_cpu_node_by_cpuid(i);
1876 dd->cpumask = CPUMASK(i);
1877 for (j = 0; j < NQS; j++) {
1878 TAILQ_INIT(&dd->queues[j]);
1879 TAILQ_INIT(&dd->rtqueues[j]);
1880 TAILQ_INIT(&dd->idqueues[j]);
1882 atomic_clear_cpumask(&dfly_curprocmask, 1);
1884 if (dd->cpunode == NULL) {
1885 smt_not_supported = 1;
1886 cache_coherent_not_supported = 1;
1888 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1889 "found for cpu\n", i);
1891 switch (dd->cpunode->type) {
1894 kprintf ("\tcpu%d - HyperThreading "
1895 "available. Core siblings: ",
1899 smt_not_supported = 1;
1902 kprintf ("\tcpu%d - No HT available, "
1903 "multi-core/physical "
1904 "cpu. Physical siblings: ",
1908 smt_not_supported = 1;
1911 kprintf ("\tcpu%d - No HT available, "
1912 "single-core/physical cpu. "
1913 "Package Siblings: ",
1917 /* Let's go for safe defaults here */
1918 smt_not_supported = 1;
1919 cache_coherent_not_supported = 1;
1921 kprintf ("\tcpu%d - Unknown cpunode->"
1922 "type=%u. Siblings: ",
1924 (u_int)dd->cpunode->type);
1929 if (dd->cpunode->parent_node != NULL) {
1930 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1931 kprintf("cpu%d ", cpuid);
1934 kprintf(" no siblings\n");
1939 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
1940 0, i, "usched %d", i);
1943 * Allow user scheduling on the target cpu. cpu #0 has already
1944 * been enabled in rqinit().
1947 atomic_clear_cpumask(&dfly_curprocmask, mask);
1948 atomic_set_cpumask(&dfly_rdyprocmask, mask);
1949 dd->upri = PRIBASE_NULL;
1953 /* usched_dfly sysctl configurable parameters */
1955 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1956 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1957 OID_AUTO, "rrinterval", CTLFLAG_RW,
1958 &usched_dfly_rrinterval, 0, "");
1959 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1960 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1961 OID_AUTO, "decay", CTLFLAG_RW,
1962 &usched_dfly_decay, 0, "Extra decay when not running");
1963 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1964 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1965 OID_AUTO, "batch_time", CTLFLAG_RW,
1966 &usched_dfly_batch_time, 0, "Min batch counter value");
1968 /* Add enable/disable option for SMT scheduling if supported */
1969 if (smt_not_supported) {
1970 usched_dfly_smt = 0;
1971 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1972 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1973 OID_AUTO, "smt", CTLFLAG_RD,
1974 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
1976 usched_dfly_smt = 1;
1977 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1978 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1979 OID_AUTO, "smt", CTLFLAG_RW,
1980 &usched_dfly_smt, 0, "Enable SMT scheduling");
1984 * Add enable/disable option for cache coherent scheduling
1987 if (cache_coherent_not_supported) {
1988 usched_dfly_cache_coherent = 0;
1989 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
1990 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1991 OID_AUTO, "cache_coherent", CTLFLAG_RD,
1993 "Cache coherence NOT SUPPORTED");
1995 usched_dfly_cache_coherent = 1;
1996 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
1997 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
1998 OID_AUTO, "cache_coherent", CTLFLAG_RW,
1999 &usched_dfly_cache_coherent, 0,
2000 "Enable/Disable cache coherent scheduling");
2002 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2003 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2004 OID_AUTO, "weight1", CTLFLAG_RW,
2005 &usched_dfly_weight1, 10,
2006 "Weight selection for current cpu");
2008 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2009 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2010 OID_AUTO, "weight2", CTLFLAG_RW,
2011 &usched_dfly_weight2, 5,
2012 "Weight selection for wakefrom cpu");
2014 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2015 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2016 OID_AUTO, "stick_to_level",
2017 CTLTYPE_INT | CTLFLAG_RW,
2018 NULL, sizeof usched_dfly_stick_to_level,
2019 sysctl_usched_dfly_stick_to_level, "I",
2020 "Stick a process to this level. See sysctl"
2021 "paremter hw.cpu_topology.level_description");
2024 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2025 dfly_helper_thread_cpu_init, NULL)
2027 #else /* No SMP options - just add the configurable parameters to sysctl */
2030 sched_sysctl_tree_init(void)
2032 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2033 usched_dfly_sysctl_tree =
2034 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2035 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2036 "usched_dfly", CTLFLAG_RD, 0, "");
2038 /* usched_dfly sysctl configurable parameters */
2039 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2040 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2041 OID_AUTO, "rrinterval", CTLFLAG_RW,
2042 &usched_dfly_rrinterval, 0, "");
2043 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2044 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2045 OID_AUTO, "decay", CTLFLAG_RW,
2046 &usched_dfly_decay, 0, "Extra decay when not running");
2047 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2048 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2049 OID_AUTO, "batch_time", CTLFLAG_RW,
2050 &usched_dfly_batch_time, 0, "Min batch counter value");
2052 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2053 sched_sysctl_tree_init, NULL)