/*
 * Copyright (c) 2012-2017 The DragonFly Project.  All rights reserved.
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>,
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <sys/cpu_topology.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <sys/ktr.h>

#include <machine/cpu.h>
#include <machine/smp.h>
/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
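/*
 * Illustrative sketch, not part of the scheduler proper: the queue-index
 * arithmetic implied by the macros above.  With MAXPRI at 128, PPQ is 4
 * and PRIMASK is 127, so each of the NQS (32) queues covers four adjacent
 * priority levels, e.g. priority 57 lands in queue 57 / 4 = 14.
 */
#if 0
static int
example_rqindex(int priority)
{
	/* same masking dfly_resetpriority() uses to derive lwp_rqindex */
	return ((priority & PRIMASK) / PPQ);
}
#endif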
/*
 * NICE_QS	- maximum queues nice can shift the process
 * EST_QS	- maximum queues estcpu can shift the process
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 *
 * Remember that NICE runs over the whole -20 to +20 range.
 */
#define NICE_QS		24	/* -20 to +20 shift in whole queues */
#define EST_QS		20	/* 0-MAX shift in whole queues */
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
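/*
 * Worked magnitudes for the defines above (assuming ESTCPUPPQ of 512):
 * ESTCPUMAX is 512 * 20 = 10240 units, so estcpu can shift a thread by
 * at most EST_QS (20) whole queues, while nice spans NICE_QS (24) queues
 * across its full -20..+20 range; a full nice swing can therefore
 * outweigh a full estcpu swing.
 */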
#define lwp_priority	lwp_usdata.dfly.priority
#define lwp_forked	lwp_usdata.dfly.forked
#define lwp_rqindex	lwp_usdata.dfly.rqindex
#define lwp_estcpu	lwp_usdata.dfly.estcpu
#define lwp_estfast	lwp_usdata.dfly.estfast
#define lwp_uload	lwp_usdata.dfly.uload
#define lwp_rqtype	lwp_usdata.dfly.rqtype
#define lwp_qcpu	lwp_usdata.dfly.qcpu
#define lwp_rrcount	lwp_usdata.dfly.rrcount
static int
lptouload(struct lwp *lp)
{
	int uload;

	uload = lp->lwp_estcpu / NQS;
	uload -= uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);

	return uload;
}
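/*
 * Worked example for lptouload() (hypothetical numbers): a thread at
 * ESTCPUMAX has a base uload of 10240 / 32 = 320.  At nice +20 the
 * correction subtracts 320 * 20 / 21 = 304, leaving 16, while at
 * nice -20 it adds 304, giving 624.  Niced-up (less important) threads
 * thus contribute very little to a cpu's aggregate uload.
 */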
/*
 * DFly scheduler pcpu structure.  Note that the pcpu uload field must
 * be 64-bits to avoid overflowing in the situation where more than 32768
 * processes are on a single cpu's queue.  Since high-end systems can
 * easily run 900,000+ processes, we have to deal with it.
 */
struct usched_dfly_pcpu {
	struct spinlock	spin;
	struct thread	*helper_thread;
	struct globaldata *gd;
	u_short		scancpu;
	short		upri;
	long		uload;		/* 64-bits to avoid overflow (1) */
	int		ucount;
	int		flags;
	struct lwp	*uschedcp;
	struct rq	queues[NQS];
	struct rq	rtqueues[NQS];
	struct rq	idqueues[NQS];
	u_int32_t	queuebits;
	u_int32_t	rtqueuebits;
	u_int32_t	idqueuebits;
	int		runqcount;
	int		rrcount;
	int		cpuid;
	cpumask_t	cpumask;
	cpu_node_t	*cpunode;
};
/*
 * Reflecting bits in the global atomic masks allows us to avoid
 * a certain degree of global ping-ponging.
 */
#define DFLY_PCPU_RDYMASK	0x0001	/* reflect rdyprocmask */
#define DFLY_PCPU_CURMASK	0x0002	/* reflect curprocmask */

typedef struct usched_dfly_pcpu	*dfly_pcpu_t;
static void dfly_acquire_curproc(struct lwp *lp);
static void dfly_release_curproc(struct lwp *lp);
static void dfly_select_curproc(globaldata_t gd);
static void dfly_setrunqueue(struct lwp *lp);
static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void dfly_recalculate_estcpu(struct lwp *lp);
static void dfly_resetpriority(struct lwp *lp);
static void dfly_forking(struct lwp *plp, struct lwp *lp);
static void dfly_exiting(struct lwp *lp, struct proc *);
static void dfly_uload_update(struct lwp *lp);
static void dfly_yield(struct lwp *lp);
static void dfly_changeqcpu_locked(struct lwp *lp,
				dfly_pcpu_t dd, dfly_pcpu_t rdd);
static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit);
static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
static void dfly_need_user_resched_remote(void *dummy);
static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
				struct lwp *chklp, int worst);
static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
static void dfly_changedcpu(struct lwp *lp);
struct usched usched_dfly = {
	{ NULL },
	"dfly", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	dfly_acquire_curproc,
	dfly_release_curproc,
	dfly_setrunqueue,
	dfly_schedulerclock,
	dfly_recalculate_estcpu,
	dfly_resetpriority,
	dfly_forking,
	dfly_exiting,
	dfly_uload_update,
	NULL,			/* setcpumask not supported */
	dfly_yield,
	dfly_changedcpu
};
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 *
 * curprocmask is used to publish cpus with assigned curprocs to the rest
 * of the cpus.  In certain situations curprocmask may leave a bit set
 * (e.g. a yield or a token-based yield) even though dd->uschedcp is
 * NULL'd out temporarily.
 */
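/*
 * Illustrative sketch, not part of the scheduler proper: how a queuebits
 * mask stays in sync with the per-queue TAILQs.  Setting the bit on
 * insert and clearing it when a queue drains is what lets the chooser
 * scan all 32 queues with a single bsfl()/ffs().
 */
#if 0
static void
example_enqueue(struct usched_dfly_pcpu *dd, struct lwp *lp, int pri)
{
	TAILQ_INSERT_TAIL(&dd->queues[pri], lp, lwp_procq);
	dd->queuebits |= 1U << pri;		/* mark queue non-empty */
}

static struct lwp *
example_choose(struct usched_dfly_pcpu *dd)
{
	struct lwp *lp;
	int pri;

	if (dd->queuebits == 0)
		return NULL;			/* all 32 queues empty */
	pri = bsfl(dd->queuebits);		/* first (best) busy queue */
	lp = TAILQ_FIRST(&dd->queues[pri]);
	TAILQ_REMOVE(&dd->queues[pri], lp, lwp_procq);
	if (TAILQ_EMPTY(&dd->queues[pri]))
		dd->queuebits &= ~(1U << pri);	/* queue drained */
	return lp;
}
#endif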
/* currently running a user process */
static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
static cpumask_t dfly_rdyprocmask;	/* ready to accept a user process */
static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
static struct sysctl_oid *usched_dfly_sysctl_tree;
static struct lock usched_dfly_config_lk = LOCK_INITIALIZER("usdfs", 0, 0);
/* Debug info exposed through debug.* sysctl */

static int usched_dfly_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
	   &usched_dfly_debug, 0,
	   "Print debug information for this pid");

static int usched_dfly_pid_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
	   &usched_dfly_pid_debug, 0,
	   "Print KTR debug information for this pid");

static int usched_dfly_chooser = 0;
SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
	   &usched_dfly_chooser, 0,
	   "Print the next N cpu selection decisions");
/*
 * The fork bias can have a large effect on the system in the face of a
 * make -j N or other high-forking applications.
 *
 * Larger values are much less invasive vs other things that
 * might be running in the system, but can cause exec chains
 * such as those typically generated by make to have higher
 * latencies in the face of modest load.
 *
 * Lower values are more invasive but have reduced latencies
 * for such exec chains.
 *
 * make -j 10 buildkernel example, build times:
 *
 *	+1	3:14	-5.2%	<-- default
 *
 * This issue occurs due to the way the scheduler affinity heuristics work.
 * There is no way to really 'fix' the affinity heuristics because when it
 * comes right down to it trying to instantly schedule a process on an
 * available cpu (even if it will become unavailable a microsecond later)
 * tends to cause processes to shift around between cpus and sockets too much
 * and breaks the affinity.
 *
 * NOTE: Heavily concurrent builds typically have enough things on the pan
 *	 that they remain time-efficient even with a higher bias.
 */
static int usched_dfly_forkbias = 1;
SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
	   &usched_dfly_forkbias, 0,
	   "Fork bias for estcpu in whole queues");
/*
 * Tuning usched_dfly - configurable through kern.usched_dfly.
 *
 * weight1 - Tries to keep threads on their current cpu.  If you
 *	     make this value too large the scheduler will not be
 *	     able to load-balance large loads.
 *
 *	     Generally set to a fairly low value, but high enough
 *	     such that estcpu jitter doesn't move threads around.
 *
 * weight2 - If non-zero, detects thread pairs undergoing synchronous
 *	     communications and tries to move them closer together.
 *	     The weight advantages the same package and socket and
 *	     disadvantages the same core and same cpu.
 *
 *	     WARNING!  Weight2 is a ridiculously sensitive parameter,
 *	     particularly against weight4.  Change the default at your
 *	     own risk.
 *
 * weight3 - Weighting based on the number of recently runnable threads
 *	     on the userland scheduling queue (ignoring their loads).
 *
 *	     A nominal value here prevents high-priority (low-load)
 *	     threads from accumulating on one cpu core when other
 *	     cores are available.
 *
 *	     This value should be left fairly small because low-load
 *	     high priority threads can still be mostly idle and too
 *	     high a value will kick cpu-bound processes off the cpu
 *	     unnecessarily.
 *
 * weight4 - Weighting based on availability of other logical cpus running
 *	     less important threads (by upri) than the thread we are trying
 *	     to schedule.
 *
 *	     This allows a thread to migrate to another nearby cpu if it
 *	     is unable to run on the current cpu based on the other cpu
 *	     being idle or running a less important (higher lwp_priority)
 *	     thread.  This value should be large enough to override weight1,
 *	     but not so large as to override weight2.
 *
 *	     This parameter generally ensures fairness at the cost of some
 *	     performance (if set too high).  It should generally be just
 *	     a tad lower than weight2.
 *
 * weight5 - Weighting based on the relative amount of ram connected
 *	     to the node a cpu resides on.
 *
 *	     This value should remain fairly low to allow asymmetric
 *	     NUMA nodes to get threads scheduled to them.  Setting a very
 *	     high level will prevent scheduling on asymmetric NUMA nodes
 *	     with low amounts of directly-attached memory.
 *
 *	     Note that when testing e.g. N threads on a machine with N
 *	     cpu cores with asymmetric NUMA nodes, a non-zero value will
 *	     cause some cpu threads on the low-priority NUMA nodes to remain
 *	     idle even when a few process threads are doubled-up on other
 *	     cpus.  But this is typically more ideal because it deschedules
 *	     low-priority NUMA nodes at lighter loads.
 *
 *	     Values between 50 and 200 are recommended.  Default is 50.
 *
 * weight6 - rdd transfer weight hysteresis.  Defaults to 0, can be increased
 *	     to improve stability at the cost of more mis-schedules.
 *
 * ipc_smt - If enabled, advantage IPC pairing to sibling cpu threads.
 *	     If -1, automatic when load >= 1/2 ncpus (default).
 *
 * ipc_same- If enabled, advantage IPC pairing to the same logical cpu.
 *	     If -1, automatic when load >= ncpus (default).
 *
 * features - These flags can be set or cleared to enable or disable various
 *	      features.
 *
 *	      0x01	Enable idle-cpu pulling			(default)
 *	      0x02	Enable proactive pushing		(default)
 *	      0x04	Enable rebalancing rover		(default)
 *	      0x08	Enable more proactive pushing		(default)
 *	      0x20	choose best cpu for forked process	(default)
 *	      0x40	choose current cpu for forked process
 *	      0x80	choose random cpu for forked process
 *
 *	     NOTE - The idea behind forking mechanic 0x20 is that most
 *		    fork()ing is either followed by an exec in the child,
 *		    or the parent wait*()s.  If the child is short-lived,
 *		    there is effectively an IPC dependency (td_wakefromcpu
 *		    is also set in kern_fork.c) and we want to implement
 *		    the weight2 behavior to reduce IPIs and to reduce CPU
 *		    cache ping-ponging.
 */
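/*
 * Usage note (illustrative): the weights above are exported under the
 * kern.usched_dfly sysctl tree and can be tuned at runtime, e.g. from
 * a root shell:
 *
 *	sysctl kern.usched_dfly.weight2=180
 *	sysctl kern.usched_dfly.features=0x2f
 *
 * The values shown are just the defaults.  weight2 and weight4 interact
 * strongly, so measure real workloads before and after changing either.
 */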
__read_mostly static int usched_dfly_smt = 0;
__read_mostly static int usched_dfly_cache_coherent = 0;
__read_mostly static int usched_dfly_weight1 = 30;  /* keep thread on cpu */
__read_mostly static int usched_dfly_weight2 = 180; /* IPC locality */
__read_mostly static int usched_dfly_weight3 = 10;  /* threads on queue */
__read_mostly static int usched_dfly_weight4 = 120; /* availability of cores */
__read_mostly static int usched_dfly_weight5 = 50;  /* node attached memory */
__read_mostly static int usched_dfly_weight6 = 0;   /* rdd transfer weight */
__read_mostly static int usched_dfly_features = 0x2f;	      /* allow pulls */
__read_mostly static int usched_dfly_fast_resched = PPQ / 2; /* delta pri */
__read_mostly static int usched_dfly_swmask = ~PPQMASK;      /* allow pulls */
__read_mostly static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
__read_mostly static int usched_dfly_decay = 8;
__read_mostly static int usched_dfly_ipc_smt = -1;  /* IPC auto smt pair */
__read_mostly static int usched_dfly_ipc_same = -1; /* IPC auto same log cpu */
__read_mostly static long usched_dfly_node_mem;
/* KTR debug printings */

KTR_INFO_MASTER(usched);

#if !defined(KTR_USCHED_DFLY)
#define KTR_USCHED_DFLY	KTR_ALL
#endif

KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
    "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
    pid_t pid, int old_cpuid, int curr);
/*
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel will not depress our LWKT priority until after we return,
 * in case we have to shove over to another cpu.
 *
 * We must determine our thread's disposition before we switch away.  This
 * is very sensitive code.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 */
static void
dfly_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd;
	dfly_pcpu_t dd;
	dfly_pcpu_t rdd;
	thread_t td;
	int force_resched;

	/*
	 * Make sure we aren't sitting on a tsleep queue.
	 */
	td = lp->lwp_thread;
	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	dfly_recalculate_estcpu(lp);

	gd = mycpu;
	dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Process any pending interrupts/ipi's, then handle reschedule
	 * requests.  dfly_release_curproc() will try to assign a new
	 * uschedcp that isn't us and otherwise NULL it out.
	 */
	force_resched = 0;
	if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
	    lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
		force_resched = 1;
	}

	if (user_resched_wanted()) {
		if (dd->uschedcp == lp)
			force_resched = 1;
		clear_user_resched();
		dfly_release_curproc(lp);
	}

	/*
	 * Loop until we are the current user thread.
	 *
	 * NOTE: dd spinlock not held at top of loop.
	 */
	if (dd->uschedcp == lp)
		lwkt_yield_quick();

	while (dd->uschedcp != lp) {
		/*
		 * Do not do a lwkt_yield_quick() here as it will prevent
		 * the lwp from being placed on the dfly_bsd runqueue for
		 * one cycle (possibly an entire round-robin), preventing
		 * it from being scheduled to another cpu.
		 */
		/* lwkt_yield_quick(); */

		if (usched_dfly_debug == lp->lwp_proc->p_pid)
			kprintf(" pid %d acquire curcpu %d (force %d) ",
				lp->lwp_proc->p_pid, gd->gd_cpuid,
				force_resched);

		spin_lock(&dd->spin);

		/* This lwp is an outcast; force reschedule. */
		if (__predict_false(
		    CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
		    (rdd = dfly_choose_best_queue(lp)) != dd) {
			dfly_changeqcpu_locked(lp, dd, rdd);
			spin_unlock(&dd->spin);
			lwkt_deschedule(lp->lwp_thread);
			dfly_setrunqueue_dd(rdd, lp);
			lwkt_switch();
			gd = mycpu;
			dd = &dfly_pcpu[gd->gd_cpuid];
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("SEL-A cpu %d\n", gd->gd_cpuid);
			continue;
		}

		/*
		 * We are not or are no longer the current lwp and a forced
		 * reschedule was requested.  Figure out the best cpu to
		 * run on (our current cpu will be given significant weight).
		 *
		 * Doing this on many cpus simultaneously leads to
		 * instability so pace the operation.
		 *
		 * (if a reschedule was not requested we want to move this
		 * step after the uschedcp tests).
		 */
		if (force_resched &&
		    (usched_dfly_features & 0x08) &&
		    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
			if ((rdd = dfly_choose_best_queue(lp)) != dd) {
				dfly_changeqcpu_locked(lp, dd, rdd);
				spin_unlock(&dd->spin);
				lwkt_deschedule(lp->lwp_thread);
				dfly_setrunqueue_dd(rdd, lp);
				lwkt_switch();
				gd = mycpu;
				dd = &dfly_pcpu[gd->gd_cpuid];
				if (usched_dfly_debug == lp->lwp_proc->p_pid)
					kprintf("SEL-B cpu %d\n",
						gd->gd_cpuid);
				continue;
			}
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("(SEL-B same cpu) ");
		}

		/*
		 * Either no reschedule was requested or the best queue was
		 * dd, and no current process has been selected.  We can
		 * trivially become the current lwp on the current cpu.
		 */
		if (dd->uschedcp == NULL) {
			atomic_clear_int(&lp->lwp_thread->td_mpflags,
					 TDF_MP_DIDYIELD);
			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
				ATOMIC_CPUMASK_ORBIT(dfly_curprocmask,
						     gd->gd_cpuid);
				dd->flags |= DFLY_PCPU_CURMASK;
			}
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			KKASSERT(lp->lwp_qcpu == dd->cpuid);
			spin_unlock(&dd->spin);
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("SEL-C cpu %d (same cpu)\n",
					gd->gd_cpuid);
			break;
		}

		/*
		 * Can we steal the current designated user thread?
		 *
		 * If we do the other thread will stall when it tries to
		 * return to userland, possibly rescheduling elsewhere.
		 * Set need_user_resched() to get the thread to cycle soonest.
		 *
		 * It is important to do a masked test to avoid the edge
		 * case where two near-equal-priority threads are constantly
		 * interrupting each other.
		 *
		 * In the exact match case another thread has already gained
		 * uschedcp and lowered its priority, if we steal it the
		 * other thread will stay stuck on the LWKT runq and not
		 * push to another cpu.  So don't steal on equal-priority even
		 * though it might appear to be more beneficial due to not
		 * having to switch back to the other thread's context.
		 *
		 * usched_dfly_fast_resched requires that two threads be
		 * significantly far apart in priority in order to interrupt.
		 *
		 * If better but not sufficiently far apart, the current
		 * uschedcp will be interrupted at the next scheduler clock.
		 */
		if (dd->uschedcp &&
		   (dd->upri & ~PPQMASK) >
		   (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			KKASSERT(lp->lwp_qcpu == dd->cpuid);
			need_user_resched();
			spin_unlock(&dd->spin);
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("SEL-D cpu %d (same cpu)\n",
					gd->gd_cpuid);
			break;
		}

		/*
		 * Requeue us at lwp_priority, which recalculate_estcpu()
		 * set for us.  Reset the rrcount to force placement
		 * at the end of the queue.
		 *
		 * We used to move ourselves to the worst queue, but
		 * this creates a fairly serious priority inversion
		 * problem.
		 */
		if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
			spin_unlock(&dd->spin);
			lp->lwp_rrcount = usched_dfly_rrinterval;
			lp->lwp_rqindex = (lp->lwp_priority & PRIMASK) / PPQ;

			lwkt_deschedule(lp->lwp_thread);
			dfly_setrunqueue_dd(dd, lp);
			atomic_clear_int(&lp->lwp_thread->td_mpflags,
					 TDF_MP_DIDYIELD);
			lwkt_switch();
			gd = mycpu;
			dd = &dfly_pcpu[gd->gd_cpuid];
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("SEL-E cpu %d (requeue)\n",
					gd->gd_cpuid);
			continue;
		}

		/*
		 * We are not the current lwp, figure out the best cpu
		 * to run on (our current cpu will be given significant
		 * weight).  Loop on cpu change.
		 */
		if ((usched_dfly_features & 0x02) &&
		    force_resched == 0 &&
		    (rdd = dfly_choose_best_queue(lp)) != dd) {
			dfly_changeqcpu_locked(lp, dd, rdd);
			spin_unlock(&dd->spin);
			lwkt_deschedule(lp->lwp_thread);
			dfly_setrunqueue_dd(rdd, lp);
			lwkt_switch();
			gd = mycpu;
			dd = &dfly_pcpu[gd->gd_cpuid];
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("SEL-F cpu %d (requeue new cpu)\n",
					gd->gd_cpuid);
			continue;
		}

		/*
		 * We cannot become the current lwp, place the lp on the
		 * run-queue of this or another cpu and deschedule ourselves.
		 *
		 * When we are reactivated we will have another chance.
		 *
		 * Reload after a switch or setrunqueue/switch possibly
		 * moved us to another cpu.
		 */
		spin_unlock(&dd->spin);
		lwkt_deschedule(lp->lwp_thread);
		dfly_setrunqueue_dd(dd, lp);
		lwkt_switch();
		gd = mycpu;
		dd = &dfly_pcpu[gd->gd_cpuid];
		if (usched_dfly_debug == lp->lwp_proc->p_pid)
			kprintf("SEL-G cpu %d (fallback setrunq)\n",
				gd->gd_cpuid);
	}
	if (usched_dfly_debug == lp->lwp_proc->p_pid)
		kprintf(" pid %d acquire DONE cpu %d\n",
			lp->lwp_proc->p_pid, gd->gd_cpuid);

	/*
	 * Make sure upri is synchronized, then yield to LWKT threads as
	 * needed before returning.  This could result in another reschedule.
	 */
	crit_exit_quick(td);

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
}
/*
 * DFLY_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * dfly_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in dfly_setrunqueue().
 */
static void
dfly_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Make sure td_wakefromcpu is defaulted.  This will be overwritten
	 * if necessary.
	 */
	if (dd->uschedcp == lp) {
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
		spin_lock(&dd->spin);
		if (dd->uschedcp == lp) {
			dd->uschedcp = NULL;	/* don't let lp be selected */
			dd->upri = PRIBASE_NULL;

			/*
			 * We're just going to set it again, avoid the global
			 * cache line ping-pong.
			 */
			if ((lp->lwp_thread->td_mpflags &
			     TDF_MP_DIDYIELD) == 0) {
				if (dd->flags & DFLY_PCPU_CURMASK) {
					ATOMIC_CPUMASK_NANDBIT(
						dfly_curprocmask,
						gd->gd_cpuid);
					dd->flags &= ~DFLY_PCPU_CURMASK;
				}
			}
			spin_unlock(&dd->spin);
			dfly_select_curproc(gd);
		} else {
			spin_unlock(&dd->spin);
		}
	}
}
/*
 * DFLY_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from dfly_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 */
static void
dfly_select_curproc(globaldata_t gd)
{
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	crit_enter_gd(gd);

	spin_lock(&dd->spin);
	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);

	if (nlp) {
		if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
			ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
			dd->flags |= DFLY_PCPU_CURMASK;
		}
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		dd->rrcount = 0;		/* reset round robin */
		spin_unlock(&dd->spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else {
		spin_unlock(&dd->spin);
	}
	crit_exit_gd(gd);
}
/*
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 * It must not be possible for anyone else to explicitly schedule this thread.
 *
 * The thread may be the current thread as a special case.
 */
static void
dfly_setrunqueue(struct lwp *lp)
{
	dfly_pcpu_t dd;
	dfly_pcpu_t rdd;

	/*
	 * First validate the process LWKT state.
	 */
	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

	/*
	 * NOTE: dd/rdd do not necessarily represent the current cpu.
	 *	 Instead they may represent the cpu the thread was last
	 *	 scheduled on or inherited by its parent.
	 */
	dd = &dfly_pcpu[lp->lwp_qcpu];
	rdd = dd;

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(rdd->uschedcp != lp);

	/*
	 * Ok, we have to setrunqueue some target cpu and request a reschedule
	 * if necessary.
	 *
	 * We have to choose the best target cpu.  It might not be the current
	 * target even if the current cpu has no running user thread (for
	 * example, because the current cpu might be a hyperthread and its
	 * sibling has a thread assigned).
	 *
	 * If we just forked it is most optimal to run the child on the same
	 * cpu just in case the parent decides to wait for it (thus getting
	 * off that cpu).  As long as there is nothing else runnable on the
	 * cpu, that is.  If we did this unconditionally a parent forking
	 * multiple children before waiting (e.g. make -j N) leaves other
	 * cpus idle that could be working.
	 */
	if (lp->lwp_forked) {
		lp->lwp_forked = 0;
		if (usched_dfly_features & 0x20)
			rdd = dfly_choose_best_queue(lp);
		else if (usched_dfly_features & 0x40)
			rdd = &dfly_pcpu[lp->lwp_qcpu];
		else if (usched_dfly_features & 0x80)
			rdd = dfly_choose_queue_simple(rdd, lp);
		else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
			rdd = dfly_choose_best_queue(lp);
		else
			rdd = &dfly_pcpu[lp->lwp_qcpu];
	} else {
		rdd = dfly_choose_best_queue(lp);
		/* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
	}
	if (lp->lwp_qcpu != rdd->cpuid) {
		spin_lock(&dd->spin);
		dfly_changeqcpu_locked(lp, dd, rdd);
		spin_unlock(&dd->spin);
	}
	dfly_setrunqueue_dd(rdd, lp);
}
/*
 * Change qcpu to rdd->cpuid.  The dd the lp is CURRENTLY on must be
 * spin-locked on-call.  rdd does not have to be.
 */
static void
dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
{
	if (lp->lwp_qcpu != rdd->cpuid) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
			atomic_add_long(&dd->uload, -lp->lwp_uload);
			atomic_add_int(&dd->ucount, -1);
		}
		lp->lwp_qcpu = rdd->cpuid;
	}
}
/*
 * Place lp on rdd's runqueue.  Nothing is locked on call.  This function
 * also performs all necessary ancillary notification actions.
 */
static void
dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
{
	globaldata_t rgd;

	/*
	 * We might be moving the lp to another cpu's run queue, and once
	 * on the runqueue (even if it is our cpu's), another cpu can rip
	 * it away from us.
	 *
	 * TDF_MIGRATING might already be set if this is part of a
	 * remrunqueue+setrunqueue sequence.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);

	rgd = rdd->gd;

	/*
	 * We lose control of the lp the moment we release the spinlock
	 * after having placed it on the queue.  i.e. another cpu could pick
	 * it up, or it could exit, or its priority could be further
	 * adjusted, or something like that.
	 *
	 * WARNING! rdd can point to a foreign cpu!
	 */
	spin_lock(&rdd->spin);
	dfly_setrunqueue_locked(rdd, lp);

	/*
	 * Potentially interrupt the currently-running thread
	 */
	if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
		/*
		 * Currently running thread is better or same, do not
		 * interrupt.
		 */
		spin_unlock(&rdd->spin);
	} else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
		   usched_dfly_fast_resched) {
		/*
		 * Currently running thread is not better, but not so bad
		 * that we need to interrupt it.  Let it run for one more
		 * scheduler tick.
		 */
		if (rdd->uschedcp &&
		    rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
			rdd->uschedcp->lwp_rrcount =
				usched_dfly_rrinterval - 1;
		}
		spin_unlock(&rdd->spin);
	} else if (rgd == mycpu) {
		/*
		 * We should interrupt the currently running thread, which
		 * is on the current cpu.  However, if DIDYIELD is set we
		 * round-robin unconditionally and do not interrupt it.
		 */
		spin_unlock(&rdd->spin);
		if (rdd->uschedcp == NULL)
			wakeup_mycpu(rdd->helper_thread); /* XXX */
		if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
			need_user_resched();
	} else {
		/*
		 * We should interrupt the currently running thread, which
		 * is on a different cpu.
		 */
		spin_unlock(&rdd->spin);
		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
	}
}
/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 */
static void
dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];

	/*
	 * Spinlocks also hold a critical section so there should not be
	 * any active spinlocks.
	 */
	KKASSERT(gd->gd_spinlocks == 0 || dumping);

	/*
	 * If lp is NULL we might be contended and lwkt_switch() may have
	 * cycled into the idle thread.  Apply the tick to the current
	 * process on this cpu if it is contended.
	 */
	if (gd->gd_curthread == &gd->gd_idlethread) {
		lp = dd->uschedcp;
		if (lp && (lp->lwp_thread == NULL ||
			   lp->lwp_thread->td_contended == 0)) {
			lp = NULL;
		}
	}

	/*
	 * Dock thread for tick
	 */
	if (lp) {
		/*
		 * Do we need to round-robin?  We round-robin 10 times a
		 * second.  This should only occur for cpu-bound batch
		 * processes.
		 */
		if (++lp->lwp_rrcount >= usched_dfly_rrinterval)
			need_user_resched();

		/*
		 * Adjust estcpu upward using a real time equivalent
		 * calculation, and recalculate lp's priority.  Estcpu
		 * is increased such that it will cap-out over a period
		 * of one second.
		 */
		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
					   ESTCPUMAX / ESTCPUFREQ + 1);
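		/*
		 * Worked arithmetic (assuming ESTCPUMAX of 10240 and an
		 * ESTCPUFREQ of 50 ticks/sec): each tick adds roughly
		 * 10240 / 50 + 1 = 205 estcpu units, so a thread that
		 * hogs the cpu saturates at ESTCPUMAX after about 50
		 * ticks, i.e. about one second.
		 */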
		dfly_resetpriority(lp);
	}

	/*
	 * Rebalance two cpus every 8 ticks, pulling the worst thread
	 * from the worst cpu's queue into a rotating cpu number.
	 * Also require that the moving of the highest-load thread
	 * from rdd to dd does not cause the uload to cross over.
	 *
	 * This mechanic is needed because the push algorithms can
	 * steady-state in a non-optimal configuration.  We need to mix it
	 * up a little, even if it means breaking up a paired thread, so
	 * the push algorithms can rebalance the degenerate conditions.
	 * This portion of the algorithm exists to ensure stability at the
	 * selected weightings.
	 *
	 * Because we might be breaking up optimal conditions we do not want
	 * to execute this too quickly, hence we only rebalance approximately
	 * ~7-8 times per second.  The pushes, on the other hand, are capable
	 * of moving threads to other cpus at a much higher rate.
	 *
	 * We choose the most heavily loaded thread from the worst queue
	 * in order to ensure that multiple heavy-weight threads on the same
	 * queue get broken up, and also because these threads are the most
	 * likely to be able to remain in place.  Hopefully then any pairings,
	 * if applicable, migrate to where these threads are.
	 */
	if ((usched_dfly_features & 0x04) &&
	    ((u_int)sched_ticks & 7) == 0 &&
	    (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
		dfly_pcpu_t rdd;
		struct lwp *nlp;

		rdd = dfly_choose_worst_queue(dd, 1);
		if (rdd && dd->uload + usched_dfly_weight6 / 2 < rdd->uload) {
			spin_lock(&dd->spin);
			if (spin_trylock(&rdd->spin)) {
				nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
				spin_unlock(&rdd->spin);
				if (nlp == NULL)
					spin_unlock(&dd->spin);
			} else {
				spin_unlock(&dd->spin);
				nlp = NULL;
			}
		} else {
			nlp = NULL;
		}
		/* dd->spin held if nlp != NULL */

		/*
		 * Either schedule it or add it to our queue.
		 */
		if (nlp &&
		    (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
			if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
				ATOMIC_CPUMASK_ORMASK(dfly_curprocmask,
						      dd->cpumask);
				dd->flags |= DFLY_PCPU_CURMASK;
			}
			dd->upri = nlp->lwp_priority;
			dd->uschedcp = nlp;
			dd->rrcount = 0;	/* reset round robin */
			spin_unlock(&dd->spin);
			lwkt_acquire(nlp->lwp_thread);
			lwkt_schedule(nlp->lwp_thread);
		} else if (nlp) {
			dfly_setrunqueue_locked(dd, nlp);
			spin_unlock(&dd->spin);
		}
	}
}
/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
 * overall system load.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
dfly_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	sysclock_t ttlticks;
	int estcpu;
	int decay_factor;
	int ucount;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		dfly_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
		lp->lwp_estfast = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The ttlticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		ttlticks = (cpbase - lp->lwp_cpbase) /
			   gd->gd_schedclock.periodic;
		if ((ssysclock_t)ttlticks < 0) {
			ttlticks = 0;
			lp->lwp_cpbase = cpbase;
		}
		if (ttlticks == 0)
			return;
		updatepcpu(lp, lp->lwp_cpticks, ttlticks);

		/*
		 * Calculate an instant estcpu based on the percentage
		 * of (one) cpu used and exponentially average it into
		 * the current lwp_estcpu.
		 */
		ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
		estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;

		/*
		 * The higher ttlticks gets, the more meaning the calculation
		 * has and the smaller our decay_factor in the exponential
		 * average.
		 *
		 * The uload calculation has been removed because it actually
		 * makes things worse, causing processes which use less cpu
		 * (such as a browser) to be pumped up and treated the same
		 * as a cpu-bound process (such as a make).  The same effect
		 * can occur with sufficient load without the uload
		 * calculation, but occurs less quickly and takes more load.
		 * In addition, the less cpu a process uses the smaller the
		 * effect of the overload.
		 */
		decay_factor = hz - ttlticks;

		lp->lwp_estcpu = ESTCPULIM(
				(lp->lwp_estcpu * ttlticks + estcpu) /
				(ttlticks + 1));
		dfly_resetpriority(lp);
		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
		lp->lwp_cpticks = 0;
	}
}
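/*
 * Worked example of the exponential average above (hypothetical numbers):
 * with ttlticks = 9 and an instant estcpu of 10240 (one full cpu), a
 * thread whose lwp_estcpu was 0 moves to (0 * 9 + 10240) / 10 = 1024.
 * Repeated measurements pull lwp_estcpu asymptotically toward the instant
 * value, so short bursts perturb the average far less than sustained
 * cpu-bound behavior.
 */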
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
dfly_resetpriority(struct lwp *lp)
{
	dfly_pcpu_t rdd;
	int newpriority;
	u_int32_t newrqtype;
	int rcpu;
	int checkpri;
	int estcpu;
	int delta_uload;

	crit_enter();

	/*
	 * Lock the scheduler (lp) belongs to.  This can be on a different
	 * cpu.  Handle races.  This loop breaks out with the appropriate
	 * rdd locked.
	 */
	for (;;) {
		rcpu = lp->lwp_qcpu;
		cpu_ccfence();
		rdd = &dfly_pcpu[rcpu];
		spin_lock(&rdd->spin);
		if (rcpu == lp->lwp_qcpu)
			break;
		spin_unlock(&rdd->spin);
	}

	/*
	 * Calculate the new priority and queue type
	 */
	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		newpriority = PRIBASE_REALTIME +
			      (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		/*
		 * Calculate the new priority.
		 *
		 * nice contributes up to NICE_QS queues (typ 24 - full range)
		 * estcpu contributes up to EST_QS queues (typ 20)
		 *
		 * A nice +20 process receives 1/10 cpu vs nice+0.  Niced
		 * processes more than 20 apart may receive no cpu, so cpu
		 * bound nice -20 can prevent a nice +5 from getting any
		 * cpu.  A nice+0, being in the middle, always gets some cpu
		 * no matter what.
		 */
		estcpu = lp->lwp_estcpu;
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
			      (NICE_QS * PPQ) / PRIO_RANGE;
		newpriority += estcpu * PPQ / ESTCPUPPQ;
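		/*
		 * Worked example (hypothetical numbers): nice +0 maps to
		 * (0 - (-20)) * (24 * 4) / 41 = 46 priority units, and an
		 * estcpu of 5120 (half of ESTCPUMAX) adds 5120 * 4 / 512 =
		 * 40 more, for a pre-clamp newpriority of 86, i.e. run
		 * queue 86 / 4 = 21.
		 */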
		if (newpriority < 0)
			newpriority = 0;
		if (newpriority >= MAXPRI)
			newpriority = MAXPRI - 1;
		newpriority += PRIBASE_NORMAL;
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}
	/*
	 * The LWKT scheduler doesn't dive usched structures, give it a hint
	 * on the relative priority of user threads running in the kernel.
	 * The LWKT scheduler will always ensure that a user thread running
	 * in the kernel will get cpu some time, regardless of its upri,
	 * but can decide not to instantly switch from one kernel or user
	 * mode user thread to a kernel-mode user thread when it has a less
	 * desirable user priority.
	 *
	 * td_upri has normal sense (higher values are more desirable), so
	 * negate it (this is a different field from lp->lwp_priority).
	 */
	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 *
	 * Since uload is ~PPQMASK masked, no modifications are necessary if
	 * we end up in the same run queue.
	 *
	 * Reset rrcount if moving to a higher-priority queue, otherwise
	 * retain rrcount.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		if (lp->lwp_priority < newpriority)
			lp->lwp_rrcount = 0;
		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
			dfly_remrunqueue_locked(rdd, lp);
			lp->lwp_priority = newpriority;
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			dfly_setrunqueue_locked(rdd, lp);
			checkpri = 1;
		} else {
			lp->lwp_priority = newpriority;
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			checkpri = 0;
		}
	} else {
		/*
		 * In the same PPQ, uload cannot change.
		 */
		lp->lwp_priority = newpriority;
		checkpri = 1;
		rcpu = -1;
	}
	/*
	 * Adjust effective load.
	 *
	 * Calculate load then scale up or down geometrically based on p_nice.
	 * Processes niced up (positive) are less important, and processes
	 * niced downward (negative) are more important.  The higher the
	 * uload, the more important the thread.
	 */
	/* 0-511, 0-100% cpu */
	delta_uload = lptouload(lp);
	delta_uload -= lp->lwp_uload;
	if (lp->lwp_uload + delta_uload < -32767) {
		delta_uload = -32768 - lp->lwp_uload;
	} else if (lp->lwp_uload + delta_uload > 32767) {
		delta_uload = 32767 - lp->lwp_uload;
	}
	lp->lwp_uload += delta_uload;
	if (lp->lwp_mpflags & LWP_MP_ULOAD)
		atomic_add_long(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 *
	 * The LWP may be owned by a CPU different from the current one,
	 * in which case dd->uschedcp may be modified without an MP lock
	 * or a spinlock held.  The worst that happens is that the code
	 * below causes a spurious need_user_resched() on the target CPU
	 * and dd->pri to be wrong for a short period of time, both of
	 * which are harmless.
	 *
	 * If checkpri is 0 we are adjusting the priority of the current
	 * process, possibly higher (less desirable), so ignore the upri
	 * check which will fail in that case.
	 */
	if (rcpu >= 0) {
		if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
		    (checkpri == 0 ||
		     (rdd->upri & ~PRIMASK) >
		     (lp->lwp_priority & ~PRIMASK))) {
			if (rcpu == mycpu->gd_cpuid) {
				spin_unlock(&rdd->spin);
				need_user_resched();
			} else {
				spin_unlock(&rdd->spin);
				lwkt_send_ipiq(globaldata_find(rcpu),
					       dfly_need_user_resched_remote,
					       NULL);
			}
		} else {
			spin_unlock(&rdd->spin);
		}
	} else {
		spin_unlock(&rdd->spin);
	}
	crit_exit();
}
static void
dfly_yield(struct lwp *lp)
{
	if (lp->lwp_qcpu != mycpu->gd_cpuid)
		return;
	KKASSERT(lp == curthread->td_lwp);

	/*
	 * Don't set need_user_resched() or mess with rrcount or anything.
	 * The TDF flag will override everything as long as we release.
	 */
	atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
	dfly_release_curproc(lp);
}
/*
 * Thread was forcefully migrated to another cpu.  Normally forced migrations
 * are used for iterations and the kernel returns to the original cpu before
 * returning and this is not needed.  However, if the kernel migrates a
 * thread to another cpu and wants to leave it there, it has to call this
 * scheduler helper.
 *
 * Note that the lwkt_migratecpu() function also released the thread, so
 * we don't have to worry about that.
 */
static void
dfly_changedcpu(struct lwp *lp)
{
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
	dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];

	if (dd != rdd) {
		spin_lock(&dd->spin);
		dfly_changeqcpu_locked(lp, dd, rdd);
		spin_unlock(&dd->spin);
	}
}
/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batchy than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).
 *
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
dfly_forking(struct lwp *plp, struct lwp *lp)
{
	int estcpu;

	/*
	 * Put the child 4 queue slots (out of 32) higher than the parent
	 * (less desirable than the parent).
	 */
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
				   ESTCPUPPQ * usched_dfly_forkbias);
	lp->lwp_forked = 1;
	lp->lwp_estfast = 0;

	/*
	 * Even though the lp will be scheduled specially the first time
	 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
	 * to avoid favoring a fixed cpu.
	 */
#if 0
	static uint16_t save_cpu;
	lp->lwp_qcpu = ++save_cpu % ncpus;
#else
	lp->lwp_qcpu = plp->lwp_qcpu;
	if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
		lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
#endif

	/*
	 * Dock the parent a cost for the fork, protecting us from fork
	 * bombs.  If the parent is forking quickly this makes both the
	 * parent and child more batchy.
	 */
	estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
	plp->lwp_estcpu = ESTCPULIM(estcpu);
}
/*
 * Called when a lwp is being removed from this scheduler, typically
 * during lwp_exit().  We have to clean out any ULOAD accounting before
 * we can let the lp go.  The dd->spin lock is not needed for uload
 * updates.
 *
 * Scheduler dequeueing has already occurred, no further action in that
 * regard is needed.
 */
static void
dfly_exiting(struct lwp *lp, struct proc *child_proc)
{
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];

	if (lp->lwp_mpflags & LWP_MP_ULOAD) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
		atomic_add_long(&dd->uload, -lp->lwp_uload);
		atomic_add_int(&dd->ucount, -1);
	}
}
/*
 * This function cannot block in any way, but spinlocks are ok.
 *
 * Update the uload based on the state of the thread (whether it is going
 * to sleep or running again).  The uload is meant to be a longer-term
 * load and not an instantaneous load.
 */
static void
dfly_uload_update(struct lwp *lp)
{
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];

	if (lp->lwp_thread->td_flags & TDF_RUNQ) {
		if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
			spin_lock(&dd->spin);
			if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
				atomic_set_int(&lp->lwp_mpflags,
					       LWP_MP_ULOAD);
				atomic_add_long(&dd->uload, lp->lwp_uload);
				atomic_add_int(&dd->ucount, 1);
			}
			spin_unlock(&dd->spin);
		}
	} else if (lp->lwp_slptime > 0) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			spin_lock(&dd->spin);
			if (lp->lwp_mpflags & LWP_MP_ULOAD) {
				atomic_clear_int(&lp->lwp_mpflags,
						 LWP_MP_ULOAD);
				atomic_add_long(&dd->uload, -lp->lwp_uload);
				atomic_add_int(&dd->ucount, -1);
			}
			spin_unlock(&dd->spin);
		}
	}
}
/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * Must be called with rdd->spin locked.  The spinlock is left intact through
 * the entire routine.  dd->spin does not have to be locked.
 *
 * If worst is non-zero this function finds the worst thread instead of the
 * best thread (used by the schedulerclock-based rover).
 */
static struct lwp *
dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
		       struct lwp *chklp, int worst)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;

	/*
	 * Select best or worst process.  Once selected, clear the bit
	 * in our local variable (idqbits, tsqbits, or rtqbits) just
	 * in case we have to loop.
	 */
	rtqbits = rdd->rtqueuebits;
	tsqbits = rdd->queuebits;
	idqbits = rdd->idqueuebits;

loopfar:
	if (worst) {
		if (idqbits) {
			pri = bsrl(idqbits);
			idqbits &= ~(1U << pri);
			q = &rdd->idqueues[pri];
			which = &rdd->idqueuebits;
		} else if (tsqbits) {
			pri = bsrl(tsqbits);
			tsqbits &= ~(1U << pri);
			q = &rdd->queues[pri];
			which = &rdd->queuebits;
		} else if (rtqbits) {
			pri = bsrl(rtqbits);
			rtqbits &= ~(1U << pri);
			q = &rdd->rtqueues[pri];
			which = &rdd->rtqueuebits;
		} else {
			return (NULL);
		}
		lp = TAILQ_LAST(q, rq);
	} else {
		if (rtqbits) {
			pri = bsfl(rtqbits);
			rtqbits &= ~(1U << pri);
			q = &rdd->rtqueues[pri];
			which = &rdd->rtqueuebits;
		} else if (tsqbits) {
			pri = bsfl(tsqbits);
			tsqbits &= ~(1U << pri);
			q = &rdd->queues[pri];
			which = &rdd->queuebits;
		} else if (idqbits) {
			pri = bsfl(idqbits);
			idqbits &= ~(1U << pri);
			q = &rdd->idqueues[pri];
			which = &rdd->idqueuebits;
		} else {
			return (NULL);
		}
		lp = TAILQ_FIRST(q);
	}
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return (NULL);
	}

	/*
	 * When rdd != dd, we have to make sure that the process we
	 * are pulling is allowed to run on our cpu.  This alternative
	 * path is a bit more expensive but it's not considered to be
	 * in the critical path.
	 */
loopnear:
	if (rdd != dd && CPUMASK_TESTBIT(lp->lwp_cpumask, dd->cpuid) == 0) {
		if (worst)
			lp = TAILQ_PREV(lp, rq, lwp_procq);
		else
			lp = TAILQ_NEXT(lp, lwp_procq);
		if (lp == NULL)
			goto loopfar;
		goto loopnear;
	}

	KTR_COND_LOG(usched_chooseproc,
	    lp->lwp_proc->p_pid == usched_dfly_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    mycpu->gd_cpuid);

	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	TAILQ_REMOVE(q, lp, lwp_procq);
	--rdd->runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);

	/*
	 * If we are choosing a process from rdd with the intent to
	 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
	 * is still held.
	 */
	if (rdd != dd) {
		if (lp->lwp_mpflags & LWP_MP_ULOAD) {
			atomic_add_long(&rdd->uload, -lp->lwp_uload);
			atomic_add_int(&rdd->ucount, -1);
		}
		lp->lwp_qcpu = dd->cpuid;
		atomic_add_long(&dd->uload, lp->lwp_uload);
		atomic_add_int(&dd->ucount, 1);
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
	}
	return lp;
}
/*
 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
 *
 * Choose a cpu node to schedule lp on, hopefully nearby its current
 * node.
 *
 * We give the current node a modest advantage for obvious reasons.
 *
 * We also give the node the thread was woken up FROM a slight advantage
 * in order to try to schedule paired threads which synchronize/block waiting
 * for each other fairly close to each other.  Similarly in a network setting
 * this feature will also attempt to place a user process near the kernel
 * protocol thread that is feeding it data.  THIS IS A CRITICAL PART of the
 * algorithm as it heuristically groups synchronizing processes for locality
 * of reference in multi-socket systems.
 *
 * We check against running processes and give a big advantage if there
 * are none running.
 *
 * The caller will normally dfly_setrunqueue() lp on the returned queue.
 *
 * When the topology is known choose a cpu whose group has, in aggregate,
 * the lowest weighted load.
 */
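/*
 * Illustrative weighting (hypothetical numbers, default weights): a
 * candidate node whose cpus sum to uload 1000 across 2 cpus starts at an
 * average load of 500; being lp's current node subtracts weight1 (30),
 * being the wakeup-source node typically subtracts weight2 (180), each
 * recently runnable thread adds weight3 (10), and a cpu running a less
 * important thread subtracts weight4 (120).  The node with the lowest
 * final figure wins at each level of the topology descent.
 */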
static dfly_pcpu_t
dfly_choose_best_queue(struct lwp *lp)
{
	cpumask_t wakemask;
	cpumask_t mask;
	cpu_node_t *cpup;
	cpu_node_t *cpun;
	cpu_node_t *cpub;
	dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
	dfly_pcpu_t rdd;
	int wakecpu;
	int cpuid;
	int n;
	int loadav;
	long load;
	long lowest_load;

	/*
	 * When the topology is unknown choose a random cpu that is hopefully
	 * acceptable.
	 */
	if (dd->cpunode == NULL)
		return (dfly_choose_queue_simple(dd, lp));

	loadav = (averunnable.ldavg[0] + FSCALE / 2) >> FSHIFT;

	/*
	 * Pairing mask
	 */
	if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
		wakemask = dfly_pcpu[wakecpu].cpumask;
	else
		CPUMASK_ASSZERO(wakemask);

	if (usched_dfly_debug == lp->lwp_proc->p_pid)
		kprintf("choosebest wakefromcpu %d:\n",
			lp->lwp_thread->td_wakefromcpu);

	/*
	 * When the topology is known choose a cpu whose group has, in
	 * aggregate, the lowest weighted load.
	 */
	cpup = root_cpu_node;
	rdd = dd;

	while (cpup) {
		/*
		 * Degenerate case super-root
		 */
		if (cpup->child_no == 1) {
			cpup = cpup->child_node[0];
			continue;
		}

		/*
		 * Terminal cpunode
		 */
		if (cpup->child_no == 0) {
			rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("  last cpu %d\n", rdd->cpuid);
			break;
		}

		cpub = NULL;
		lowest_load = 0x7FFFFFFFFFFFFFFFLL;
		if (usched_dfly_debug == lp->lwp_proc->p_pid)
			kprintf("  reset lowest_load for scan\n");

		for (n = 0; n < cpup->child_no; ++n) {
			/*
			 * Accumulate load information for all cpus
			 * which are members of this node.
			 */
			int count;

			cpun = cpup->child_node[n];
			mask = cpun->members;
			CPUMASK_ANDMASK(mask, usched_global_cpumask);
			CPUMASK_ANDMASK(mask, smp_active_mask);
			CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
			if (CPUMASK_TESTZERO(mask))
				continue;

			load = 0;
			count = 0;

			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("  mask:");
			while (CPUMASK_TESTNZERO(mask)) {
				cpuid = BSFCPUMASK(mask);
				rdd = &dfly_pcpu[cpuid];

				if (usched_dfly_debug == lp->lwp_proc->p_pid)
					kprintf(" %d", cpuid);

				/*
				 * Cumulative load for members.  Note that
				 * if (lp) is part of the group, lp's
				 * contribution will be backed out later.
				 */
				load += rdd->uload;
				load += rdd->ucount *
					usched_dfly_weight3;

				/*
				 * If the node is running a less important
				 * thread than our thread, give it an
				 * advantage.  With a high-enough weighting
				 * this can override most other considerations
				 * to provide ultimate priority fairness at
				 * the cost of localization.
				 */
				if ((rdd->upri & ~PPQMASK) >
				    (lp->lwp_priority & ~PPQMASK)) {
					load -= usched_dfly_weight4;
				}

#if 0
				if (rdd->uschedcp == NULL &&
				    rdd->runqcount == 0 &&
				    rdd->gd->gd_tdrunqcount == 0
				) {
					load += rdd->uload / 2;
					load += rdd->ucount *
						usched_dfly_weight3 / 2;
				} else {
					load += rdd->uload;
					load += rdd->ucount *
						usched_dfly_weight3;
				}
#endif
				CPUMASK_NANDBIT(mask, cpuid);
				++count;
			}

			/*
			 * Compensate if the lp is already accounted for in
			 * the aggregate uload for this mask set.  We want
			 * to calculate the loads as if lp were not present,
			 * otherwise the calculation is bogus.
			 */
			if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
			    CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
				load -= lp->lwp_uload;
				load -= usched_dfly_weight3;	/* ucount */
			}

			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("\n  accum_start c=%d ld=%ld "
					"cpu=%d ld/cnt=%ld ",
					count, load, rdd->cpuid,
					load / count);

			/*
			 * load is the aggregate load of count CPUs in the
			 * group.  For the weightings to work as intended,
			 * we want an average per-cpu load.
			 */
			load = load / count;

			/*
			 * Advantage the cpu group (lp) is already on.
			 */
			if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
				load -= usched_dfly_weight1;

			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("B:%ld ", load);

			/*
			 * Advantage nodes with more memory
			 */
			if (usched_dfly_node_mem) {
				load -= cpun->phys_mem * usched_dfly_weight5 /
					usched_dfly_node_mem;
			}

			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("C:%ld ", load);

			/*
			 * Advantage the cpu group we desire to pair (lp)
			 * to, but disadvantage hyperthreads on the same
			 * core, or the same thread as the ipc peer.
			 *
			 * Under very heavy loads it is usually beneficial
			 * to set kern.usched_dfly.ipc_smt to 1, and under
			 * extreme loads it might be beneficial to also set
			 * kern.usched_dfly.ipc_same to 1.
			 *
			 * load+ disadvantage
			 * load- advantage
			 */
			if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
				if (cpun->child_no) {
					if (cpun->type == CORE_LEVEL &&
					    usched_dfly_ipc_smt < 0 &&
					    loadav >= (ncpus >> 1)) {
						/*
						 * Advantage at higher levels
						 * of the topology.
						 */
						load -= usched_dfly_weight2;
					} else if (cpun->type == CORE_LEVEL &&
						   usched_dfly_ipc_smt == 0) {
						/*
						 * Disadvantage the same core
						 * when there are hyperthreads.
						 */
						load += usched_dfly_weight2;
					} else {
						/*
						 * Advantage at higher levels
						 * of the topology.
						 */
						load -= usched_dfly_weight2;
					}
				} else {
					/*
					 * Disadvantage the last level (core
					 * or hyperthread).  Try to schedule
					 * the ipc peer.
					 */
					if (usched_dfly_ipc_same < 0 &&
					    loadav >= ncpus) {
						load -= usched_dfly_weight2;
					} else if (usched_dfly_ipc_same) {
						load -= usched_dfly_weight2;
					} else {
						load += usched_dfly_weight2;
					}
				}
			} else {
				if (cpun->child_no != 0) {
					/* advantage */
					load -= usched_dfly_weight2;
				} else {
					/*
					 * 0x10 (disadvantage)
					 * 0x00 (advantage)   - default
					 */
					if (usched_dfly_features & 0x10)
						load += usched_dfly_weight2;
					else
						load -= usched_dfly_weight2;
				}
			}

			if (usched_dfly_debug == lp->lwp_proc->p_pid)
				kprintf("D:%ld ", load);

			/*
			 * Calculate the best load
			 */
			if (cpub == NULL || lowest_load > load ||
			    (lowest_load == load &&
			     CPUMASK_TESTMASK(cpun->members, dd->cpumask))
			) {
				lowest_load = load;
				cpub = cpun;
			}
		}
		if (usched_dfly_debug == lp->lwp_proc->p_pid)
			kprintf("low=%ld]\n", lowest_load);
		cpup = cpub;
	}

	/* Dispatch this outcast to a proper CPU. */
	if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
		rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];

	if (usched_dfly_chooser > 0) {
		--usched_dfly_chooser;		/* only N lines */
		kprintf("lp %02d->%02d %s\n",
			lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
	}
	if (usched_dfly_debug == lp->lwp_proc->p_pid)
		kprintf("final cpu %d\n", rdd->cpuid);
	return (rdd);
}
/*
 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
 *
 * Choose the worst queue close to dd's cpu node with a non-empty runq
 * that is not dd.
 *
 * This is used by the thread chooser when the current cpu's queues are
 * empty to steal a thread from another cpu's queue.  We want to offload
 * the most heavily-loaded queue.
 *
 * However, we do not want to steal from far-away nodes who themselves
 * have idle cpu's that are more suitable to distribute the far-away
 * load.
 */
1958 dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit)
1975 * When the topology is unknown choose a random cpu that is hopefully
1978 if (dd->cpunode == NULL) {
1983 * When the topology is known choose a cpu whos group has, in
1984 * aggregate, has the highest weighted load.
1986 cpup = root_cpu_node;
1990 * Degenerate case super-root
1992 if (cpup->child_no == 1) {
1993 cpup = cpup->child_node[0];
2000 if (cpup->child_no == 0) {
2001 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
2006 highest_load = -0x7FFFFFFFFFFFFFFFLL;
2008 for (n = 0; n < cpup->child_no; ++n) {
2010 * Accumulate load information for all cpus
2011 * which are members of this node.
2015 cpun = cpup->child_node[n];
2016 mask = cpun->members;
2017 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2018 CPUMASK_ANDMASK(mask, smp_active_mask);
2019 if (CPUMASK_TESTZERO(mask))
2025 while (CPUMASK_TESTNZERO(mask)) {
2026 cpuid = BSFCPUMASK(mask);
2027 rdd = &dfly_pcpu[cpuid];
2030 load += rdd->ucount * usched_dfly_weight3;
2033 if (rdd->uschedcp == NULL &&
2034 rdd->runqcount == 0 &&
2035 rdd->gd->gd_tdrunqcount == 0
2037 load += rdd->uload / 2;
2038 load += rdd->ucount *
2039 usched_dfly_weight3 / 2;
2042 load += rdd->ucount *
2043 usched_dfly_weight3;
2046 CPUMASK_NANDBIT(mask, cpuid);
2051 /*
2052  * Advantage the cpu group (dd) is already on.
2053  *
2054  * When choosing the worst queue we reverse the
2055  * sign, but only count half the weight.
2056  *
2057  * weight1 needs to be high enough to be stable,
2058  * but this can also cause it to be too sticky,
2059  * so the iterator which rebalances the load sets
2060  * forceit to ignore it.
2061  */
2062 if (forceit == 0 &&
2063     CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
2064         load += usched_dfly_weight1 / 2;
2065 }
2067 /*
2068  * Disadvantage nodes with more memory (same sign).
2069  */
2070 if (usched_dfly_node_mem) {
2071         load -= cpun->phys_mem * usched_dfly_weight5 /
2072                 usched_dfly_node_mem;
2073 }
2076 /*
2077  * The best candidate is the one with the worst
2078  * (highest) load.
2079  */
2080 if (cpub == NULL || highest_load < load ||
2081     (highest_load == load &&
2082      CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
2083         highest_load = load;
2084         cpub = cpun;
2085 }
2090 /*
2091  * We never return our own node (dd), and only return a remote
2092  * node if its load is significantly worse than ours (i.e. where
2093  * stealing a thread would be considered reasonable).
2094  *
2095  * This also helps us avoid breaking paired threads apart, which
2096  * can have disastrous effects on performance.
2097  */
2102 hpri = 0;
2103 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
2104         hpri = pri;
2105 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
2106         hpri = pri;
2107 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
2108         hpri = pri;
2109 hpri *= PPQ;
2110 if (rdd->uload - hpri < dd->uload + hpri)
2111         rdd = NULL;
2112 return (rdd);
2113 }
2117 static dfly_pcpu_t
2118 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
2119 {
2126 /*
2127  * Fallback to the original heuristic: select a random cpu,
2128  * first checking the cpus not currently running a user thread.
2129  *
2130  * Use cpuid as the base cpu in our scan, first checking
2131  * cpuid...(ncpus-1), then 0...(cpuid-1).  This avoids favoring
2132  * lower-numbered cpus.
2133  */
2134 ++dd->scancpu; /* SMP race ok */
2135 mask = dfly_rdyprocmask;
2136 CPUMASK_NANDMASK(mask, dfly_curprocmask);
2137 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2138 CPUMASK_ANDMASK(mask, smp_active_mask);
2139 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2141 cpubase = (int)(dd->scancpu % ncpus);
2142 CPUMASK_ASSBMASK(tmpmask, cpubase);
2143 CPUMASK_INVMASK(tmpmask);
2144 CPUMASK_ANDMASK(tmpmask, mask);
2145 while (CPUMASK_TESTNZERO(tmpmask)) {
2146 cpuid = BSFCPUMASK(tmpmask);
2147 rdd = &dfly_pcpu[cpuid];
2149 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2150         goto found;
2151 CPUMASK_NANDBIT(tmpmask, cpuid);
2152 }
2154 CPUMASK_ASSBMASK(tmpmask, cpubase);
2155 CPUMASK_ANDMASK(tmpmask, mask);
2156 while (CPUMASK_TESTNZERO(tmpmask)) {
2157 cpuid = BSFCPUMASK(tmpmask);
2158 rdd = &dfly_pcpu[cpuid];
2160 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2161         goto found;
2162 CPUMASK_NANDBIT(tmpmask, cpuid);
2163 }
2165 /*
2166  * Then cpus which might have a currently running lp
2167  */
2168 mask = dfly_rdyprocmask;
2169 CPUMASK_ANDMASK(mask, dfly_curprocmask);
2170 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2171 CPUMASK_ANDMASK(mask, smp_active_mask);
2172 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2174 CPUMASK_ASSBMASK(tmpmask, cpubase);
2175 CPUMASK_INVMASK(tmpmask);
2176 CPUMASK_ANDMASK(tmpmask, mask);
2177 while (CPUMASK_TESTNZERO(tmpmask)) {
2178 cpuid = BSFCPUMASK(tmpmask);
2179 rdd = &dfly_pcpu[cpuid];
2181 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2182         goto found;
2183 CPUMASK_NANDBIT(tmpmask, cpuid);
2184 }
2186 CPUMASK_ASSBMASK(tmpmask, cpubase);
2187 CPUMASK_ANDMASK(tmpmask, mask);
2188 while (CPUMASK_TESTNZERO(tmpmask)) {
2189 cpuid = BSFCPUMASK(tmpmask);
2190 rdd = &dfly_pcpu[cpuid];
2192 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2193         goto found;
2194 CPUMASK_NANDBIT(tmpmask, cpuid);
2195 }
2197 /*
2198  * If we cannot find a suitable cpu we round-robin using scancpu.
2199  * Other cpus will pick up as they release their current lwps or
2200  * become idle.
2201  *
2202  * Avoid a degenerate system lockup case if usched_global_cpumask
2203  * is set to 0 or otherwise does not cover lwp_cpumask.
2204  *
2205  * We only kick the target helper thread in this case; we do not
2206  * set the user resched flag ourselves.
2207  */
2208 cpuid = cpubase;
2209 if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
2210         cpuid = BSFCPUMASK(lp->lwp_cpumask);
2211 else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
2212         cpuid = 0;
2213 rdd = &dfly_pcpu[cpuid];
2214 found:
2215 return (rdd);
2216 }
2219 static void
2220 dfly_need_user_resched_remote(void *dummy)
2221 {
2222 globaldata_t gd = mycpu;
2223 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
2225 /*
2226  * Flag reschedule needed
2227  */
2228 need_user_resched();
2230 /*
2231  * If no user thread is currently running we need to kick the helper
2232  * on our cpu to recover.  Otherwise the cpu will never schedule
2233  * anything again.
2234  *
2235  * We cannot schedule the process ourselves because this is an
2236  * IPI callback and we cannot acquire spinlocks in an IPI callback.
2237  *
2238  * Call wakeup_mycpu to avoid sending IPIs to other CPUs.
2239  */
2240 if (dd->uschedcp == NULL && (dd->flags & DFLY_PCPU_RDYMASK)) {
2241         ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2242         dd->flags &= ~DFLY_PCPU_RDYMASK;
2243         wakeup_mycpu(dd->helper_thread);
2244 }
2245 }
2247 /*
2248  * dfly_remrunqueue_locked() removes a given process from the run queue
2249  * that it is on, clearing the queue busy bit if it becomes empty.
2250  *
2251  * Note that the user process scheduler is different from the LWKT
2252  * scheduler.  The user process scheduler only manages user processes
2253  * but it uses LWKT underneath, and a user process operating in the
2254  * kernel will often be 'released' from our management.
2255  *
2256  * uload is NOT adjusted here.  It is only adjusted if the lwkt_thread
2257  * goes to sleep or the lwp is moved to a different runq.
2258  */
2259 static void
2260 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2261 {
2262         struct rq *q;
2263         u_int32_t *which;
2264         int pri;
2266 KKASSERT(rdd->runqcount >= 0);
2268 pri = lp->lwp_rqindex;
2270 switch(lp->lwp_rqtype) {
2271 case RTP_PRIO_NORMAL:
2272         q = &rdd->queues[pri];
2273         which = &rdd->queuebits;
2274         break;
2275 case RTP_PRIO_REALTIME:
2276 case RTP_PRIO_FIFO:
2277         q = &rdd->rtqueues[pri];
2278         which = &rdd->rtqueuebits;
2279         break;
2280 case RTP_PRIO_IDLE:
2281         q = &rdd->idqueues[pri];
2282         which = &rdd->idqueuebits;
2283         break;
2284 default:
2285         panic("remrunqueue: invalid rtprio type");
2286         /* NOT REACHED */
2287 }
2288 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2289 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2290 TAILQ_REMOVE(q, lp, lwp_procq);
2291 --rdd->runqcount;
2292 if (TAILQ_EMPTY(q)) {
2293         KASSERT((*which & (1 << pri)) != 0,
2294                 ("remrunqueue: remove from empty queue"));
2295         *which &= ~(1 << pri);
2296 }
2297 }
2299 /*
2300  * dfly_setrunqueue_locked()
2301  *
2302  * Add a process whose rqtype and rqindex had previously been calculated
2303  * onto the appropriate run queue.  Determine if the addition requires
2304  * a reschedule on a cpu and return the cpuid or -1.
2305  *
2306  * NOTE: Lower priorities are better priorities.
2307  *
2308  * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2309  *                sum of the rough lwp_priority for all running and runnable
2310  *                processes.  Lower priority processes (higher lwp_priority
2311  *                values) actually DO count as more load, not less, because
2312  *                these are the programs which require the most care with
2313  *                regards to cpu selection.
2314  */
2315 static void
2316 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2317 {
2318         struct rq *q;
2319         u_int32_t *which;
2320         int pri;
2322 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2324 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2325 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2326 atomic_add_long(&rdd->uload, lp->lwp_uload);
2327         atomic_add_int(&rdd->ucount, 1);
2328 }
2330 pri = lp->lwp_rqindex;
2332 switch(lp->lwp_rqtype) {
2333 case RTP_PRIO_NORMAL:
2334         q = &rdd->queues[pri];
2335         which = &rdd->queuebits;
2336         break;
2337 case RTP_PRIO_REALTIME:
2338 case RTP_PRIO_FIFO:
2339         q = &rdd->rtqueues[pri];
2340         which = &rdd->rtqueuebits;
2341         break;
2342 case RTP_PRIO_IDLE:
2343         q = &rdd->idqueues[pri];
2344         which = &rdd->idqueuebits;
2345         break;
2346 default:
2347         panic("setrunqueue: invalid rtprio type");
2348         /* NOT REACHED */
2349 }
2351 /*
2352  * Place us on the selected queue.  Determine if we should be
2353  * placed at the head of the queue or at the end.
2354  *
2355  * We are placed at the tail if our round-robin count has expired,
2356  * or is about to expire and the system thinks it's a good place to
2357  * round-robin, or there is already a next thread on the queue
2358  * (it might be trying to pick up where it left off and we don't
2359  * want to interfere).
2360  */
2361 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2362 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2363 ++rdd->runqcount;
2365 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2366     (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2367      (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) ||
2368     TAILQ_FIRST(q) != NULL
2369 ) {
2372         atomic_clear_int(&lp->lwp_thread->td_mpflags,
2373                          TDF_MP_BATCH_DEMARC);
2374         lp->lwp_rrcount = 0;
2375         TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2376 } else {
2377         /*
2378          * Retain rrcount and place on head.  Count is retained
2379          * even if the queue is empty.
2380          */
2381         TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2382 }
2383 *which |= 1 << pri;
2384 }
2386 /*
2387  * For SMP systems a user scheduler helper thread is created for each
2388  * cpu and is used to allow one cpu to wake up another for the purposes
2389  * of scheduling userland threads from setrunqueue().
2390  *
2391  * UP systems do not need the helper since there is only one cpu.
2392  *
2393  * We can't use the idle thread for this because we might block.
2394  * Additionally, doing things this way allows us to HLT idle cpus
2395  * on MP systems.
2396  */
2397 static void
2398 dfly_helper_thread(void *dummy)
2399 {
2400         globaldata_t gd = mycpu;
2401         dfly_pcpu_t dd;
2402         dfly_pcpu_t rdd;
2403         struct lwp *nlp;
2404         cpumask_t mask;
2405         int cpuid;
2408 cpuid = gd->gd_cpuid; /* doesn't change */
2409 mask = gd->gd_cpumask; /* doesn't change */
2410 dd = &dfly_pcpu[cpuid];
2412 /*
2413  * Initial interlock, make sure all dfly_pcpu[] structures have
2414  * been initialized before proceeding.
2415  */
2416 lockmgr(&usched_dfly_config_lk, LK_SHARED);
2417 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2419 /*
2420  * Since we only want to be woken up when no user processes
2421  * are scheduled on a cpu, run at an ultra low priority.
2422  */
2423 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2425 for (;;) {
2426         /*
2427          * We use the LWKT deschedule-interlock trick to avoid racing
2428          * dfly_rdyprocmask.  This means we cannot block through to the
2429          * manual lwkt_switch() call we make below.
2430          */
2431         crit_enter_gd(gd);
2432         tsleep_interlock(dd->helper_thread, 0);
2434         spin_lock(&dd->spin);
2435         if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2436                 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2437                 dd->flags |= DFLY_PCPU_RDYMASK;
2438         }
2439         clear_user_resched();   /* This satisfies the reschedule request */
2440 #if 0
2441         dd->rrcount = 0;        /* Reset the round-robin counter */
2442 #endif
2444 if (dd->runqcount || dd->uschedcp != NULL) {
2445         /*
2446          * Threads are available.  A thread may or may not be
2447          * currently scheduled.  Get the best thread already queued
2448          * to our cpu.
2449          */
2450         nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2451         if (nlp) {
2452                 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2453                         ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2454                         dd->flags |= DFLY_PCPU_CURMASK;
2455                 }
2456                 dd->upri = nlp->lwp_priority;
2457                 dd->uschedcp = nlp;
2458 #if 0
2459                 dd->rrcount = 0;        /* reset round robin */
2460 #endif
2461                 spin_unlock(&dd->spin);
2462                 lwkt_acquire(nlp->lwp_thread);
2463                 lwkt_schedule(nlp->lwp_thread);
2464         } else {
2465                 /*
2466                  * This situation should not occur because we had
2467                  * at least one thread available.
2468                  */
2469                 spin_unlock(&dd->spin);
2470         }
2471 } else if (usched_dfly_features & 0x01) {
2472         /*
2473          * This cpu is devoid of runnable threads, steal a thread
2474          * from another cpu.  Since we're stealing, might as well
2475          * load balance at the same time.
2476          *
2477          * We choose the highest-loaded thread from the worst queue.
2478          *
2479          * NOTE! This function only returns a non-NULL rdd when
2480          *       another cpu's queue is obviously overloaded.  We
2481          *       do not want to perform the type of rebalancing
2482          *       the schedclock does here because it would result
2483          *       in insane process pulling when 'steady' state is
2484          *       partially unbalanced (e.g. 6 runnables and only
2485          *       4 cores).
2486          */
2487         rdd = dfly_choose_worst_queue(dd, 0);
2488 if (rdd && dd->uload + usched_dfly_weight6 < rdd->uload &&
2489     spin_trylock(&rdd->spin)) {
2490         nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2491         spin_unlock(&rdd->spin);
2492 } else {
2493         nlp = NULL;
2494 }
2495 if (nlp) {
2496         if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2497                 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2498                 dd->flags |= DFLY_PCPU_CURMASK;
2499         }
2500         dd->upri = nlp->lwp_priority;
2501         dd->uschedcp = nlp;
2502 #if 0
2503         dd->rrcount = 0;        /* reset round robin */
2504 #endif
2505         spin_unlock(&dd->spin);
2506         lwkt_acquire(nlp->lwp_thread);
2507         lwkt_schedule(nlp->lwp_thread);
2508 } else {
2509         /*
2510          * Leave the thread on our run queue.  Another
2511          * scheduler will try to pull it later.
2512          */
2513         spin_unlock(&dd->spin);
2514 }
2515 } else {
2516         /*
2517          * Devoid of runnable threads and not allowed to steal
2518          * anything.
2519          */
2520         spin_unlock(&dd->spin);
2521 }
2523 /*
2524  * We're descheduled unless someone scheduled us.  Switch away.
2525  * Exiting the critical section will cause splz() to be called
2526  * for us if interrupts and such are pending.
2527  */
2528 crit_exit_gd(gd);
2529 tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
2530 }
2531 }
2534 static int
2535 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2536 {
2537         int error, new_val;
2539 new_val = usched_dfly_stick_to_level;
2541 error = sysctl_handle_int(oidp, &new_val, 0, req);
2542 if (error != 0 || req->newptr == NULL)
2543         return (error);
2544 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2545         return (EINVAL);
2546 usched_dfly_stick_to_level = new_val;
2548 return (0);
2549 }
2551 /*
2552  * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2553  * Note that curprocmask bit 0 has already been cleared by rqinit() and
2554  * we should not mess with it further.
2555  */
2556 static void
2557 usched_dfly_cpu_init(void)
2558 {
2559         int i;
2560         int j;
2561 int smt_not_supported = 0;
2562 int cache_coherent_not_supported = 0;
2565 kprintf("Start usched_dfly helpers on cpus:\n");
2567 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2568 usched_dfly_sysctl_tree =
2569 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2570 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2571 "usched_dfly", CTLFLAG_RD, 0, "");
2573 usched_dfly_node_mem = get_highest_node_memory();
2575 lockmgr(&usched_dfly_config_lk, LK_EXCLUSIVE);
2577 for (i = 0; i < ncpus; ++i) {
2578 dfly_pcpu_t dd = &dfly_pcpu[i];
2581 CPUMASK_ASSBIT(mask, i);
2582 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2583         continue;
2585 spin_init(&dd->spin, "uschedcpuinit");
2586 dd->cpunode = get_cpu_node_by_cpuid(i);
2588 dd->gd = globaldata_find(i);
2589 CPUMASK_ASSBIT(dd->cpumask, i);
2590 for (j = 0; j < NQS; j++) {
2591 TAILQ_INIT(&dd->queues[j]);
2592 TAILQ_INIT(&dd->rtqueues[j]);
2593         TAILQ_INIT(&dd->idqueues[j]);
2594 }
2595 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2597 dd->flags &= ~DFLY_PCPU_CURMASK;
2599 if (dd->cpunode == NULL) {
2600 smt_not_supported = 1;
2601 cache_coherent_not_supported = 1;
2603 kprintf (" cpu%d - WARNING: No CPU NODE "
2604 "found for cpu\n", i);
2606 switch (dd->cpunode->type) {
2609 kprintf (" cpu%d - HyperThreading "
2610 "available. Core siblings: ",
2614 smt_not_supported = 1;
2617 kprintf (" cpu%d - No HT available, "
2618 "multi-core/physical "
2619 "cpu. Physical siblings: ",
2623 smt_not_supported = 1;
2626 kprintf (" cpu%d - No HT available, "
2627 "single-core/physical cpu. "
2628 "Package siblings: ",
2632 /* Let's go for safe defaults here */
2633 smt_not_supported = 1;
2634 cache_coherent_not_supported = 1;
2636 kprintf (" cpu%d - Unknown cpunode->"
2637 "type=%u. siblings: ",
2639 (u_int)dd->cpunode->type);
2644 if (dd->cpunode->parent_node != NULL) {
2645 kprint_cpuset(&dd->cpunode->
2646 parent_node->members);
2649 kprintf(" no siblings\n");
2654 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2655 0, i, "usched %d", i);
2657 /*
2658  * Allow user scheduling on the target cpu.  cpu #0 has already
2659  * been enabled in rqinit().
2660  */
2661 if (i) {
2662         ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2663         dd->flags &= ~DFLY_PCPU_CURMASK;
2664 }
2665 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2666         ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2667         dd->flags |= DFLY_PCPU_RDYMASK;
2668 }
2669 dd->upri = PRIBASE_NULL;
2670 }
2673 /* usched_dfly sysctl configurable parameters */
2675 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2676 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2677 OID_AUTO, "rrinterval", CTLFLAG_RW,
2678 &usched_dfly_rrinterval, 0, "");
2679 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2680 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2681 OID_AUTO, "decay", CTLFLAG_RW,
2682 &usched_dfly_decay, 0, "Extra decay when not running");
2683 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2684 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2685 OID_AUTO, "ipc_smt", CTLFLAG_RW,
2686 &usched_dfly_ipc_smt, 0, "Pair IPC on hyper-threads");
2687 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2688 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2689 OID_AUTO, "ipc_same", CTLFLAG_RW,
2690 &usched_dfly_ipc_same, 0, "Pair IPC on same thread");
2692 /* Add enable/disable option for SMT scheduling if supported */
2693 if (smt_not_supported) {
2694 usched_dfly_smt = 0;
2695 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2696 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2697 OID_AUTO, "smt", CTLFLAG_RD,
2698 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2700 usched_dfly_smt = 1;
2701 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2702 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2703 OID_AUTO, "smt", CTLFLAG_RW,
2704 &usched_dfly_smt, 0, "Enable SMT scheduling");
2707 /*
2708  * Add enable/disable option for cache coherent scheduling
2709  * if supported
2710  */
2711 if (cache_coherent_not_supported) {
2712 usched_dfly_cache_coherent = 0;
2713 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2714 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2715 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2716 "NOT SUPPORTED", 0,
2717 "Cache coherence NOT SUPPORTED");
2719 usched_dfly_cache_coherent = 1;
2720 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2721 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2722 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2723 &usched_dfly_cache_coherent, 0,
2724 "Enable/Disable cache coherent scheduling");
2726 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2727 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2728 OID_AUTO, "weight1", CTLFLAG_RW,
2729 &usched_dfly_weight1, 200,
2730 "Weight selection for current cpu");
2732 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2733 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2734 OID_AUTO, "weight2", CTLFLAG_RW,
2735 &usched_dfly_weight2, 180,
2736 "Weight selection for wakefrom cpu");
2738 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2739 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2740 OID_AUTO, "weight3", CTLFLAG_RW,
2741 &usched_dfly_weight3, 40,
2742 "Weight selection for num threads on queue");
2744 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2745 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2746 OID_AUTO, "weight4", CTLFLAG_RW,
2747 &usched_dfly_weight4, 160,
2748 "Availability of other idle cpus");
2750 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2751 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2752 OID_AUTO, "weight5", CTLFLAG_RW,
2753 &usched_dfly_weight5, 50,
2754 "Memory attached to node");
2756 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2757 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2758 OID_AUTO, "weight6", CTLFLAG_RW,
2759 &usched_dfly_weight6, 150,
2760 "Load delta required to steal from another cpu");
2762 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2763 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2764 OID_AUTO, "fast_resched", CTLFLAG_RW,
2765 &usched_dfly_fast_resched, 0,
2766 "Availability of other idle cpus");
2768 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2769 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2770 OID_AUTO, "features", CTLFLAG_RW,
2771 &usched_dfly_features, 0x8F,
2772 "Allow pulls into empty queues");
2774 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2775 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2776 OID_AUTO, "swmask", CTLFLAG_RW,
2777 &usched_dfly_swmask, ~PPQMASK,
2778 "Queue mask to force thread switch");
2781 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2782 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2783 OID_AUTO, "stick_to_level",
2784 CTLTYPE_INT | CTLFLAG_RW,
2785 NULL, sizeof usched_dfly_stick_to_level,
2786 sysctl_usched_dfly_stick_to_level, "I",
2787 "Stick a process to this level. See sysctl"
2788 "paremter hw.cpu_topology.level_description");
2791 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2794 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2795 usched_dfly_cpu_init, NULL);