2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
41 #include <sys/queue.h>
43 #include <sys/rtprio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
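/*
 * Illustration: since each run queue spans PPQ == MAXPRI / NQS == 4
 * priority levels (see the note above), a normal-class lwp_priority of
 * PRIBASE_NORMAL + 57 indexes queue 57 / PPQ == 14, and PPQMASK (0x3)
 * is what the "masked" priority comparisons later in this file strip
 * off, so priorities that differ only within a single queue compare
 * as equal.
 */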
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_estfast lwp_usdata.dfly.estfast
97 #define lwp_uload lwp_usdata.dfly.uload
98 #define lwp_rqtype lwp_usdata.dfly.rqtype
99 #define lwp_qcpu lwp_usdata.dfly.qcpu
100 #define lwp_rrcount lwp_usdata.dfly.rrcount
102 struct usched_dfly_pcpu {
103 struct spinlock spin;
104 struct thread helper_thread;
109 struct lwp *uschedcp;
110 struct rq queues[NQS];
111 struct rq rtqueues[NQS];
112 struct rq idqueues[NQS];
114 u_int32_t rtqueuebits;
115 u_int32_t idqueuebits;
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138 dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144 struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_changedcpu(struct lwp *lp);
149 struct usched usched_dfly = {
151 "dfly", "Original DragonFly Scheduler",
152 NULL, /* default registration */
153 NULL, /* default deregistration */
154 dfly_acquire_curproc,
155 dfly_release_curproc,
158 dfly_recalculate_estcpu,
163 NULL, /* setcpumask not supported */
169 * We have NQS (32) run queues per scheduling class. For the normal
170 * class, there are 128 priorities scaled onto these 32 queues. New
171 * processes are added to the last entry in each queue, and processes
172 * are selected for running by taking them from the head and maintaining
173 * a simple FIFO arrangement. Realtime and Idle priority processes have
174 * an explicit 0-31 priority which maps directly onto their class queue
175 * index. When a queue has something in it, the corresponding bit is
176 * set in the queuebits variable, allowing a single read to determine
177 * the state of all 32 queues and then a ffs() to find the first busy
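 *
 * For example (illustrative only): a queuebits word of 0x00040010 means
 * queues 4 and 18 are non-empty.  bsfl(0x00040010) == 4 picks the
 * lowest-indexed (best priority) busy queue, while bsrl() == 18 finds
 * the highest-indexed (worst) one; the chooser below uses bsfl() for
 * normal selection and bsrl() when it is deliberately hunting for the
 * worst thread to round-robin or steal.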
180 /* currently running a user process */
181 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
182 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
183 static volatile int dfly_scancpu;
184 static volatile int dfly_ucount; /* total running on whole system */
185 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
186 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
187 static struct sysctl_oid *usched_dfly_sysctl_tree;
189 /* Debug info exposed through debug.* sysctl */
191 static int usched_dfly_debug = -1;
192 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
193 &usched_dfly_debug, 0,
194 "Print debug information for this pid");
196 static int usched_dfly_pid_debug = -1;
197 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
198 &usched_dfly_pid_debug, 0,
199 "Print KTR debug information for this pid");
201 static int usched_dfly_chooser = 0;
202 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
203 &usched_dfly_chooser, 0,
204 "Print KTR debug information for this pid");
207 * Tuning usched_dfly - configurable through kern.usched_dfly.
209 * weight1 - Tries to keep threads on their current cpu. If you
210 * make this value too large the scheduler will not be
211 * able to load-balance large loads.
213 * weight2 - If non-zero, detects thread pairs undergoing synchronous
214 * communications and tries to move them closer together.
215 * Behavior is adjusted by bit 4 of features (0x10).
217 * WARNING! Weight2 is a ridiculously sensitive parameter,
218 * a small value is recommended.
220 * weight3 - Weighting based on the number of recently runnable threads
221 * on the userland scheduling queue (ignoring their loads).
222 * A nominal value here prevents high-priority (low-load)
223 * threads from accumulating on one cpu core when other
224 * cores are available.
226 * This value should be left fairly small relative to weight1
229 * weight4 - Weighting based on other cpu queues being available
230 * or running processes with higher lwp_priority's.
232 * This allows a thread to migrate to another nearby cpu if it
233 * is unable to run on the current cpu based on the other cpu
234 * being idle or running a lower priority (higher lwp_priority)
235 * thread. This value should be large enough to override weight1
237 * features - These flags can be set or cleared to enable or disable various
240 * 0x01 Enable idle-cpu pulling (default)
241 * 0x02 Enable proactive pushing (default)
242 * 0x04 Enable rebalancing rover (default)
243 * 0x08 Enable more proactive pushing (default)
244 * 0x10 (flip weight2 limit on same cpu) (default)
245 * 0x20 choose best cpu for forked process
246 * 0x40 choose current cpu for forked process
247 * 0x80 choose random cpu for forked process (default)
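 *
 * For example, the default features value of 0x8F decodes to
 * 0x01 | 0x02 | 0x04 | 0x08 | 0x80: idle-cpu pulling, both levels of
 * proactive pushing, the rebalancing rover, and a random cpu for
 * forked processes.  These knobs live under kern.usched_dfly, so (as a
 * hypothetical example) "sysctl kern.usched_dfly.features=135" (0x87)
 * would disable just the more-proactive-push bit at runtime.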
249 static int usched_dfly_smt = 0;
250 static int usched_dfly_cache_coherent = 0;
251 static int usched_dfly_weight1 = 200; /* keep thread on current cpu */
252 static int usched_dfly_weight2 = 180; /* synchronous peer's current cpu */
253 static int usched_dfly_weight3 = 40; /* number of threads on queue */
254 static int usched_dfly_weight4 = 160; /* availability of idle cores */
255 static int usched_dfly_features = 0x8F; /* allow pulls */
256 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
257 static int usched_dfly_swmask = ~PPQMASK; /* queue mask forcing thread switch */
258 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
259 static int usched_dfly_decay = 8;
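/*
 * Since dfly_schedulerclock() runs at ESTCPUFREQ, the rrinterval above
 * of (ESTCPUFREQ + 9) / 10 ticks works out to a round-robin quantum of
 * roughly 1/10 second regardless of the configured clock rate (e.g. a
 * 50Hz scheduler clock gives an rrinterval of 5 ticks).
 */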
261 /* KTR debug printings */
263 KTR_INFO_MASTER(usched);
265 #if !defined(KTR_USCHED_DFLY)
266 #define KTR_USCHED_DFLY KTR_ALL
269 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
270 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
271 pid_t pid, int old_cpuid, int curr);
274 * This function is called when the kernel intends to return to userland.
275 * It is responsible for making the thread the current designated userland
276 * thread for this cpu, blocking if necessary.
278 * The kernel will not depress our LWKT priority until after we return,
279 * in case we have to shove over to another cpu.
281 * We must determine our thread's disposition before we switch away. This
282 * is very sensitive code.
284 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
285 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
286 * occur, this function is called only under very controlled circumstances.
289 dfly_acquire_curproc(struct lwp *lp)
298 * Make sure we aren't sitting on a tsleep queue.
301 crit_enter_quick(td);
302 if (td->td_flags & TDF_TSLEEPQ)
304 dfly_recalculate_estcpu(lp);
307 dd = &dfly_pcpu[gd->gd_cpuid];
310 * Process any pending interrupts/ipi's, then handle reschedule
311 * requests. dfly_release_curproc() will try to assign a new
312 * uschedcp that isn't us and otherwise NULL it out.
315 if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
316 lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
320 if (user_resched_wanted()) {
321 if (dd->uschedcp == lp)
323 clear_user_resched();
324 dfly_release_curproc(lp);
328 * Loop until we are the current user thread.
330 * NOTE: dd spinlock not held at top of loop.
332 if (dd->uschedcp == lp)
335 while (dd->uschedcp != lp) {
338 spin_lock(&dd->spin);
341 (usched_dfly_features & 0x08) &&
342 (rdd = dfly_choose_best_queue(lp)) != dd) {
344 * We are not or are no longer the current lwp and a
345 * forced reschedule was requested. Figure out the
346 * best cpu to run on (our current cpu will be given
347 * significant weight).
349 * (if a reschedule was not requested we want to
350 * move this step after the uschedcp tests).
352 dfly_changeqcpu_locked(lp, dd, rdd);
353 spin_unlock(&dd->spin);
354 lwkt_deschedule(lp->lwp_thread);
355 dfly_setrunqueue_dd(rdd, lp);
358 dd = &dfly_pcpu[gd->gd_cpuid];
363 * Either no reschedule was requested or the best queue was
364 * dd, and no current process has been selected. We can
365 * trivially become the current lwp on the current cpu.
367 if (dd->uschedcp == NULL) {
368 atomic_clear_int(&lp->lwp_thread->td_mpflags,
370 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
372 dd->upri = lp->lwp_priority;
373 KKASSERT(lp->lwp_qcpu == dd->cpuid);
374 spin_unlock(&dd->spin);
379 * Put us back on the same run queue unconditionally.
381 * Set rrcount to rrinterval to force placement at end of queue.
382 * Select the worst queue to ensure we round-robin,
383 * but do not change estcpu.
385 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
388 switch(lp->lwp_rqtype) {
389 case RTP_PRIO_NORMAL:
390 tsqbits = dd->queuebits;
391 spin_unlock(&dd->spin);
393 lp->lwp_rrcount = usched_dfly_rrinterval;
395 lp->lwp_rqindex = bsrl(tsqbits);
398 spin_unlock(&dd->spin);
401 lwkt_deschedule(lp->lwp_thread);
402 dfly_setrunqueue_dd(dd, lp);
403 atomic_clear_int(&lp->lwp_thread->td_mpflags,
407 dd = &dfly_pcpu[gd->gd_cpuid];
412 * Can we steal the current designated user thread?
414 * If we do the other thread will stall when it tries to
415 * return to userland, possibly rescheduling elsewhere.
417 * It is important to do a masked test to avoid the edge
418 * case where two near-equal-priority threads are constantly
419 * interrupting each other.
421 * In the exact match case another thread has already gained
422 * uschedcp and lowered its priority; if we steal it, the
423 * other thread will stay stuck on the LWKT runq and not
424 * push to another cpu. So don't steal on equal-priority even
425 * though it might appear to be more beneficial due to not
426 * having to switch back to the other thread's context.
428 * usched_dfly_fast_resched requires that two threads be
429 * significantly far apart in priority in order to interrupt.
431 * If better but not sufficiently far apart, the current
432 * uschedcp will be interrupted at the next scheduler clock.
435 (dd->upri & ~PPQMASK) >
436 (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
438 dd->upri = lp->lwp_priority;
439 KKASSERT(lp->lwp_qcpu == dd->cpuid);
440 spin_unlock(&dd->spin);
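/*
 * (Masked-compare example: with PPQ == 4, PPQMASK == 3, so upri values
 * of 101 and 103 both mask to 100 and are considered equal here -- no
 * steal -- while 100 vs 104 differ by a full queue and, with the
 * default fast_resched of 0, permit the steal above.)
 */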
444 * We are not the current lwp, figure out the best cpu
445 * to run on (our current cpu will be given significant
446 * weight). Loop on cpu change.
448 if ((usched_dfly_features & 0x02) &&
449 force_resched == 0 &&
450 (rdd = dfly_choose_best_queue(lp)) != dd) {
451 dfly_changeqcpu_locked(lp, dd, rdd);
452 spin_unlock(&dd->spin);
453 lwkt_deschedule(lp->lwp_thread);
454 dfly_setrunqueue_dd(rdd, lp);
457 dd = &dfly_pcpu[gd->gd_cpuid];
462 * We cannot become the current lwp, place the lp on the
463 * run-queue of this or another cpu and deschedule ourselves.
465 * When we are reactivated we will have another chance.
467 * Reload after a switch or setrunqueue/switch possibly
468 * moved us to another cpu.
470 spin_unlock(&dd->spin);
471 lwkt_deschedule(lp->lwp_thread);
472 dfly_setrunqueue_dd(dd, lp);
475 dd = &dfly_pcpu[gd->gd_cpuid];
479 * Make sure upri is synchronized, then yield to LWKT threads as
480 * needed before returning. This could result in another reschedule.
485 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
489 * DFLY_RELEASE_CURPROC
491 * This routine detaches the current thread from the userland scheduler,
492 * usually because the thread needs to run or block in the kernel (at
493 * kernel priority) for a while.
495 * This routine is also responsible for selecting a new thread to
496 * become the current thread.
498 * NOTE: This implementation differs from the dummy example in that
499 * dfly_select_curproc() is able to select the current process, whereas
500 * dummy_select_curproc() is not able to select the current process.
501 * This means we have to NULL out uschedcp.
503 * Additionally, note that we may already be on a run queue if releasing
504 * via the lwkt_switch() in dfly_setrunqueue().
507 dfly_release_curproc(struct lwp *lp)
509 globaldata_t gd = mycpu;
510 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
513 * Make sure td_wakefromcpu is defaulted. This will be overwritten
516 if (dd->uschedcp == lp) {
517 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
518 spin_lock(&dd->spin);
519 if (dd->uschedcp == lp) {
520 dd->uschedcp = NULL; /* don't let lp be selected */
521 dd->upri = PRIBASE_NULL;
522 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
523 spin_unlock(&dd->spin);
524 dfly_select_curproc(gd);
526 spin_unlock(&dd->spin);
532 * DFLY_SELECT_CURPROC
534 * Select a new current process for this cpu and clear any pending user
535 * reschedule request. The cpu currently has no current process.
537 * This routine is also responsible for equal-priority round-robining,
538 * typically triggered from dfly_schedulerclock(). In our dummy example
539 * all the 'user' threads are LWKT scheduled all at once and we just
540 * call lwkt_switch().
542 * The calling process is not on the queue and cannot be selected.
546 dfly_select_curproc(globaldata_t gd)
548 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
550 int cpuid = gd->gd_cpuid;
554 spin_lock(&dd->spin);
555 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
558 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
559 dd->upri = nlp->lwp_priority;
562 dd->rrcount = 0; /* reset round robin */
564 spin_unlock(&dd->spin);
565 lwkt_acquire(nlp->lwp_thread);
566 lwkt_schedule(nlp->lwp_thread);
568 spin_unlock(&dd->spin);
574 * Place the specified lwp on the user scheduler's run queue. This routine
575 * must be called with the thread descheduled. The lwp must be runnable.
576 * It must not be possible for anyone else to explicitly schedule this thread.
578 * The thread may be the current thread as a special case.
581 dfly_setrunqueue(struct lwp *lp)
587 * First validate the process LWKT state.
589 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
590 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
591 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
592 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
593 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
596 * NOTE: dd/rdd do not necessarily represent the current cpu.
597 * Instead they may represent the cpu the thread was last
598 * scheduled on or the cpu it inherited from its parent.
600 dd = &dfly_pcpu[lp->lwp_qcpu];
604 * This process is not supposed to be scheduled anywhere or assigned
605 * as the current process anywhere. Assert the condition.
607 KKASSERT(rdd->uschedcp != lp);
610 * Ok, we have to setrunqueue some target cpu and request a reschedule
613 * We have to choose the best target cpu. It might not be the current
614 * target even if the current cpu has no running user thread (for
615 * example, because the current cpu might be a hyperthread and its
616 * sibling has a thread assigned).
618 * If we just forked, it is optimal to run the child on the same
619 * cpu just in case the parent decides to wait for it (thus getting
620 * off that cpu). As long as there is nothing else runnable on the
621 * cpu, that is. If we did this unconditionally a parent forking
622 * multiple children before waiting (e.g. make -j N) leaves other
623 * cpus idle that could be working.
625 if (lp->lwp_forked) {
627 if (usched_dfly_features & 0x20)
628 rdd = dfly_choose_best_queue(lp);
629 else if (usched_dfly_features & 0x40)
630 rdd = &dfly_pcpu[lp->lwp_qcpu];
631 else if (usched_dfly_features & 0x80)
632 rdd = dfly_choose_queue_simple(rdd, lp);
633 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
634 rdd = dfly_choose_best_queue(lp);
636 rdd = &dfly_pcpu[lp->lwp_qcpu];
638 rdd = dfly_choose_best_queue(lp);
639 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
641 if (lp->lwp_qcpu != rdd->cpuid) {
642 spin_lock(&dd->spin);
643 dfly_changeqcpu_locked(lp, dd, rdd);
644 spin_unlock(&dd->spin);
646 dfly_setrunqueue_dd(rdd, lp);
650 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
651 * spin-locked by the caller.  rdd does not have to be.
654 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
656 if (lp->lwp_qcpu != rdd->cpuid) {
657 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
658 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
659 atomic_add_int(&dd->uload, -lp->lwp_uload);
660 atomic_add_int(&dd->ucount, -1);
661 atomic_add_int(&dfly_ucount, -1);
663 lp->lwp_qcpu = rdd->cpuid;
668 * Place lp on rdd's runqueue. Nothing is locked on call. This function
669 * also performs all necessary ancillary notification actions.
672 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
677 * We might be moving the lp to another cpu's run queue, and once
678 * on the runqueue (even if it is our cpu's), another cpu can rip
681 * TDF_MIGRATING might already be set if this is part of a
682 * remrunqueue+setrunqueue sequence.
684 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
685 lwkt_giveaway(lp->lwp_thread);
687 rgd = globaldata_find(rdd->cpuid);
690 * We lose control of the lp the moment we release the spinlock
691 * after having placed it on the queue. i.e. another cpu could pick
692 * it up, or it could exit, or its priority could be further
693 * adjusted, or something like that.
695 * WARNING! rdd can point to a foreign cpu!
697 spin_lock(&rdd->spin);
698 dfly_setrunqueue_locked(rdd, lp);
701 * Potentially interrupt the currently-running thread
703 if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
705 * Currently running thread is better or same, do not
708 spin_unlock(&rdd->spin);
709 } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
710 usched_dfly_fast_resched) {
712 * Currently running thread is not better, but not so bad
713 * that we need to interrupt it. Let it run for one more
717 rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
718 rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
720 spin_unlock(&rdd->spin);
721 } else if (rgd == mycpu) {
723 * We should interrupt the currently running thread, which
724 * is on the current cpu. However, if DIDYIELD is set we
725 * round-robin unconditionally and do not interrupt it.
727 spin_unlock(&rdd->spin);
728 if (rdd->uschedcp == NULL)
729 wakeup_mycpu(&rdd->helper_thread); /* XXX */
730 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
734 * We should interrupt the currently running thread, which
735 * is on a different cpu.
737 spin_unlock(&rdd->spin);
738 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
743 * This routine is called from a systimer IPI. It MUST be MP-safe and
744 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
749 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
751 globaldata_t gd = mycpu;
752 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
755 * Spinlocks also hold a critical section so there should not be
758 KKASSERT(gd->gd_spinlocks == 0);
764 * Do we need to round-robin? We round-robin 10 times a second.
765 * This should only occur for cpu-bound batch processes.
767 if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
768 lp->lwp_thread->td_wakefromcpu = -1;
773 * Adjust estcpu upward using a real time equivalent calculation,
774 * and recalculate lp's priority.
776 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
777 dfly_resetpriority(lp);
780 * Rebalance two cpus every 8 ticks, pulling the worst thread
781 * from the worst cpu's queue into a rotating cpu number.
783 * This mechanic is needed because the push algorithms can
784 * steady-state in a non-optimal configuration. We need to mix it
785 * up a little, even if it means breaking up a paired thread, so
786 * the push algorithms can rebalance the degenerate conditions.
787 * This portion of the algorithm exists to ensure stability at the
788 * selected weightings.
790 * Because we might be breaking up optimal conditions we do not want
791 * to execute this too quickly, hence we only rebalance approximately
792 * 7-8 times per second. The pushes, on the other hand, are capable
793 * of moving threads to other cpus at a much higher rate.
795 * We choose the most heavily loaded thread from the worst queue
796 * in order to ensure that multiple heavy-weight threads on the same
797 * queue get broken up, and also because these threads are the most
798 * likely to be able to remain in place. Hopefully then any pairings,
799 * if applicable, migrate to where these threads are.
801 if ((usched_dfly_features & 0x04) &&
802 ((u_int)sched_ticks & 7) == 0 &&
803 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
810 rdd = dfly_choose_worst_queue(dd);
812 spin_lock(&dd->spin);
813 if (spin_trylock(&rdd->spin)) {
814 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
815 spin_unlock(&rdd->spin);
817 spin_unlock(&dd->spin);
819 spin_unlock(&dd->spin);
825 /* dd->spin held if nlp != NULL */
828 * Either schedule it or add it to our queue.
831 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
832 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
833 dd->upri = nlp->lwp_priority;
836 dd->rrcount = 0; /* reset round robin */
838 spin_unlock(&dd->spin);
839 lwkt_acquire(nlp->lwp_thread);
840 lwkt_schedule(nlp->lwp_thread);
842 dfly_setrunqueue_locked(dd, nlp);
843 spin_unlock(&dd->spin);
849 * Called from acquire and from kern_synch's one-second timer (one of the
850 * callout helper threads) with a critical section held.
852 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
853 * overall system load.
855 * Note that no recalculation occurs for a process which sleeps and wakes
856 * up in the same tick. That is, a system doing thousands of context
857 * switches per second will still only do serious estcpu calculations
858 * ESTCPUFREQ times per second.
862 dfly_recalculate_estcpu(struct lwp *lp)
864 globaldata_t gd = mycpu;
872 * We have to subtract periodic to get the last schedclock
873 * timeout time, otherwise we would get the upcoming timeout.
874 * Keep in mind that a process can migrate between cpus and
875 * while the scheduler clock should be very close, boundary
876 * conditions could lead to a small negative delta.
878 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
880 if (lp->lwp_slptime > 1) {
882 * Too much time has passed, do a coarse correction.
884 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
885 dfly_resetpriority(lp);
886 lp->lwp_cpbase = cpbase;
889 } else if (lp->lwp_cpbase != cpbase) {
891 * Adjust estcpu if we are in a different tick. Don't waste
892 * time if we are in the same tick.
894 * First calculate the number of ticks in the measurement
895 * interval. The ttlticks calculation can wind up 0 due to
896 * a bug in the handling of lwp_slptime (as yet not found),
897 * so make sure we do not get a divide by 0 panic.
899 ttlticks = (cpbase - lp->lwp_cpbase) /
900 gd->gd_schedclock.periodic;
901 if ((ssysclock_t)ttlticks < 0) {
903 lp->lwp_cpbase = cpbase;
907 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
910 * Calculate the percentage of one cpu being used then
911 * compensate for any system load in excess of ncpus.
913 * For example, if we have 8 cores and 16 running cpu-bound
914 * processes then all things being equal each process will
915 * get 50% of one cpu. We need to pump this value back
916 * up to 100% so the estcpu calculation properly adjusts
917 * the process's dynamic priority.
919 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
921 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
922 ucount = dfly_ucount;
923 if (ucount > ncpus) {
924 estcpu += estcpu * (ucount - ncpus) / ncpus;
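/*
 * Example of the compensation above: with ncpus == 8 and 16 runnable
 * cpu-bound processes (ucount == 16), each lwp's pctcpu is about 50%
 * of one cpu, and the adjustment adds estcpu * 8 / 8, i.e. doubles
 * estcpu, so the priority calculation sees the lwp as the 100%
 * cpu-bound process it effectively is.
 */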
927 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
928 kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
929 lp->lwp_proc->p_pid, lp,
930 estcpu, lp->lwp_estcpu,
931 lp->lwp_cpticks, ttlticks);
935 * Adjust lp->lwp_estcpu. The decay factor determines how
936 * quickly lwp_estcpu collapses to its realtime calculation.
937 * A slower collapse gives us a more accurate number over
938 * the long term but can create problems with bursty threads
939 * or threads which become cpu hogs.
941 * To solve this problem, newly started lwps and lwps which
942 * are restarting after having been asleep for a while are
943 * given a much, much faster decay in order to quickly
944 * detect whether they become cpu-bound.
946 * NOTE: p_nice is accounted for in dfly_resetpriority(),
947 * and not here, but we must still ensure that a
948 * cpu-bound nice -20 process does not completely
949 * override a cpu-bound nice +20 process.
951 * NOTE: We must use ESTCPULIM() here to deal with any
954 decay_factor = usched_dfly_decay;
955 if (decay_factor < 1)
957 if (decay_factor > 1024)
960 if (lp->lwp_estfast < usched_dfly_decay) {
962 lp->lwp_estcpu = ESTCPULIM(
963 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
964 (lp->lwp_estfast + 1));
966 lp->lwp_estcpu = ESTCPULIM(
967 (lp->lwp_estcpu * decay_factor + estcpu) /
971 if (usched_dfly_debug == lp->lwp_proc->p_pid)
972 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
973 dfly_resetpriority(lp);
974 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
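/*
 * With the default usched_dfly_decay of 8 the slow path above moves
 * lwp_estcpu roughly 1/9th of the way toward the measured value on
 * each pass (an lwp at 0 seeing a sustained 16384 climbs to about
 * 1820 the first time through), while the fast path for small
 * lwp_estfast values converges much more quickly -- estfast == 0
 * jumps straight to the measured value -- which is how newly started
 * or just-woken lwps that turn out to be cpu hogs are caught early.
 */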
980 * Compute the priority of a process when running in user mode.
981 * Arrange to reschedule if the resulting priority is better
982 * than that of the current process.
984 * This routine may be called with any process.
986 * This routine is called by fork1() for initial setup with the process off
987 * the run queue, and also may be called normally with the process on or
991 dfly_resetpriority(struct lwp *lp)
1004 * Lock the scheduler (lp) belongs to. This can be on a different
1005 * cpu. Handle races. This loop breaks out with the appropriate
1009 rcpu = lp->lwp_qcpu;
1011 rdd = &dfly_pcpu[rcpu];
1012 spin_lock(&rdd->spin);
1013 if (rcpu == lp->lwp_qcpu)
1015 spin_unlock(&rdd->spin);
1019 * Calculate the new priority and queue type
1021 newrqtype = lp->lwp_rtprio.type;
1024 case RTP_PRIO_REALTIME:
1026 newpriority = PRIBASE_REALTIME +
1027 (lp->lwp_rtprio.prio & PRIMASK);
1029 case RTP_PRIO_NORMAL:
1033 estcpu = lp->lwp_estcpu;
1036 * p_nice piece Adds (0-40) * 2 0-80
1037 * estcpu Adds 16384 * 4 / 512 0-128
1039 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1040 newpriority += estcpu * PPQ / ESTCPUPPQ;
1041 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1042 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1043 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1046 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1048 case RTP_PRIO_THREAD:
1049 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1052 panic("Bad RTP_PRIO %d", newrqtype);
1057 * The LWKT scheduler doesn't dive into usched structures, so give it a hint
1058 * on the relative priority of user threads running in the kernel.
1059 * The LWKT scheduler will always ensure that a user thread running
1060 * in the kernel will get some cpu time, regardless of its upri,
1061 * but can decide not to instantly switch from one kernel or user
1062 * mode user thread to a kernel-mode user thread when it has a less
1063 * desirable user priority.
1065 * td_upri has normal sense (higher values are more desirable), so
1068 lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
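/*
 * (With the default swmask of ~PPQMASK, td_upri only changes in whole
 * queue steps: user priorities 101 and 103 both yield a td_upri of
 * -100 and look equally desirable to LWKT, while 100 vs 104 differ by
 * a full queue and register as different hints.)
 */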
1071 * The newpriority incorporates the queue type so do a simple masked
1072 * check to determine if the process has moved to another queue. If
1073 * it has, and it is currently on a run queue, then move it.
1075 * Since uload is ~PPQMASK masked, no modifications are necessary if
1076 * we end up in the same run queue.
1078 * Reset rrcount if moving to a higher-priority queue, otherwise
1081 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1082 if (lp->lwp_priority < newpriority)
1083 lp->lwp_rrcount = 0;
1084 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1085 dfly_remrunqueue_locked(rdd, lp);
1086 lp->lwp_priority = newpriority;
1087 lp->lwp_rqtype = newrqtype;
1088 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1089 dfly_setrunqueue_locked(rdd, lp);
1092 lp->lwp_priority = newpriority;
1093 lp->lwp_rqtype = newrqtype;
1094 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1099 * In the same PPQ, uload cannot change.
1101 lp->lwp_priority = newpriority;
1107 * Adjust effective load.
1109 * Calculate load then scale up or down geometrically based on p_nice.
1110 * Processes niced up (positive) are less important, and processes
1111 * niced downward (negative) are more important. The higher the uload,
1112 * the more important the thread.
1114 /* 0-511, 0-100% cpu */
1115 delta_uload = lp->lwp_estcpu / NQS;
1116 delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1119 delta_uload -= lp->lwp_uload;
1120 lp->lwp_uload += delta_uload;
1121 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1122 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
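/*
 * Example of the scaling above: a fully cpu-bound lwp with estcpu at
 * ESTCPUMAX (16384) starts from 16384 / NQS == 512 load units (~100%
 * of a cpu).  At nice +20 it keeps only 512 - 512 * 20 / 21 == 25 of
 * that, while at nice -20 the load roughly doubles to 999, so heavily
 * niced-down processes dominate the cpu-selection decisions.
 */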
1125 * Determine if we need to reschedule the target cpu. This only
1126 * occurs if the LWP is already on a scheduler queue, which means
1127 * that idle cpu notification has already occurred. At most we
1128 * need only issue a need_user_resched() on the appropriate cpu.
1130 * The LWP may be owned by a CPU different from the current one,
1131 * in which case dd->uschedcp may be modified without an MP lock
1132 * or a spinlock held. The worst that happens is that the code
1133 * below causes a spurious need_user_resched() on the target CPU
1134 * and dd->upri to be wrong for a short period of time, both of
1135 * which are harmless.
1137 * If checkpri is 0 we are adjusting the priority of the current
1138 * process, possibly higher (less desirable), so ignore the upri
1139 * check which will fail in that case.
1142 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1144 (rdd->upri & ~PRIMASK) >
1145 (lp->lwp_priority & ~PRIMASK))) {
1146 if (rcpu == mycpu->gd_cpuid) {
1147 spin_unlock(&rdd->spin);
1148 need_user_resched();
1150 spin_unlock(&rdd->spin);
1151 lwkt_send_ipiq(globaldata_find(rcpu),
1152 dfly_need_user_resched_remote,
1156 spin_unlock(&rdd->spin);
1159 spin_unlock(&rdd->spin);
1166 dfly_yield(struct lwp *lp)
1168 if (lp->lwp_qcpu != mycpu->gd_cpuid)
1170 KKASSERT(lp == curthread->td_lwp);
1173 * Don't set need_user_resched() or mess with rrcount or anything.
1174 * The TDF flag will override everything as long as we release.
1176 atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1177 dfly_release_curproc(lp);
1181 * Thread was forcefully migrated to another cpu. Normally forced migrations
1182 * are used for iterations and the kernel returns to the original cpu before
1183 * returning to userland, so this is not needed. However, if the kernel migrates a
1184 * thread to another cpu and wants to leave it there, it has to call this
1187 * Note that the lwkt_migratecpu() function also released the thread, so
1188 * we don't have to worry about that.
1192 dfly_changedcpu(struct lwp *lp)
1194 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1195 dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1198 spin_lock(&dd->spin);
1199 dfly_changeqcpu_locked(lp, dd, rdd);
1200 spin_unlock(&dd->spin);
1205 * Called from fork1() when a new child process is being created.
1207 * Give the child process an initial estcpu that is more batch than
1208 * its parent and dock the parent for the fork (but do not
1209 * reschedule the parent).
1213 * XXX lwp should be "spawning" instead of "forking"
1216 dfly_forking(struct lwp *plp, struct lwp *lp)
1219 * Put the child 4 queue slots (out of 32) higher than the parent
1220 * (less desirable than the parent).
1222 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1224 lp->lwp_estfast = 0;
1227 * Dock the parent a cost for the fork, protecting us from fork
1228 * bombs. If the parent is forking quickly make the child more
1231 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
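/*
 * With the ESTCPUPPQ == 512 implied by the priority table earlier,
 * the child starts 4 * 512 == 2048 estcpu units (four queues)
 * batchier than its parent, and each fork charges the parent
 * 512 / 16 == 32 units, so a parent has to fork roughly 16 children
 * within one estcpu window before it slips a full queue itself.
 */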
1235 * Called when a lwp is being removed from this scheduler, typically
1236 * during lwp_exit(). We have to clean out any ULOAD accounting before
1237 * we can let the lp go. The dd->spin lock is not needed for uload
1240 * Scheduler dequeueing has already occurred, no further action in that
1244 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1246 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1248 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1249 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1250 atomic_add_int(&dd->uload, -lp->lwp_uload);
1251 atomic_add_int(&dd->ucount, -1);
1252 atomic_add_int(&dfly_ucount, -1);
1257 * This function cannot block in any way, but spinlocks are ok.
1259 * Update the uload based on the state of the thread (whether it is going
1260 * to sleep or running again). The uload is meant to be a longer-term
1261 * load and not an instantaneous load.
1264 dfly_uload_update(struct lwp *lp)
1266 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1268 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1269 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1270 spin_lock(&dd->spin);
1271 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1272 atomic_set_int(&lp->lwp_mpflags,
1274 atomic_add_int(&dd->uload, lp->lwp_uload);
1275 atomic_add_int(&dd->ucount, 1);
1276 atomic_add_int(&dfly_ucount, 1);
1278 spin_unlock(&dd->spin);
1280 } else if (lp->lwp_slptime > 0) {
1281 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1282 spin_lock(&dd->spin);
1283 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1284 atomic_clear_int(&lp->lwp_mpflags,
1286 atomic_add_int(&dd->uload, -lp->lwp_uload);
1287 atomic_add_int(&dd->ucount, -1);
1288 atomic_add_int(&dfly_ucount, -1);
1290 spin_unlock(&dd->spin);
1296 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1297 * it selects a user process and returns it. If chklp is non-NULL and chklp
1298 * has a better or equal priority than the process that would otherwise be
1299 * chosen, NULL is returned.
1301 * Until we fix the RUNQ code the chklp test has to be strict or we may
1302 * bounce between processes trying to acquire the current process designation.
1304 * Must be called with rdd->spin locked. The spinlock is left intact through
1305 * the entire routine. dd->spin does not have to be locked.
1307 * If worst is non-zero this function finds the worst thread instead of the
1308 * best thread (used by the schedulerclock-based rover).
1312 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1313 struct lwp *chklp, int worst)
1323 rtqbits = rdd->rtqueuebits;
1324 tsqbits = rdd->queuebits;
1325 idqbits = rdd->idqueuebits;
1329 pri = bsrl(idqbits);
1330 q = &rdd->idqueues[pri];
1331 which = &rdd->idqueuebits;
1332 } else if (tsqbits) {
1333 pri = bsrl(tsqbits);
1334 q = &rdd->queues[pri];
1335 which = &rdd->queuebits;
1336 } else if (rtqbits) {
1337 pri = bsrl(rtqbits);
1338 q = &rdd->rtqueues[pri];
1339 which = &rdd->rtqueuebits;
1343 lp = TAILQ_LAST(q, rq);
1346 pri = bsfl(rtqbits);
1347 q = &rdd->rtqueues[pri];
1348 which = &rdd->rtqueuebits;
1349 } else if (tsqbits) {
1350 pri = bsfl(tsqbits);
1351 q = &rdd->queues[pri];
1352 which = &rdd->queuebits;
1353 } else if (idqbits) {
1354 pri = bsfl(idqbits);
1355 q = &rdd->idqueues[pri];
1356 which = &rdd->idqueuebits;
1360 lp = TAILQ_FIRST(q);
1362 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1365 * If the passed lwp <chklp> is reasonably close to the selected
1366 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1368 * Note that we must error on the side of <chklp> to avoid bouncing
1369 * between threads in the acquire code.
1372 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1376 KTR_COND_LOG(usched_chooseproc,
1377 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1378 lp->lwp_proc->p_pid,
1379 lp->lwp_thread->td_gd->gd_cpuid,
1382 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1383 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1384 TAILQ_REMOVE(q, lp, lwp_procq);
1387 *which &= ~(1 << pri);
1390 * If we are choosing a process from rdd with the intent to
1391 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1395 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1396 atomic_add_int(&rdd->uload, -lp->lwp_uload);
1397 atomic_add_int(&rdd->ucount, -1);
1398 atomic_add_int(&dfly_ucount, -1);
1400 lp->lwp_qcpu = dd->cpuid;
1401 atomic_add_int(&dd->uload, lp->lwp_uload);
1402 atomic_add_int(&dd->ucount, 1);
1403 atomic_add_int(&dfly_ucount, 1);
1404 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1410 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1412 * Choose a cpu node to schedule lp on, hopefully nearby its current
1415 * We give the current node a modest advantage for obvious reasons.
1417 * We also give the node the thread was woken up FROM a slight advantage
1418 * in order to try to schedule paired threads which synchronize/block waiting
1419 * for each other fairly close to each other. Similarly in a network setting
1420 * this feature will also attempt to place a user process near the kernel
1421 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1422 * algorithm as it heuristically groups synchronizing processes for locality
1423 * of reference in multi-socket systems.
1425 * We check against running processes and give a big advantage if there
1428 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1430 * When the topology is known choose a cpu whose group has, in aggregate,
1431 * the lowest weighted load.
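 *
 * For example, with the default weights (weight1 200, weight2 180,
 * weight3 40, weight4 160), each candidate node's score is the sum of
 * its cpus' uload plus 40 per runnable user thread, minus 160 for each
 * completely idle cpu (or 80 for a cpu merely running something of
 * worse priority), minus 200 if the node contains lp's current cpu and
 * about 180 if it contains the cpu lp was woken from (the exact wake
 * cpu is instead penalized when feature 0x10 is set); the node with
 * the lowest score wins at each level of the topology.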
1435 dfly_choose_best_queue(struct lwp *lp)
1442 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1452 * When the topology is unknown choose a random cpu that is hopefully
1455 if (dd->cpunode == NULL)
1456 return (dfly_choose_queue_simple(dd, lp));
1461 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1462 wakemask = dfly_pcpu[wakecpu].cpumask;
1464 CPUMASK_ASSZERO(wakemask);
1467 * When the topology is known choose a cpu whose group has, in
1468 * aggregate, the lowest weighted load.
1470 cpup = root_cpu_node;
1475 * Degenerate case super-root
1477 if (cpup->child_no == 1) {
1478 cpup = cpup->child_node[0];
1485 if (cpup->child_no == 0) {
1486 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1491 lowest_load = 0x7FFFFFFF;
1493 for (n = 0; n < cpup->child_no; ++n) {
1495 * Accumulate load information for all cpus
1496 * which are members of this node.
1498 cpun = cpup->child_node[n];
1499 mask = cpun->members;
1500 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1501 CPUMASK_ANDMASK(mask, smp_active_mask);
1502 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1503 if (CPUMASK_TESTZERO(mask))
1509 while (CPUMASK_TESTNZERO(mask)) {
1510 cpuid = BSFCPUMASK(mask);
1511 rdd = &dfly_pcpu[cpuid];
1513 load += rdd->ucount * usched_dfly_weight3;
1515 if (rdd->uschedcp == NULL &&
1516 rdd->runqcount == 0 &&
1517 globaldata_find(cpuid)->gd_tdrunqcount == 0
1519 load -= usched_dfly_weight4;
1522 else if (rdd->upri > lp->lwp_priority + PPQ) {
1523 load -= usched_dfly_weight4 / 2;
1526 CPUMASK_NANDBIT(mask, cpuid);
1531 * Compensate if the lp is already accounted for in
1532 * the aggregate uload for this mask set. We want
1533 * to calculate the loads as if lp were not present,
1534 * otherwise the calculation is bogus.
1536 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1537 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1538 load -= lp->lwp_uload;
1539 load -= usched_dfly_weight3;
1545 * Advantage the cpu group (lp) is already on.
1547 if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1548 load -= usched_dfly_weight1;
1551 * Advantage the cpu group we want to pair (lp) to,
1552 * but don't let it go to the exact same cpu as
1553 * the wakecpu target.
1555 * We do this by checking whether cpun is a
1556 * terminal node or not. All cpun's at the same
1557 * level will either all be terminal or all not
1560 * If it is and we match we disadvantage the load.
1561 * If it is and we don't match we advantage the load.
1563 * Also note that we are effectively disadvantaging
1564 * all-but-one by the same amount, so it won't affect
1565 * the weight1 factor for the all-but-one nodes.
1567 if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1568 if (cpun->child_no != 0) {
1570 load -= usched_dfly_weight2;
1572 if (usched_dfly_features & 0x10)
1573 load += usched_dfly_weight2;
1575 load -= usched_dfly_weight2;
1580 * Calculate the best load
1582 if (cpub == NULL || lowest_load > load ||
1583 (lowest_load == load &&
1584 CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1592 if (usched_dfly_chooser)
1593 kprintf("lp %02d->%02d %s\n",
1594 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1599 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1601 * Choose the worst queue close to dd's cpu node with a non-empty runq
1602 * that is NOT dd. Also require that the moving of the highest-load thread
1603 * from rdd to dd does not cause the uload's to cross each other.
1605 * This is used by the thread chooser when the current cpu's queues are
1606 * empty to steal a thread from another cpu's queue. We want to offload
1607 * the most heavily-loaded queue.
1611 dfly_choose_worst_queue(dfly_pcpu_t dd)
1629 * When the topology is unknown choose a random cpu that is hopefully
1632 if (dd->cpunode == NULL) {
1637 * When the topology is known choose a cpu whose group has, in
1638 * aggregate, the highest weighted load.
1640 cpup = root_cpu_node;
1644 * Degenerate case super-root
1646 if (cpup->child_no == 1) {
1647 cpup = cpup->child_node[0];
1654 if (cpup->child_no == 0) {
1655 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1662 for (n = 0; n < cpup->child_no; ++n) {
1664 * Accumulate load information for all cpus
1665 * which are members of this node.
1667 cpun = cpup->child_node[n];
1668 mask = cpun->members;
1669 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1670 CPUMASK_ANDMASK(mask, smp_active_mask);
1671 if (CPUMASK_TESTZERO(mask))
1676 while (CPUMASK_TESTNZERO(mask)) {
1677 cpuid = BSFCPUMASK(mask);
1678 rdd = &dfly_pcpu[cpuid];
1680 load += rdd->ucount * usched_dfly_weight3;
1681 if (rdd->uschedcp == NULL &&
1682 rdd->runqcount == 0 &&
1683 globaldata_find(cpuid)->gd_tdrunqcount == 0
1685 load -= usched_dfly_weight4;
1688 else if (rdd->upri > dd->upri + PPQ) {
1689 load -= usched_dfly_weight4 / 2;
1692 CPUMASK_NANDBIT(mask, cpuid);
1698 * Prefer candidates which are somewhat closer to
1701 if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1702 load += usched_dfly_weight1;
1705 * The best candidate is the one with the worst
1708 if (cpub == NULL || highest_load < load) {
1709 highest_load = load;
1717 * We never return our own node (dd), and only return a remote
1718 * node if its load is significantly worse than ours (i.e. where
1719 * stealing a thread would be considered reasonable).
1721 * This also helps us avoid breaking paired threads apart which
1722 * can have disastrous effects on performance.
1729 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1731 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1733 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1736 if (rdd->uload - hpri < dd->uload + hpri)
1744 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1752 * Fall back to the original heuristic: select a random cpu,
1753 * first checking cpus not currently running a user thread.
1756 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1757 mask = dfly_rdyprocmask;
1758 CPUMASK_NANDMASK(mask, dfly_curprocmask);
1759 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1760 CPUMASK_ANDMASK(mask, smp_active_mask);
1761 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1763 while (CPUMASK_TESTNZERO(mask)) {
1764 CPUMASK_ASSNBMASK(tmpmask, cpuid);
1765 if (CPUMASK_TESTMASK(tmpmask, mask)) {
1766 CPUMASK_ANDMASK(tmpmask, mask);
1767 cpuid = BSFCPUMASK(tmpmask);
1769 cpuid = BSFCPUMASK(mask);
1771 rdd = &dfly_pcpu[cpuid];
1773 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1775 CPUMASK_NANDBIT(mask, cpuid);
1779 * Then cpus which might have a currently running lp
1781 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1782 mask = dfly_rdyprocmask;
1783 CPUMASK_ANDMASK(mask, dfly_curprocmask);
1784 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1785 CPUMASK_ANDMASK(mask, smp_active_mask);
1786 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1788 while (CPUMASK_TESTNZERO(mask)) {
1789 CPUMASK_ASSNBMASK(tmpmask, cpuid);
1790 if (CPUMASK_TESTMASK(tmpmask, mask)) {
1791 CPUMASK_ANDMASK(tmpmask, mask);
1792 cpuid = BSFCPUMASK(tmpmask);
1794 cpuid = BSFCPUMASK(mask);
1796 rdd = &dfly_pcpu[cpuid];
1798 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1800 CPUMASK_NANDBIT(mask, cpuid);
1804 * If we cannot find a suitable cpu we reload from dfly_scancpu
1805 * and round-robin. Other cpus will pick up as they release their
1806 * current lwps or become ready.
1808 * Avoid a degenerate system lockup case if usched_global_cpumask
1809 * is set to 0 or otherwise does not cover lwp_cpumask.
1811 * We only kick the target helper thread in this case, we do not
1812 * set the user resched flag because
1814 cpuid = (dfly_scancpu & 0xFFFF) % ncpus;
1815 if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1817 rdd = &dfly_pcpu[cpuid];
1824 dfly_need_user_resched_remote(void *dummy)
1826 globaldata_t gd = mycpu;
1827 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1830 * Flag reschedule needed
1832 need_user_resched();
1835 * If no user thread is currently running we need to kick the helper
1836 * on our cpu to recover. Otherwise the cpu will never schedule
1839 * We cannot schedule the process ourselves because this is an
1840 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1842 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1844 if (dd->uschedcp == NULL &&
1845 CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1846 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
1847 wakeup_mycpu(&dd->helper_thread);
1852 * dfly_remrunqueue_locked() removes a given process from the run queue
1853 * that it is on, clearing the queue busy bit if it becomes empty.
1855 * Note that the user process scheduler is different from the LWKT scheduler.
1856 * The user process scheduler only manages user processes but it uses LWKT
1857 * underneath, and a user process operating in the kernel will often be
1858 * 'released' from our management.
1860 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1861 * to sleep or the lwp is moved to a different runq.
1864 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1870 KKASSERT(rdd->runqcount >= 0);
1872 pri = lp->lwp_rqindex;
1874 switch(lp->lwp_rqtype) {
1875 case RTP_PRIO_NORMAL:
1876 q = &rdd->queues[pri];
1877 which = &rdd->queuebits;
1879 case RTP_PRIO_REALTIME:
1881 q = &rdd->rtqueues[pri];
1882 which = &rdd->rtqueuebits;
1885 q = &rdd->idqueues[pri];
1886 which = &rdd->idqueuebits;
1889 panic("remrunqueue: invalid rtprio type");
1892 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1893 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1894 TAILQ_REMOVE(q, lp, lwp_procq);
1896 if (TAILQ_EMPTY(q)) {
1897 KASSERT((*which & (1 << pri)) != 0,
1898 ("remrunqueue: remove from empty queue"));
1899 *which &= ~(1 << pri);
1904 * dfly_setrunqueue_locked()
1906 * Add a process whose rqtype and rqindex have previously been calculated
1907 * onto the appropriate run queue. Determine if the addition requires
1908 * a reschedule on a cpu and return the cpuid or -1.
1910 * NOTE: Lower priorities are better priorities.
1912 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1913 * sum of the rough lwp_priority for all running and runnable
1914 * processes. Lower priority processes (higher lwp_priority
1915 * values) actually DO count as more load, not less, because
1916 * these are the programs which require the most care with
1917 * regards to cpu selection.
1920 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1926 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1928 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1929 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1930 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1931 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1932 atomic_add_int(&dfly_ucount, 1);
1935 pri = lp->lwp_rqindex;
1937 switch(lp->lwp_rqtype) {
1938 case RTP_PRIO_NORMAL:
1939 q = &rdd->queues[pri];
1940 which = &rdd->queuebits;
1942 case RTP_PRIO_REALTIME:
1944 q = &rdd->rtqueues[pri];
1945 which = &rdd->rtqueuebits;
1948 q = &rdd->idqueues[pri];
1949 which = &rdd->idqueuebits;
1952 panic("remrunqueue: invalid rtprio type");
1957 * Place us on the selected queue. Determine if we should be
1958 * placed at the head of the queue or at the end.
1960 * We are placed at the tail if our round-robin count has expired,
1961 * or is about to expire and the system thinks it's a good place to
1962 * round-robin, or there is already a next thread on the queue
1963 * (it might be trying to pick up where it left off and we don't
1964 * want to interfere).
1966 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1967 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1970 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
1971 (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
1972 (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
1977 atomic_clear_int(&lp->lwp_thread->td_mpflags,
1978 TDF_MP_BATCH_DEMARC);
1979 lp->lwp_rrcount = 0;
1980 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1983 * Retain rrcount and place on head. Count is retained
1984 * even if the queue is empty.
1986 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
1992 * For SMP systems a user scheduler helper thread is created for each
1993 * cpu and is used to allow one cpu to wake up another for the purposes of
1994 * scheduling userland threads from setrunqueue().
1996 * UP systems do not need the helper since there is only one cpu.
1998 * We can't use the idle thread for this because we might block.
1999 * Additionally, doing things this way allows us to HLT idle cpus
2003 dfly_helper_thread(void *dummy)
2013 cpuid = gd->gd_cpuid; /* doesn't change */
2014 mask = gd->gd_cpumask; /* doesn't change */
2015 dd = &dfly_pcpu[cpuid];
2018 * Since we only want to be woken up when no user processes
2019 * are scheduled on a cpu, run at an ultra low priority.
2021 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2023 tsleep(&dd->helper_thread, 0, "schslp", 0);
2027 * We use the LWKT deschedule-interlock trick to avoid racing
2028 * dfly_rdyprocmask. This means we cannot block through to the
2029 * manual lwkt_switch() call we make below.
2032 tsleep_interlock(&dd->helper_thread, 0);
2034 spin_lock(&dd->spin);
2036 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2037 clear_user_resched(); /* This satisfied the reschedule request */
2039 dd->rrcount = 0; /* Reset the round-robin counter */
2042 if (dd->runqcount || dd->uschedcp != NULL) {
2044 * Threads are available. A thread may or may not be
2045 * currently scheduled. Get the best thread already queued
2048 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2050 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2051 dd->upri = nlp->lwp_priority;
2054 dd->rrcount = 0; /* reset round robin */
2056 spin_unlock(&dd->spin);
2057 lwkt_acquire(nlp->lwp_thread);
2058 lwkt_schedule(nlp->lwp_thread);
2061 * This situation should not occur because we had
2062 * at least one thread available.
2064 spin_unlock(&dd->spin);
2066 } else if (usched_dfly_features & 0x01) {
2068 * This cpu is devoid of runnable threads, steal a thread
2069 * from another cpu. Since we're stealing, might as well
2070 * load balance at the same time.
2072 * We choose the highest-loaded thread from the worst queue.
2074 * NOTE! This function only returns a non-NULL rdd when
2075 * another cpu's queue is obviously overloaded. We
2076 * do not want to perform the type of rebalancing
2077 * the schedclock does here because it would result
2078 * in insane process pulling when 'steady' state is
2079 * partially unbalanced (e.g. 6 runnables and only
2082 rdd = dfly_choose_worst_queue(dd);
2083 if (rdd && spin_trylock(&rdd->spin)) {
2084 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2085 spin_unlock(&rdd->spin);
2090 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2091 dd->upri = nlp->lwp_priority;
2094 dd->rrcount = 0; /* reset round robin */
2096 spin_unlock(&dd->spin);
2097 lwkt_acquire(nlp->lwp_thread);
2098 lwkt_schedule(nlp->lwp_thread);
2101 * Leave the thread on our run queue. Another
2102 * scheduler will try to pull it later.
2104 spin_unlock(&dd->spin);
2108 * devoid of runnable threads and not allowed to steal
2111 spin_unlock(&dd->spin);
2115 * We're descheduled unless someone scheduled us. Switch away.
2116 * Exiting the critical section will cause splz() to be called
2117 * for us if interrupts and such are pending.
2120 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
2126 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2130 new_val = usched_dfly_stick_to_level;
2132 error = sysctl_handle_int(oidp, &new_val, 0, req);
2133 if (error != 0 || req->newptr == NULL)
2135 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2137 usched_dfly_stick_to_level = new_val;
2143 * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2144 * Note that curprocmask bit 0 has already been cleared by rqinit() and
2145 * we should not mess with it further.
2148 usched_dfly_cpu_init(void)
2152 int smt_not_supported = 0;
2153 int cache_coherent_not_supported = 0;
2156 kprintf("Start usched_dfly helpers on cpus:\n");
2158 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2159 usched_dfly_sysctl_tree =
2160 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2161 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2162 "usched_dfly", CTLFLAG_RD, 0, "");
2164 for (i = 0; i < ncpus; ++i) {
2165 dfly_pcpu_t dd = &dfly_pcpu[i];
2168 CPUMASK_ASSBIT(mask, i);
2169 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2172 spin_init(&dd->spin, "uschedcpuinit");
2173 dd->cpunode = get_cpu_node_by_cpuid(i);
2175 CPUMASK_ASSBIT(dd->cpumask, i);
2176 for (j = 0; j < NQS; j++) {
2177 TAILQ_INIT(&dd->queues[j]);
2178 TAILQ_INIT(&dd->rtqueues[j]);
2179 TAILQ_INIT(&dd->idqueues[j]);
2181 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2183 if (dd->cpunode == NULL) {
2184 smt_not_supported = 1;
2185 cache_coherent_not_supported = 1;
2187 kprintf (" cpu%d - WARNING: No CPU NODE "
2188 "found for cpu\n", i);
2190 switch (dd->cpunode->type) {
2193 kprintf (" cpu%d - HyperThreading "
2194 "available. Core siblings: ",
2198 smt_not_supported = 1;
2201 kprintf (" cpu%d - No HT available, "
2202 "multi-core/physical "
2203 "cpu. Physical siblings: ",
2207 smt_not_supported = 1;
2210 kprintf (" cpu%d - No HT available, "
2211 "single-core/physical cpu. "
2212 "Package siblings: ",
2216 /* Let's go for safe defaults here */
2217 smt_not_supported = 1;
2218 cache_coherent_not_supported = 1;
2220 kprintf (" cpu%d - Unknown cpunode->"
2221 "type=%u. siblings: ",
2223 (u_int)dd->cpunode->type);
2228 if (dd->cpunode->parent_node != NULL) {
2229 kprint_cpuset(&dd->cpunode->
2230 parent_node->members);
2233 kprintf(" no siblings\n");
2238 lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread,
2239 0, i, "usched %d", i);
2242 * Allow user scheduling on the target cpu. cpu #0 has already
2243 * been enabled in rqinit().
2246 ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2247 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2248 dd->upri = PRIBASE_NULL;
2252 /* usched_dfly sysctl configurable parameters */
2254 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2255 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2256 OID_AUTO, "rrinterval", CTLFLAG_RW,
2257 &usched_dfly_rrinterval, 0, "");
2258 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2259 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2260 OID_AUTO, "decay", CTLFLAG_RW,
2261 &usched_dfly_decay, 0, "Extra decay when not running");
2263 /* Add enable/disable option for SMT scheduling if supported */
2264 if (smt_not_supported) {
2265 usched_dfly_smt = 0;
2266 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2267 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2268 OID_AUTO, "smt", CTLFLAG_RD,
2269 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2271 usched_dfly_smt = 1;
2272 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2273 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2274 OID_AUTO, "smt", CTLFLAG_RW,
2275 &usched_dfly_smt, 0, "Enable SMT scheduling");
2279 * Add enable/disable option for cache coherent scheduling
2282 if (cache_coherent_not_supported) {
2283 usched_dfly_cache_coherent = 0;
2284 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2285 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2286 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2288 "Cache coherence NOT SUPPORTED");
2290 usched_dfly_cache_coherent = 1;
2291 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2292 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2293 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2294 &usched_dfly_cache_coherent, 0,
2295 "Enable/Disable cache coherent scheduling");
2297 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2298 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2299 OID_AUTO, "weight1", CTLFLAG_RW,
2300 &usched_dfly_weight1, 200,
2301 "Weight selection for current cpu");
2303 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2304 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2305 OID_AUTO, "weight2", CTLFLAG_RW,
2306 &usched_dfly_weight2, 180,
2307 "Weight selection for wakefrom cpu");
2309 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2310 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2311 OID_AUTO, "weight3", CTLFLAG_RW,
2312 &usched_dfly_weight3, 40,
2313 "Weight selection for num threads on queue");
2315 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2316 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2317 OID_AUTO, "weight4", CTLFLAG_RW,
2318 &usched_dfly_weight4, 160,
2319 "Availability of other idle cpus");
2321 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2322 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2323 OID_AUTO, "fast_resched", CTLFLAG_RW,
2324 &usched_dfly_fast_resched, 0,
2325 "Availability of other idle cpus");
2327 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2328 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2329 OID_AUTO, "features", CTLFLAG_RW,
2330 &usched_dfly_features, 0x8F,
2331 "Allow pulls into empty queues");
2333 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2334 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2335 OID_AUTO, "swmask", CTLFLAG_RW,
2336 &usched_dfly_swmask, ~PPQMASK,
2337 "Queue mask to force thread switch");
2340 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2341 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2342 OID_AUTO, "stick_to_level",
2343 CTLTYPE_INT | CTLFLAG_RW,
2344 NULL, sizeof usched_dfly_stick_to_level,
2345 sysctl_usched_dfly_stick_to_level, "I",
2346 "Stick a process to this level. See sysctl"
2347 "paremter hw.cpu_topology.level_description");
2351 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2352 usched_dfly_cpu_init, NULL);