2 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/kernel.h>
31 #include <sys/queue.h>
33 #include <sys/rtprio.h>
35 #include <sys/sysctl.h>
36 #include <sys/resourcevar.h>
37 #include <sys/spinlock.h>
38 #include <sys/cpu_topology.h>
39 #include <sys/thread2.h>
40 #include <sys/spinlock2.h>
41 #include <sys/mplock2.h>
45 #include <machine/cpu.h>
46 #include <machine/smp.h>
49 * Priorities. Note that with 32 run queues per scheduler each queue
50 * represents four priority levels.
54 #define PRIMASK (MAXPRI - 1)
55 #define PRIBASE_REALTIME 0
56 #define PRIBASE_NORMAL MAXPRI
57 #define PRIBASE_IDLE (MAXPRI * 2)
58 #define PRIBASE_THREAD (MAXPRI * 3)
59 #define PRIBASE_NULL (MAXPRI * 4)
61 #define NQS 32 /* 32 run queues. */
62 #define PPQ (MAXPRI / NQS) /* priorities per queue */
63 #define PPQMASK (PPQ - 1)
66 * NICEPPQ - number of nice units per priority queue
68 * ESTCPUPPQ - number of estcpu units per priority queue
69 * ESTCPUMAX - number of estcpu units
73 #define ESTCPUMAX (ESTCPUPPQ * NQS)
74 #define BATCHMAX (ESTCPUFREQ * 30)
75 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
77 #define ESTCPULIM(v) min((v), ESTCPUMAX)
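/*
 * Worked example, assuming the conventional ESTCPUPPQ of 512 (the
 * "16384 * 4 / 512" note in bsd4_resetpriority() below implies the same
 * value):
 *
 * ESTCPUMAX = ESTCPUPPQ * NQS = 512 * 32 = 16384
 * ESTCPULIM(20000) clamps to 16384, ESTCPULIM(8000) stays 8000,
 *
 * so estcpu never exceeds one full sweep of the 32 normal-class queues.
 */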
81 #define lwp_priority lwp_usdata.bsd4.priority
82 #define lwp_rqindex lwp_usdata.bsd4.rqindex
83 #define lwp_estcpu lwp_usdata.bsd4.estcpu
84 #define lwp_batch lwp_usdata.bsd4.batch
85 #define lwp_rqtype lwp_usdata.bsd4.rqtype
87 static void bsd4_acquire_curproc(struct lwp *lp);
88 static void bsd4_release_curproc(struct lwp *lp);
89 static void bsd4_select_curproc(globaldata_t gd);
90 static void bsd4_setrunqueue(struct lwp *lp);
91 static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
93 static void bsd4_recalculate_estcpu(struct lwp *lp);
94 static void bsd4_resetpriority(struct lwp *lp);
95 static void bsd4_forking(struct lwp *plp, struct lwp *lp);
96 static void bsd4_exiting(struct lwp *lp, struct proc *);
97 static void bsd4_yield(struct lwp *lp);
100 static void need_user_resched_remote(void *dummy);
101 static int batchy_looser_pri_test(struct lwp* lp);
102 static struct lwp *chooseproc_locked_cache_coherent(struct lwp *chklp);
104 static struct lwp *chooseproc_locked(struct lwp *chklp);
105 static void bsd4_remrunqueue_locked(struct lwp *lp);
106 static void bsd4_setrunqueue_locked(struct lwp *lp);
107 static void kick_helper(struct lwp *lp);
109 struct usched usched_bsd4 = {
111 "bsd4", "Original DragonFly Scheduler",
112 NULL, /* default registration */
113 NULL, /* default deregistration */
114 bsd4_acquire_curproc,
115 bsd4_release_curproc,
118 bsd4_recalculate_estcpu,
122 NULL, /* setcpumask not supported */
126 struct usched_bsd4_pcpu {
127 struct thread helper_thread;
130 struct lwp *uschedcp;
131 struct lwp *old_uschedcp;
137 typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
140 * We have NQS (32) run queues per scheduling class. For the normal
141 * class, there are 128 priorities scaled onto these 32 queues. New
142 * processes are added to the last entry in each queue, and processes
143 * are selected for running by taking them from the head and maintaining
144 * a simple FIFO arrangement. Realtime and Idle priority processes have
145 * an explicit 0-31 priority which maps directly onto their class queue
146 * index. When a queue has something in it, the corresponding bit is
147 * set in the queuebits variable, allowing a single read to determine
148 * the state of all 32 queues and then a ffs() to find the first busy queue.
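/*
 * Illustrative example using the constants above: a normal-class lwp whose
 * priority works out to PRIBASE_NORMAL + 57 lands on
 * bsd4_queues[(57 & PRIMASK) / PPQ] = bsd4_queues[14] and sets bit 14 in
 * bsd4_queuebits, so chooseproc_locked() only needs a bsfl() on
 * bsd4_queuebits to find the best non-empty normal queue.
 */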
151 static struct rq bsd4_queues[NQS];
152 static struct rq bsd4_rtqueues[NQS];
153 static struct rq bsd4_idqueues[NQS];
154 static u_int32_t bsd4_queuebits;
155 static u_int32_t bsd4_rtqueuebits;
156 static u_int32_t bsd4_idqueuebits;
157 static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
158 static cpumask_t bsd4_rdyprocmask; /* ready to accept a user process */
159 static int bsd4_runqcount;
161 static volatile int bsd4_scancpu;
163 static struct spinlock bsd4_spin;
164 static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
165 static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
166 static struct sysctl_oid *usched_bsd4_sysctl_tree;
168 /* Debug info exposed through debug.* sysctl */
170 SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
171 "Number of runnable lwps waiting on the run queues");
173 static int usched_nonoptimal;
174 SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
175 &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
176 static int usched_optimal;
177 SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
178 &usched_optimal, 0, "acquire_curproc() was optimal");
181 static int usched_bsd4_debug = -1;
182 SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_bsd4_debug, 0,
183 "Print debug information for this pid");
184 static int usched_bsd4_pid_debug = -1;
185 SYSCTL_INT(_debug, OID_AUTO, pid_debug, CTLFLAG_RW, &usched_bsd4_pid_debug, 0,
186 "Print KTR debug information for this pid");
189 static int remote_resched_nonaffinity;
190 static int remote_resched_affinity;
191 static int choose_affinity;
192 SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
193 &remote_resched_nonaffinity, 0, "Number of remote rescheds without affinity");
194 SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
195 &remote_resched_affinity, 0, "Number of remote rescheds with affinity");
196 SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
197 &choose_affinity, 0, "chooseproc() was smart");
201 /* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */
203 static int usched_bsd4_smt = 0;
204 static int usched_bsd4_cache_coherent = 0;
205 static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
206 static int usched_bsd4_queue_checks = 5;
207 static int usched_bsd4_stick_to_level = 0;
209 static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
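/*
 * For example, with the usual ESTCPUFREQ of 50 scheduler ticks per second
 * (an assumption; the constant is defined elsewhere) rrinterval becomes
 * (50 + 9) / 10 = 5 ticks, i.e. the ~10 round-robins per second mentioned
 * in bsd4_schedulerclock().
 */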
210 static int usched_bsd4_decay = 8;
211 static int usched_bsd4_batch_time = 10;
212 static long usched_bsd4_kicks;
214 /* KTR debug printings */
216 KTR_INFO_MASTER(usched);
218 #if !defined(KTR_USCHED_BSD4)
219 #define KTR_USCHED_BSD4 KTR_ALL
222 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
223 "USCHED_BSD4(bsd4_acquire_curproc in user_resched_wanted "
224 "after release: pid %d, cpuid %d, curr_cpuid %d)",
225 pid_t pid, int cpuid, int curr);
226 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
227 "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
229 pid_t pid, int cpuid, int curr);
230 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
231 "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
232 "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
233 pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
234 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
235 "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
236 "cpuid %d, curr_cpuid %d)",
237 pid_t pid, int cpuid, int curr);
239 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
240 "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
241 "cpuid %d, curr_cpuid %d)",
242 pid_t pid, int cpuid, int curr);
244 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
245 "USCHED_BSD4(bsd4_select_curproc: pid %d, "
246 "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
247 pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
250 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
251 "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
252 "cpuid %d, verify_mask %lu)",
253 pid_t pid, int cpuid, cpumask_t mask);
254 KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
255 "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
256 "cpuid %d, verify_mask %lu)",
257 pid_t pid, int cpuid, cpumask_t mask);
259 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
260 "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
261 "mask %lu, curr_cpuid %d)",
262 pid_t pid, int cpuid, cpumask_t mask, int curr);
263 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
264 "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
265 "cpuid %d, mask %lu, curr_cpuid %d)",
266 pid_t pid, int cpuid, cpumask_t mask, int curr);
267 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
268 "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
269 "cpuid %d, mask %lu, curr_cpuid %d)",
270 pid_t pid, int cpuid, cpumask_t mask, int curr);
271 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
272 "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
273 "mask %lu, found_cpuid %d, curr_cpuid %d)",
274 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
275 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
276 "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
277 "try_cpuid %d, curr_cpuid %d)",
278 pid_t pid, int cpuid, int try_cpuid, int curr);
279 KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
280 "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
281 "mask %lu, found_cpuid %d, curr_cpuid %d)",
282 pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr);
285 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0,
286 "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
287 pid_t pid, int old_cpuid, int curr);
289 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
290 "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
291 pid_t pid, int old_cpuid, int curr);
292 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
293 "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
294 "sibling_mask %lu, curr_cpumask %lu)",
295 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
296 KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
297 "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
298 "sibling_mask %lu, curr_cpumask: %lu)",
299 pid_t pid, cpumask_t old_cpumask, cpumask_t sibling_mask, cpumask_t curr);
301 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
302 "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
303 int id, pid_t pid, int cpuid);
304 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
305 "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
306 int id, pid_t pid, int cpuid);
307 KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
308 "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
309 int id, cpumask_t tmpmask);
313 * Initialize the run queues at boot time.
320 spin_init(&bsd4_spin);
321 for (i = 0; i < NQS; i++) {
322 TAILQ_INIT(&bsd4_queues[i]);
323 TAILQ_INIT(&bsd4_rtqueues[i]);
324 TAILQ_INIT(&bsd4_idqueues[i]);
326 atomic_clear_cpumask(&bsd4_curprocmask, 1);
328 SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
331 * BSD4_ACQUIRE_CURPROC
333 * This function is called when the kernel intends to return to userland.
334 * It is responsible for making the thread the current designated userland
335 * thread for this cpu, blocking if necessary.
337 * The kernel has already depressed our LWKT priority so we must not switch
338 * until we have either assigned or disposed of the thread.
340 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
341 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
342 * occur, this function is called only under very controlled circumstances.
347 bsd4_acquire_curproc(struct lwp *lp)
357 * Make sure we aren't sitting on a tsleep queue.
360 crit_enter_quick(td);
361 if (td->td_flags & TDF_TSLEEPQ)
363 bsd4_recalculate_estcpu(lp);
366 * If a reschedule was requested give another thread the
369 if (user_resched_wanted()) {
370 clear_user_resched();
371 bsd4_release_curproc(lp);
373 KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
374 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
376 lp->lwp_thread->td_gd->gd_cpuid,
381 * Loop until we are the current user thread
384 dd = &bsd4_pcpu[gd->gd_cpuid];
386 KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
387 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
389 lp->lwp_thread->td_gd->gd_cpuid,
394 * Process any pending events and higher priority threads.
399 * Become the currently scheduled user thread for this cpu
400 * if we can do so trivially.
402 * We can steal another thread's current thread designation
403 * on this cpu since if we are running, that other thread
404 * must not be, so we can safely deschedule it.
406 if (dd->uschedcp == lp) {
408 * We are already the current lwp (hot path).
410 dd->upri = lp->lwp_priority;
411 } else if (dd->uschedcp == NULL) {
413 * We can trivially become the current lwp.
415 atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
417 dd->upri = lp->lwp_priority;
418 } else if (dd->upri > lp->lwp_priority) {
420 * We can steal the current cpu's lwp designation
421 * away simply by replacing it. The other thread
422 * will stall when it tries to return to userland.
425 dd->upri = lp->lwp_priority;
427 lwkt_deschedule(olp->lwp_thread);
428 bsd4_setrunqueue(olp);
432 * We cannot become the current lwp, place the lp
433 * on the bsd4 run-queue and deschedule ourselves.
435 * When we are reactivated we will have another chance.
438 lwkt_deschedule(lp->lwp_thread);
440 bsd4_setrunqueue(lp);
442 KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
443 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
445 lp->lwp_thread->td_gd->gd_cpuid,
446 dd->uschedcp->lwp_proc->p_pid,
453 * Reload after a switch or setrunqueue/switch possibly
454 * moved us to another cpu.
457 dd = &bsd4_pcpu[gd->gd_cpuid];
459 KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
460 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
462 lp->lwp_thread->td_gd->gd_cpuid,
465 } while (dd->uschedcp != lp);
468 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
472 * BSD4_RELEASE_CURPROC
474 * This routine detaches the current thread from the userland scheduler,
475 * usually because the thread needs to run or block in the kernel (at
476 * kernel priority) for a while.
478 * This routine is also responsible for selecting a new thread to
479 * make the current thread.
481 * NOTE: This implementation differs from the dummy example in that
482 * bsd4_select_curproc() is able to select the current process, whereas
483 * dummy_select_curproc() is not able to select the current process.
484 * This means we have to NULL out uschedcp.
486 * Additionally, note that we may already be on a run queue if releasing
487 * via the lwkt_switch() in bsd4_setrunqueue().
493 bsd4_release_curproc(struct lwp *lp)
495 globaldata_t gd = mycpu;
496 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
498 if (dd->uschedcp == lp) {
500 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
502 KTR_COND_LOG(usched_bsd4_release_curproc,
503 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
505 lp->lwp_thread->td_gd->gd_cpuid,
508 dd->uschedcp = NULL; /* don't let lp be selected */
509 dd->upri = PRIBASE_NULL;
510 atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
511 dd->old_uschedcp = lp; /* used only for KTR debug prints */
512 bsd4_select_curproc(gd);
518 * BSD4_SELECT_CURPROC
520 * Select a new current process for this cpu and clear any pending user
521 * reschedule request. The cpu currently has no current process.
523 * This routine is also responsible for equal-priority round-robining,
524 * typically triggered from bsd4_schedulerclock(). In our dummy example
525 * all the 'user' threads are LWKT scheduled all at once and we just
526 * call lwkt_switch().
528 * The calling process is not on the queue and cannot be selected.
534 bsd4_select_curproc(globaldata_t gd)
536 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
538 int cpuid = gd->gd_cpuid;
542 spin_lock(&bsd4_spin);
544 if(usched_bsd4_cache_coherent)
545 nlp = chooseproc_locked_cache_coherent(dd->uschedcp);
548 nlp = chooseproc_locked(dd->uschedcp);
552 KTR_COND_LOG(usched_bsd4_select_curproc,
553 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
554 nlp->lwp_proc->p_pid,
555 nlp->lwp_thread->td_gd->gd_cpuid,
556 dd->old_uschedcp->lwp_proc->p_pid,
557 dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
560 atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
561 dd->upri = nlp->lwp_priority;
563 dd->rrcount = 0; /* reset round robin */
564 spin_unlock(&bsd4_spin);
566 lwkt_acquire(nlp->lwp_thread);
568 lwkt_schedule(nlp->lwp_thread);
570 spin_unlock(&bsd4_spin);
574 } else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
575 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
576 spin_unlock(&bsd4_spin);
577 lwkt_schedule(&dd->helper_thread);
579 spin_unlock(&bsd4_spin);
587 * batchy_looser_pri_test() - determine if a process is batchy or not
588 * relative to the other processes running in the system
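/*
 * Rough sketch of the test: lp is compared against every cpu currently
 * running a user process. With the defaults above
 * (usched_bsd4_upri_affinity = 16, PPQ = 4), finding a running thread whose
 * upri is more than 16 * 4 = 64 priority units worse than lp's priority
 * makes the test return false, i.e. lp is treated as interactive enough
 * that it should not be deferred as a batch process.
 */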
591 batchy_looser_pri_test(struct lwp* lp)
594 bsd4_pcpu_t other_dd;
597 /* Current running processes */
598 mask = bsd4_curprocmask & smp_active_mask
599 & usched_global_cpumask;
602 cpu = BSFCPUMASK(mask);
603 other_dd = &bsd4_pcpu[cpu];
604 if (other_dd->upri - lp->lwp_priority > usched_bsd4_upri_affinity * PPQ) {
606 KTR_COND_LOG(usched_batchy_test_false,
607 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
609 lp->lwp_thread->td_gd->gd_cpuid,
610 (unsigned long)mask);
614 mask &= ~CPUMASK(cpu);
617 KTR_COND_LOG(usched_batchy_test_true,
618 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
620 lp->lwp_thread->td_gd->gd_cpuid,
621 (unsigned long)mask);
631 * Place the specified lwp on the user scheduler's run queue. This routine
632 * must be called with the thread descheduled. The lwp must be runnable.
634 * The thread may be the current thread as a special case.
639 bsd4_setrunqueue(struct lwp *lp)
650 * First validate the process state relative to the current cpu.
651 * We don't need the spinlock for this, just a critical section.
652 * We are in control of the process.
655 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
656 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
657 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
658 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
659 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
662 * Note: gd and dd are relative to the target thread's last cpu,
663 * NOT our current cpu.
665 gd = lp->lwp_thread->td_gd;
666 dd = &bsd4_pcpu[gd->gd_cpuid];
669 * This process is not supposed to be scheduled anywhere or assigned
670 * as the current process anywhere. Assert the condition.
672 KKASSERT(dd->uschedcp != lp);
676 * If we are not SMP we do not have a scheduler helper to kick
677 * and must directly activate the process if none are scheduled.
679 * This is really only an issue when bootstrapping init since
680 * the caller in all other cases will be a user process, and
681 * even if released (dd->uschedcp == NULL), that process will
682 * kickstart the scheduler when it returns to user mode from
685 if (dd->uschedcp == NULL) {
686 atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
688 dd->upri = lp->lwp_priority;
689 lwkt_schedule(lp->lwp_thread);
697 * XXX fixme. Could be part of a remrunqueue/setrunqueue
698 * operation when the priority is recalculated, so TDF_MIGRATING
699 * may already be set.
701 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
702 lwkt_giveaway(lp->lwp_thread);
706 * We lose control of lp the moment we release the spinlock after
707 * having placed lp on the queue. i.e. another cpu could pick it
708 * up and it could exit, or its priority could be further adjusted,
709 * or something like that.
711 spin_lock(&bsd4_spin);
712 bsd4_setrunqueue_locked(lp);
713 lp->lwp_setrunqueue_ticks = sched_ticks;
717 * Kick the scheduler helper on one of the other cpu's
718 * and request a reschedule if appropriate.
720 * NOTE: We check all cpus whose rdyprocmask is set. First we
721 * look for cpus without designated lps, then we look for
722 * cpus with designated lps with a worse priority than our new lp.
727 if (usched_bsd4_smt) {
730 * SMT heuristic - Try to schedule on a free physical core.
731 * If no free physical core is found then choose the one that has
732 * an interactive thread.
736 int min_prio = MAXPRI * MAXPRI;
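/*
 * bsd4_scancpu acts as a rotor: masked with 0xFFFF and taken modulo ncpus
 * it yields the cpu where this scan starts. Below, ~(CPUMASK(cpuid) - 1)
 * keeps only the cpus at or above the rotor, so the first BSFCPUMASK()
 * pass covers [cpuid, ncpus) and the second pass wraps around to the
 * lower-numbered cpus.
 */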
739 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
740 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
741 smp_active_mask & usched_global_cpumask;
743 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
744 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
746 lp->lwp_thread->td_gd->gd_cpuid,
751 tmpmask = ~(CPUMASK(cpuid) - 1);
753 cpuid = BSFCPUMASK(mask & tmpmask);
755 cpuid = BSFCPUMASK(mask);
756 gd = globaldata_find(cpuid);
757 dd = &bsd4_pcpu[cpuid];
759 if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
760 if (dd->cpunode->parent_node->members & ~dd->cpunode->members & mask) {
762 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
763 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
765 lp->lwp_thread->td_gd->gd_cpuid,
772 sibling = BSFCPUMASK(dd->cpunode->parent_node->members &
773 ~dd->cpunode->members);
774 if (min_prio > bsd4_pcpu[sibling].upri) {
775 min_prio = bsd4_pcpu[sibling].upri;
780 mask &= ~CPUMASK(cpuid);
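/*
 * Reading of the loop above (an interpretation, not a spec): when no
 * candidate had a fully free physical core, best_cpuid remembers the free
 * cpu whose hyperthread sibling runs the thread with the lowest (best)
 * upri seen, i.e. the most interactive one, on the theory that sharing a
 * core with a light thread hurts the least.
 */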
783 if (best_cpuid != -1) {
785 gd = globaldata_find(cpuid);
786 dd = &bsd4_pcpu[cpuid];
788 KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
789 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
791 lp->lwp_thread->td_gd->gd_cpuid,
799 /* Fallback to the original heuristic */
800 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
801 mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
802 smp_active_mask & usched_global_cpumask;
804 KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
805 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
807 lp->lwp_thread->td_gd->gd_cpuid,
812 tmpmask = ~(CPUMASK(cpuid) - 1);
814 cpuid = BSFCPUMASK(mask & tmpmask);
816 cpuid = BSFCPUMASK(mask);
817 gd = globaldata_find(cpuid);
818 dd = &bsd4_pcpu[cpuid];
820 if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK)) {
822 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
823 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
825 lp->lwp_thread->td_gd->gd_cpuid,
832 mask &= ~CPUMASK(cpuid);
837 * Then cpus which might have a currently running lp
839 mask = bsd4_curprocmask & bsd4_rdyprocmask &
840 lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
842 KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
843 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
845 lp->lwp_thread->td_gd->gd_cpuid,
850 tmpmask = ~(CPUMASK(cpuid) - 1);
852 cpuid = BSFCPUMASK(mask & tmpmask);
854 cpuid = BSFCPUMASK(mask);
855 gd = globaldata_find(cpuid);
856 dd = &bsd4_pcpu[cpuid];
858 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
860 KTR_COND_LOG(usched_bsd4_setrunqueue_found,
861 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
863 lp->lwp_thread->td_gd->gd_cpuid,
870 mask &= ~CPUMASK(cpuid);
874 * If we cannot find a suitable cpu we reload from bsd4_scancpu
875 * and round-robin. Other cpus will pick up as they release their
876 * current lwps or become ready.
878 * Avoid a degenerate system lockup case if usched_global_cpumask
879 * is set to 0 or otherwise does not cover lwp_cpumask.
881 * We only kick the target helper thread in this case, we do not
882 * set the user resched flag because
884 cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
885 if ((CPUMASK(cpuid) & usched_global_cpumask) == 0) {
888 gd = globaldata_find(cpuid);
889 dd = &bsd4_pcpu[cpuid];
891 KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
892 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
894 lp->lwp_thread->td_gd->gd_cpuid,
900 spin_unlock(&bsd4_spin);
901 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
902 if (dd->uschedcp == NULL) {
903 wakeup_mycpu(&dd->helper_thread);
909 atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
910 spin_unlock(&bsd4_spin);
911 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
912 lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
914 wakeup(&dd->helper_thread);
918 * Request a reschedule if appropriate.
920 spin_unlock(&bsd4_spin);
921 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
929 * This routine is called from a systimer IPI. It MUST be MP-safe and
930 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on each cpu independently.
937 bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
939 globaldata_t gd = mycpu;
940 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
943 * Do we need to round-robin? We round-robin 10 times a second.
944 * This should only occur for cpu-bound batch processes.
946 if (++dd->rrcount >= usched_bsd4_rrinterval) {
952 * Adjust estcpu upward using a real time equivalent calculation.
954 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
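/*
 * Rough numbers, assuming the usual ESTCPUFREQ of 50 and ESTCPUMAX of
 * 16384: each tick a running lwp gains 16384 / 50 + 1 = 328 estcpu, so a
 * thread that never leaves the cpu saturates ESTCPUMAX in roughly one
 * second.
 */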
957 * Spinlocks also hold a critical section so there should not be any held here.
960 KKASSERT(gd->gd_spinlocks_wr == 0);
962 bsd4_resetpriority(lp);
965 * if we can't call bsd4_resetpriority for some reason we must call
966 * need_user_resched().
973 * Called from acquire and from kern_synch's one-second timer (one of the
974 * callout helper threads) with a critical section held.
976 * Decay p_estcpu based on the number of ticks we haven't been running
977 * and our p_nice. As the load increases each process observes a larger
978 * number of idle ticks (because other processes are running in them).
979 * This observation leads to a larger correction which tends to make the
980 * system more 'batchy'.
982 * Note that no recalculation occurs for a process which sleeps and wakes
983 * up in the same tick. That is, a system doing thousands of context
984 * switches per second will still only do serious estcpu calculations
985 * ESTCPUFREQ times per second.
991 bsd4_recalculate_estcpu(struct lwp *lp)
993 globaldata_t gd = mycpu;
1000 * We have to subtract periodic to get the last schedclock
1001 * timeout time, otherwise we would get the upcoming timeout.
1002 * Keep in mind that a process can migrate between cpus and
1003 * while the scheduler clock should be very close, boundary
1004 * conditions could lead to a small negative delta.
1006 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1008 if (lp->lwp_slptime > 1) {
1010 * Too much time has passed, do a coarse correction.
1012 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1013 bsd4_resetpriority(lp);
1014 lp->lwp_cpbase = cpbase;
1015 lp->lwp_cpticks = 0;
1016 lp->lwp_batch -= ESTCPUFREQ;
1017 if (lp->lwp_batch < 0)
1019 } else if (lp->lwp_cpbase != cpbase) {
1021 * Adjust estcpu if we are in a different tick. Don't waste
1022 * time if we are in the same tick.
1024 * First calculate the number of ticks in the measurement
1025 * interval. The ttlticks calculation can wind up 0 due to
1026 * a bug in the handling of lwp_slptime (as yet not found),
1027 * so make sure we do not get a divide by 0 panic.
1029 ttlticks = (cpbase - lp->lwp_cpbase) /
1030 gd->gd_schedclock.periodic;
1033 lp->lwp_cpbase = cpbase;
1037 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1040 * Calculate the percentage of one cpu used factoring in ncpus
1041 * and the load and adjust estcpu. Handle degenerate cases
1042 * by adding 1 to bsd4_runqcount.
1044 * estcpu is scaled by ESTCPUMAX.
1046 * bsd4_runqcount is the excess number of user processes
1047 * that cannot be immediately scheduled to cpus. We want
1048 * to count these as running to avoid range compression
1049 * in the base calculation (which is the actual percentage of one cpu).
1052 estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
1053 (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
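/*
 * For instance, on a 4-cpu system with an empty run queue
 * (bsd4_runqcount == 0) an lwp that ran for half of the measured ticks
 * gets estcpu = (ESTCPUMAX / 2) * (0 + 4) / 4 = ESTCPUMAX / 2, exactly
 * the 50% pivot used by the batch heuristic below. With four more
 * runnable-but-waiting lwps (bsd4_runqcount == 4) the same usage doubles
 * to ESTCPUMAX, reflecting the heavier load.
 */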
1056 * If estcpu is > 50% we become more batch-like
1057 * If estcpu is <= 50% we become less batch-like
1059 * It takes 30 cpu seconds to traverse the entire range.
1061 if (estcpu > ESTCPUMAX / 2) {
1062 lp->lwp_batch += ttlticks;
1063 if (lp->lwp_batch > BATCHMAX)
1064 lp->lwp_batch = BATCHMAX;
1066 lp->lwp_batch -= ttlticks;
1067 if (lp->lwp_batch < 0)
1071 if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
1072 kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
1073 lp->lwp_proc->p_pid, lp,
1074 estcpu, lp->lwp_estcpu,
1076 lp->lwp_cpticks, ttlticks);
1080 * Adjust lp->lwp_estcpu. The decay factor determines how
1081 * quickly lwp_estcpu collapses to its realtime calculation.
1082 * A slower collapse gives us a more accurate number but
1083 * can cause a cpu hog to eat too much cpu before the
1084 * scheduler decides to downgrade it.
1086 * NOTE: p_nice is accounted for in bsd4_resetpriority(),
1087 * and not here, but we must still ensure that a
1088 * cpu-bound nice -20 process does not completely
1089 * override a cpu-bound nice +20 process.
1091 * NOTE: We must use ESTCPULIM() here to deal with any overshoot.
1094 decay_factor = usched_bsd4_decay;
1095 if (decay_factor < 1)
1097 if (decay_factor > 1024)
1098 decay_factor = 1024;
1100 lp->lwp_estcpu = ESTCPULIM(
1101 (lp->lwp_estcpu * decay_factor + estcpu) /
1102 (decay_factor + 1));
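/*
 * This is an exponential moving average: with the default decay factor of
 * 8 a fresh estcpu sample carries only a 1/9 weight per recalculation
 * (roughly once a second, per the comment above about kern_synch's
 * one-second timer), so a sudden cpu burst takes several seconds to fully
 * show up in lwp_estcpu and equally long to decay back out.
 */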
1104 if (usched_bsd4_debug == lp->lwp_proc->p_pid)
1105 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1106 bsd4_resetpriority(lp);
1107 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1108 lp->lwp_cpticks = 0;
1113 * Compute the priority of a process when running in user mode.
1114 * Arrange to reschedule if the resulting priority is better
1115 * than that of the current process.
1117 * This routine may be called with any process.
1119 * This routine is called by fork1() for initial setup with the process
1120 * off the run queue, and also may be called normally with the process on or
1121 * off the run queue.
1126 bsd4_resetpriority(struct lwp *lp)
1136 * Calculate the new priority and queue type
1139 spin_lock(&bsd4_spin);
1141 newrqtype = lp->lwp_rtprio.type;
1144 case RTP_PRIO_REALTIME:
1146 newpriority = PRIBASE_REALTIME +
1147 (lp->lwp_rtprio.prio & PRIMASK);
1149 case RTP_PRIO_NORMAL:
1151 * Detune estcpu based on batchiness. lwp_batch ranges
1152 * from 0 to BATCHMAX. Limit estcpu for the sake of
1153 * the priority calculation to between 50% and 100%.
1155 estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
1159 * p_nice piece Adds (0-40) * 2 0-80
1160 * estcpu Adds 16384 * 4 / 512 0-128
1162 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1163 newpriority += estcpu * PPQ / ESTCPUPPQ;
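/*
 * Worked example based on the note above: a nice +20, fully cpu-bound
 * process accumulates 40 * 2 = 80 from the nice piece plus up to 128 from
 * the estcpu piece, roughly 208 raw units, which the scaling below
 * compresses into 0..MAXPRI-1 before PRIBASE_NORMAL is added. A nice -20
 * process that hardly runs stays near 0, the best end of the range.
 */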
1164 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1165 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1166 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1169 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1171 case RTP_PRIO_THREAD:
1172 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1175 panic("Bad RTP_PRIO %d", newrqtype);
1180 * The newpriority incorporates the queue type so do a simple masked
1181 * check to determine if the process has moved to another queue. If
1182 * it has, and it is currently on a run queue, then move it.
1184 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1185 lp->lwp_priority = newpriority;
1186 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1187 bsd4_remrunqueue_locked(lp);
1188 lp->lwp_rqtype = newrqtype;
1189 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1190 bsd4_setrunqueue_locked(lp);
1193 lp->lwp_rqtype = newrqtype;
1194 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1197 reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
1199 lp->lwp_priority = newpriority;
1205 * Determine if we need to reschedule the target cpu. This only
1206 * occurs if the LWP is already on a scheduler queue, which means
1207 * that idle cpu notification has already occurred. At most we
1208 * need only issue a need_user_resched() on the appropriate cpu.
1210 * The LWP may be owned by a CPU different from the current one,
1211 * in which case dd->uschedcp may be modified without an MP lock
1212 * or a spinlock held. The worst that happens is that the code
1213 * below causes a spurious need_user_resched() on the target CPU
1214 * and dd->pri to be wrong for a short period of time, both of
1215 * which are harmless.
1217 * If checkpri is 0 we are adjusting the priority of the current
1218 * process, possibly higher (less desirable), so ignore the upri
1219 * check which will fail in that case.
1221 if (reschedcpu >= 0) {
1222 dd = &bsd4_pcpu[reschedcpu];
1223 if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
1225 (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
1227 if (reschedcpu == mycpu->gd_cpuid) {
1228 spin_unlock(&bsd4_spin);
1229 need_user_resched();
1231 spin_unlock(&bsd4_spin);
1232 atomic_clear_cpumask(&bsd4_rdyprocmask,
1233 CPUMASK(reschedcpu));
1234 lwkt_send_ipiq(lp->lwp_thread->td_gd,
1235 need_user_resched_remote, NULL);
1238 spin_unlock(&bsd4_spin);
1239 need_user_resched();
1242 spin_unlock(&bsd4_spin);
1245 spin_unlock(&bsd4_spin);
1255 bsd4_yield(struct lwp *lp)
1258 /* FUTURE (or something similar) */
1259 switch(lp->lwp_rqtype) {
1260 case RTP_PRIO_NORMAL:
1261 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
1267 need_user_resched();
1271 * Called from fork1() when a new child process is being created.
1273 * Give the child process an initial estcpu that is more batchy than
1274 * its parent and dock the parent for the fork (but do not
1275 * reschedule the parent). This comprises the main part of our batch
1276 * detection heuristic for both parallel forking and sequential execs.
1278 * XXX lwp should be "spawning" instead of "forking"
1283 bsd4_forking(struct lwp *plp, struct lwp *lp)
1286 * Put the child 4 queue slots (out of 32) higher than the parent
1287 * (less desirable than the parent).
1289 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1292 * The batch status of children always starts out centerline
1293 * and will inch-up or inch-down as appropriate. It takes roughly
1294 * ~15 seconds of >50% cpu to hit the limit.
1296 lp->lwp_batch = BATCHMAX / 2;
1299 * Dock the parent a cost for the fork, protecting us from fork
1300 * bombs. If the parent is forking quickly make the child more batchy.
1303 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
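/*
 * In concrete terms, assuming the usual ESTCPUPPQ of 512: the child starts
 * 4 * 512 = 2048 estcpu (four queues) worse than its parent, and each fork
 * costs the parent 512 / 16 = 32 estcpu, so about 16 rapid forks push the
 * parent itself one full queue toward batch status.
 */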
1307 * Called when a parent waits for a child.
1312 bsd4_exiting(struct lwp *lp, struct proc *child_proc)
1317 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1318 * it selects a user process and returns it. If chklp is non-NULL and chklp
1319 * has a better or equal priority than the process that would otherwise be
1320 * chosen, NULL is returned.
1322 * Until we fix the RUNQ code the chklp test has to be strict or we may
1323 * bounce between processes trying to acquire the current process designation.
1325 * MPSAFE - must be called with bsd4_spin held exclusively. The spinlock is
1326 * left intact through the entire routine.
1330 chooseproc_locked(struct lwp *chklp)
1334 u_int32_t *which, *which2;
1341 rtqbits = bsd4_rtqueuebits;
1342 tsqbits = bsd4_queuebits;
1343 idqbits = bsd4_idqueuebits;
1344 cpumask = mycpu->gd_cpumask;
1351 pri = bsfl(rtqbits);
1352 q = &bsd4_rtqueues[pri];
1353 which = &bsd4_rtqueuebits;
1355 } else if (tsqbits) {
1356 pri = bsfl(tsqbits);
1357 q = &bsd4_queues[pri];
1358 which = &bsd4_queuebits;
1360 } else if (idqbits) {
1361 pri = bsfl(idqbits);
1362 q = &bsd4_idqueues[pri];
1363 which = &bsd4_idqueuebits;
1368 lp = TAILQ_FIRST(q);
1369 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1372 while ((lp->lwp_cpumask & cpumask) == 0) {
1373 lp = TAILQ_NEXT(lp, lwp_procq);
1375 *which2 &= ~(1 << pri);
1382 * If the passed lwp <chklp> is reasonably close to the selected
1383 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1385 * Note that we must error on the side of <chklp> to avoid bouncing
1386 * between threads in the acquire code.
1389 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1395 * If the chosen lwp does not reside on this cpu spend a few
1396 * cycles looking for a better candidate at the same priority level.
1397 * This is a fallback check, setrunqueue() tries to wakeup the
1398 * correct cpu and is our front-line affinity.
1400 if (lp->lwp_thread->td_gd != mycpu &&
1401 (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
1403 if (chklp->lwp_thread->td_gd == mycpu) {
1410 KTR_COND_LOG(usched_chooseproc,
1411 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1412 lp->lwp_proc->p_pid,
1413 lp->lwp_thread->td_gd->gd_cpuid,
1416 TAILQ_REMOVE(q, lp, lwp_procq);
1419 *which &= ~(1 << pri);
1420 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1421 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1428 * chooseproc() - with a cache coherence heuristic. Try to pull a process that
1429 * has its home on the current CPU. If the process doesn't have its home here
1430 * and is a batchy one (see batchy_looser_pri_test()), we can wait a
1431 * sched_tick; maybe its home cpu will become free and pull it in. Either way
1432 * we cannot wait more than one tick; once that tick has expired we pull the
1433 * process in regardless.
1437 chooseproc_locked_cache_coherent(struct lwp *chklp)
1441 u_int32_t *which, *which2;
1449 struct lwp * min_level_lwp = NULL;
1450 struct rq *min_q = NULL;
1452 cpu_node_t* cpunode = NULL;
1453 u_int32_t min_level = MAXCPU; /* number of levels < MAXCPU */
1454 u_int32_t *min_which = NULL;
1455 u_int32_t min_pri = 0;
1456 u_int32_t level = 0;
1458 rtqbits = bsd4_rtqueuebits;
1459 tsqbits = bsd4_queuebits;
1460 idqbits = bsd4_idqueuebits;
1461 cpumask = mycpu->gd_cpumask;
1463 /* Get the mask corresponding to the sysctl configured level */
1464 cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
1465 level = usched_bsd4_stick_to_level;
1467 cpunode = cpunode->parent_node;
1470 /* The cpus which can elect a process */
1471 siblings = cpunode->members;
1476 pri = bsfl(rtqbits);
1477 q = &bsd4_rtqueues[pri];
1478 which = &bsd4_rtqueuebits;
1480 } else if (tsqbits) {
1481 pri = bsfl(tsqbits);
1482 q = &bsd4_queues[pri];
1483 which = &bsd4_queuebits;
1485 } else if (idqbits) {
1486 pri = bsfl(idqbits);
1487 q = &bsd4_idqueues[pri];
1488 which = &bsd4_idqueuebits;
1492 * No more left and we didn't reach the checks limit.
1494 kick_helper(min_level_lwp);
1497 lp = TAILQ_FIRST(q);
1498 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1501 * Limit the number of checks/queue to a configurable value to
1502 * minimize the contention (we are in a locked region).
1504 while (checks < usched_bsd4_queue_checks) {
1505 if ((lp->lwp_cpumask & cpumask) == 0 ||
1506 ((siblings & lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
1507 (lp->lwp_setrunqueue_ticks == sched_ticks ||
1508 lp->lwp_setrunqueue_ticks == (int)(sched_ticks - 1)) &&
1509 batchy_looser_pri_test(lp))) {
1511 KTR_COND_LOG(usched_chooseproc_cc_not_good,
1512 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1513 lp->lwp_proc->p_pid,
1514 (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1515 (unsigned long)siblings,
1516 (unsigned long)cpumask);
1518 cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
1521 if (cpunode->members & cpumask)
1523 cpunode = cpunode->parent_node;
1526 if (level < min_level ||
1527 (level == min_level && min_level_lwp &&
1528 lp->lwp_priority < min_level_lwp->lwp_priority)) {
1529 kick_helper(min_level_lwp);
1538 lp = TAILQ_NEXT(lp, lwp_procq);
1540 *which2 &= ~(1 << pri);
1544 KTR_COND_LOG(usched_chooseproc_cc_elected,
1545 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1546 lp->lwp_proc->p_pid,
1547 (unsigned long)lp->lwp_thread->td_gd->gd_cpumask,
1548 (unsigned long)siblings,
1549 (unsigned long)cpumask);
1557 * Checks exhausted, we tried to defer too many threads, so schedule
1558 * the best of the worst.
1564 KASSERT(lp, ("chooseproc: at least the first lp was good"));
1569 * If the passed lwp <chklp> is reasonably close to the selected
1570 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1572 * Note that we must error on the side of <chklp> to avoid bouncing
1573 * between threads in the acquire code.
1576 if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
1582 KTR_COND_LOG(usched_chooseproc_cc,
1583 lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1584 lp->lwp_proc->p_pid,
1585 lp->lwp_thread->td_gd->gd_cpuid,
1588 TAILQ_REMOVE(q, lp, lwp_procq);
1591 *which &= ~(1 << pri);
1592 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1593 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1599 * If we aren't willing to schedule a ready process on our cpu, give its
1600 * target cpu a kick rather than wait for the next tick.
1602 * Called with bsd4_spin held.
1606 kick_helper(struct lwp *lp)
1613 gd = lp->lwp_thread->td_gd;
1614 dd = &bsd4_pcpu[gd->gd_cpuid];
1615 if ((smp_active_mask & usched_global_cpumask &
1616 bsd4_rdyprocmask & gd->gd_cpumask) == 0) {
1619 ++usched_bsd4_kicks;
1620 atomic_clear_cpumask(&bsd4_rdyprocmask, gd->gd_cpumask);
1621 if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
1622 lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
1624 wakeup(&dd->helper_thread);
1630 need_user_resched_remote(void *dummy)
1632 globaldata_t gd = mycpu;
1633 bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
1635 need_user_resched();
1637 /* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
1638 wakeup_mycpu(&dd->helper_thread);
1644 * bsd4_remrunqueue_locked() removes a given process from the run queue
1645 * that it is on, clearing the queue busy bit if it becomes empty.
1647 * Note that the user process scheduler is different from the LWKT scheduler.
1648 * The user process scheduler only manages user processes but it uses LWKT
1649 * underneath, and a user process operating in the kernel will often be
1650 * 'released' from our management.
1652 * MPSAFE - bsd4_spin must be held exclusively on call
1655 bsd4_remrunqueue_locked(struct lwp *lp)
1661 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1662 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1664 KKASSERT(bsd4_runqcount >= 0);
1666 pri = lp->lwp_rqindex;
1667 switch(lp->lwp_rqtype) {
1668 case RTP_PRIO_NORMAL:
1669 q = &bsd4_queues[pri];
1670 which = &bsd4_queuebits;
1672 case RTP_PRIO_REALTIME:
1674 q = &bsd4_rtqueues[pri];
1675 which = &bsd4_rtqueuebits;
1678 q = &bsd4_idqueues[pri];
1679 which = &bsd4_idqueuebits;
1682 panic("remrunqueue: invalid rtprio type");
1685 TAILQ_REMOVE(q, lp, lwp_procq);
1686 if (TAILQ_EMPTY(q)) {
1687 KASSERT((*which & (1 << pri)) != 0,
1688 ("remrunqueue: remove from empty queue"));
1689 *which &= ~(1 << pri);
1694 * bsd4_setrunqueue_locked()
1696 * Add a process whose rqtype and rqindex had previously been calculated
1697 * onto the appropriate run queue. Determine if the addition requires
1698 * a reschedule on a cpu and return the cpuid or -1.
1700 * NOTE: Lower priorities are better priorities.
1702 * MPSAFE - bsd4_spin must be held exclusively on call
1705 bsd4_setrunqueue_locked(struct lwp *lp)
1711 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1712 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1715 pri = lp->lwp_rqindex;
1717 switch(lp->lwp_rqtype) {
1718 case RTP_PRIO_NORMAL:
1719 q = &bsd4_queues[pri];
1720 which = &bsd4_queuebits;
1722 case RTP_PRIO_REALTIME:
1724 q = &bsd4_rtqueues[pri];
1725 which = &bsd4_rtqueuebits;
1728 q = &bsd4_idqueues[pri];
1729 which = &bsd4_idqueuebits;
1732 panic("setrunqueue: invalid rtprio type");
1737 * Add to the correct queue and set the appropriate bit. If no
1738 * lower priority (i.e. better) processes are in the queue then
1739 * we want a reschedule, calculate the best cpu for the job.
1741 * Always run reschedules on the LWP's original cpu.
1743 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
1750 * For SMP systems a user scheduler helper thread is created for each
1751 * cpu and is used to allow one cpu to wakeup another for the purposes of
1752 * scheduling userland threads from setrunqueue().
1754 * UP systems do not need the helper since there is only one cpu.
1756 * We can't use the idle thread for this because we might block.
1757 * Additionally, doing things this way allows us to HLT idle cpus on MP systems.
1763 sched_thread(void *dummy)
1777 cpuid = gd->gd_cpuid; /* doesn't change */
1778 mask = gd->gd_cpumask; /* doesn't change */
1779 dd = &bsd4_pcpu[cpuid];
1782 * Since we are woken up only when no user processes are scheduled
1783 * on a cpu, we can run at an ultra low priority.
1785 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
1787 tsleep(&dd->helper_thread, 0, "sched_thread_sleep", 0);
1791 * We use the LWKT deschedule-interlock trick to avoid racing
1792 * bsd4_rdyprocmask. This means we cannot block through to the
1793 * manual lwkt_switch() call we make below.
1796 tsleep_interlock(&dd->helper_thread, 0);
1797 spin_lock(&bsd4_spin);
1798 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
1800 clear_user_resched(); /* This satisfied the reschedule request */
1801 dd->rrcount = 0; /* Reset the round-robin counter */
1803 if ((bsd4_curprocmask & mask) == 0) {
1805 * No thread is currently scheduled.
1807 KKASSERT(dd->uschedcp == NULL);
1808 if ((nlp = chooseproc_locked(NULL)) != NULL) {
1809 KTR_COND_LOG(usched_sched_thread_no_process,
1810 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1812 nlp->lwp_proc->p_pid,
1813 nlp->lwp_thread->td_gd->gd_cpuid);
1815 atomic_set_cpumask(&bsd4_curprocmask, mask);
1816 dd->upri = nlp->lwp_priority;
1818 dd->rrcount = 0; /* reset round robin */
1819 spin_unlock(&bsd4_spin);
1821 lwkt_acquire(nlp->lwp_thread);
1823 lwkt_schedule(nlp->lwp_thread);
1825 spin_unlock(&bsd4_spin);
1827 } else if (bsd4_runqcount) {
1828 if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
1829 KTR_COND_LOG(usched_sched_thread_process,
1830 nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
1832 nlp->lwp_proc->p_pid,
1833 nlp->lwp_thread->td_gd->gd_cpuid);
1835 dd->upri = nlp->lwp_priority;
1837 dd->rrcount = 0; /* reset round robin */
1838 spin_unlock(&bsd4_spin);
1840 lwkt_acquire(nlp->lwp_thread);
1842 lwkt_schedule(nlp->lwp_thread);
1845 * CHAINING CONDITION TRAIN
1847 * We could not deal with the scheduler wakeup
1848 * request on this cpu, locate a ready scheduler
1849 * with no current lp assignment and chain to it.
1851 * This ensures that a wakeup race which fails due
1852 * to priority test does not leave other unscheduled
1853 * cpus idle when the runqueue is not empty.
1855 tmpmask = ~bsd4_curprocmask &
1856 bsd4_rdyprocmask & smp_active_mask;
1858 tmpid = BSFCPUMASK(tmpmask);
1859 tmpdd = &bsd4_pcpu[tmpid];
1860 atomic_clear_cpumask(&bsd4_rdyprocmask,
1862 spin_unlock(&bsd4_spin);
1863 wakeup(&tmpdd->helper_thread);
1865 spin_unlock(&bsd4_spin);
1868 KTR_LOG(usched_sched_thread_no_process_found,
1869 gd->gd_cpuid, (unsigned long)tmpmask);
1873 * The runq is empty.
1875 spin_unlock(&bsd4_spin);
1879 * We're descheduled unless someone scheduled us. Switch away.
1880 * Exiting the critical section will cause splz() to be called
1881 * for us if interrupts and such are pending.
1884 tsleep(&dd->helper_thread, PINTERLOCKED, "schslp", 0);
1888 /* sysctl stick_to_level parameter */
1890 sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
1894 new_val = usched_bsd4_stick_to_level;
1896 error = sysctl_handle_int(oidp, &new_val, 0, req);
1897 if (error != 0 || req->newptr == NULL)
1899 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
1901 usched_bsd4_stick_to_level = new_val;
1906 * Setup our scheduler helpers. Note that curprocmask bit 0 has already
1907 * been cleared by rqinit() and we should not mess with it further.
1910 sched_thread_cpu_init(void)
1914 int smt_not_supported = 0;
1915 int cache_coherent_not_supported = 0;
1918 kprintf("Start scheduler helpers on cpus:\n");
1920 sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
1921 usched_bsd4_sysctl_tree =
1922 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
1923 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
1924 "usched_bsd4", CTLFLAG_RD, 0, "");
1926 for (i = 0; i < ncpus; ++i) {
1927 bsd4_pcpu_t dd = &bsd4_pcpu[i];
1928 cpumask_t mask = CPUMASK(i);
1930 if ((mask & smp_active_mask) == 0)
1933 dd->cpunode = get_cpu_node_by_cpuid(i);
1935 if (dd->cpunode == NULL) {
1936 smt_not_supported = 1;
1937 cache_coherent_not_supported = 1;
1939 kprintf ("\tcpu%d - WARNING: No CPU NODE "
1940 "found for cpu\n", i);
1942 switch (dd->cpunode->type) {
1945 kprintf ("\tcpu%d - HyperThreading "
1946 "available. Core siblings: ",
1950 smt_not_supported = 1;
1953 kprintf ("\tcpu%d - No HT available, "
1954 "multi-core/physical "
1955 "cpu. Physical siblings: ",
1959 smt_not_supported = 1;
1962 kprintf ("\tcpu%d - No HT available, "
1963 "single-core/physical cpu. "
1964 "Package Siblings: ",
1968 /* Let's go for safe defaults here */
1969 smt_not_supported = 1;
1970 cache_coherent_not_supported = 1;
1972 kprintf ("\tcpu%d - Unknown cpunode->"
1973 "type=%u. Siblings: ",
1975 (u_int)dd->cpunode->type);
1980 if (dd->cpunode->parent_node != NULL) {
1981 CPUSET_FOREACH(cpuid, dd->cpunode->parent_node->members)
1982 kprintf("cpu%d ", cpuid);
1985 kprintf(" no siblings\n");
1990 lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
1991 0, i, "usched %d", i);
1994 * Allow user scheduling on the target cpu. cpu #0 has already
1995 * been enabled in rqinit().
1998 atomic_clear_cpumask(&bsd4_curprocmask, mask);
1999 atomic_set_cpumask(&bsd4_rdyprocmask, mask);
2000 dd->upri = PRIBASE_NULL;
2004 /* usched_bsd4 sysctl configurable parameters */
2006 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2007 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2008 OID_AUTO, "rrinterval", CTLFLAG_RW,
2009 &usched_bsd4_rrinterval, 0, "");
2010 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2011 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2012 OID_AUTO, "decay", CTLFLAG_RW,
2013 &usched_bsd4_decay, 0, "Extra decay when not running");
2014 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2015 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2016 OID_AUTO, "batch_time", CTLFLAG_RW,
2017 &usched_bsd4_batch_time, 0, "Min batch counter value");
2018 SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
2019 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2020 OID_AUTO, "kicks", CTLFLAG_RW,
2021 &usched_bsd4_kicks, "Number of kickstarts");
2023 /* Add enable/disable option for SMT scheduling if supported */
2024 if (smt_not_supported) {
2025 usched_bsd4_smt = 0;
2026 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2027 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2028 OID_AUTO, "smt", CTLFLAG_RD,
2029 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2031 usched_bsd4_smt = 1;
2032 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2033 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2034 OID_AUTO, "smt", CTLFLAG_RW,
2035 &usched_bsd4_smt, 0, "Enable SMT scheduling");
2039 * Add enable/disable option for cache coherent scheduling
2042 if (cache_coherent_not_supported) {
2044 usched_bsd4_cache_coherent = 0;
2045 SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
2046 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2047 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2049 "Cache coherence NOT SUPPORTED");
2053 usched_bsd4_cache_coherent = 1;
2054 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2055 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2056 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2057 &usched_bsd4_cache_coherent, 0,
2058 "Enable/Disable cache coherent scheduling");
2061 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2062 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2063 OID_AUTO, "upri_affinity", CTLFLAG_RW,
2064 &usched_bsd4_upri_affinity, 1,
2065 "Number of PPQs in user priority check");
2067 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2068 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2069 OID_AUTO, "queue_checks", CTLFLAG_RW,
2070 &usched_bsd4_queue_checks, 5,
2071 "LWPs to check from a queue before giving up");
2073 SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
2074 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2075 OID_AUTO, "stick_to_level",
2076 CTLTYPE_INT | CTLFLAG_RW,
2077 NULL, sizeof usched_bsd4_stick_to_level,
2078 sysctl_usched_bsd4_stick_to_level, "I",
2079 "Stick a process to this level. See sysctl "
2080 "parameter hw.cpu_topology.level_description");
2083 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2084 sched_thread_cpu_init, NULL)
2086 #else /* No SMP options - just add the configurable parameters to sysctl */
2089 sched_sysctl_tree_init(void)
2091 sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
2092 usched_bsd4_sysctl_tree =
2093 SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
2094 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2095 "usched_bsd4", CTLFLAG_RD, 0, "");
2097 /* usched_bsd4 sysctl configurable parameters */
2098 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2099 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2100 OID_AUTO, "rrinterval", CTLFLAG_RW,
2101 &usched_bsd4_rrinterval, 0, "");
2102 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2103 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2104 OID_AUTO, "decay", CTLFLAG_RW,
2105 &usched_bsd4_decay, 0, "Extra decay when not running");
2106 SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
2107 SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
2108 OID_AUTO, "batch_time", CTLFLAG_RW,
2109 &usched_bsd4_batch_time, 0, "Min batch counter value");
2111 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2112 sched_sysctl_tree_init, NULL)