/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
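
/*
 * Worked example (illustrative): a normal-class lwp whose computed
 * priority is PRIBASE_NORMAL + 53 carries 53 in the PRIMASK bits;
 * 53 / PPQ == 53 / 4 == 13 selects run queue index 13 within the
 * normal class, and masking with ~PPQMASK compares priorities at a
 * granularity of four levels (one queue).
 */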
/*
 * NICEPPQ	- number of nice units per priority queue
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 */
#define NICEPPQ		2
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define BATCHMAX	(ESTCPUFREQ * 30)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
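
/*
 * With ESTCPUPPQ == 512 and NQS == 32 the cap works out to
 * ESTCPUMAX == 16384, i.e. ESTCPULIM(20000) yields 16384 while
 * smaller values pass through unchanged.
 */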
#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_batch	lwp_usdata.bsd4.batch
#define lwp_rqtype	lwp_usdata.bsd4.rqtype
static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *lp, struct proc *);
static void bsd4_yield(struct lwp *lp);

static void need_user_resched_remote(void *dummy);
static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);
struct usched usched_bsd4 = {
	{ NULL },
	"bsd4", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	bsd4_acquire_curproc,
	bsd4_release_curproc,
	bsd4_setrunqueue,
	bsd4_schedulerclock,
	bsd4_recalculate_estcpu,
	bsd4_resetpriority,
	bsd4_forking,
	bsd4_exiting,
	NULL,			/* setcpumask not supported */
	bsd4_yield
};
struct usched_bsd4_pcpu {
	struct thread	helper_thread;
	short		rrcount;
	short		upri;
	struct lwp	*uschedcp;
};

typedef struct usched_bsd4_pcpu	*bsd4_pcpu_t;
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1;	/* currently running a user process */
static cpumask_t bsd4_rdyprocmask;	/* ready to accept a user process */
static int	 bsd4_runqcount;
static volatile int bsd4_scancpu;
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
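
/*
 * Illustrative sketch (not part of the original flow): finding the most
 * urgent non-empty normal-class queue is a single bit scan, e.g.
 *
 *	pri = bsfl(bsd4_queuebits);	- lowest set bit == best queue
 *	q   = &bsd4_queues[pri];
 *
 * chooseproc_locked() below does exactly this for all three classes.
 */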
SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
    "Number of runnable lwps queued on the run queues");

static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
    &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
    &usched_optimal, 0, "acquire_curproc() was optimal");

static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0,
    "Print debug information for this pid");

static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
    &remote_resched_nonaffinity, 0, "Number of remote rescheds without affinity");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
    &remote_resched_affinity, 0, "Number of remote rescheds with affinity");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
    &choose_affinity, 0, "chooseproc() was smart");

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
    &usched_bsd4_rrinterval, 0, "Round-robin interval in scheduler ticks");
static int usched_bsd4_decay = 8;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
    &usched_bsd4_decay, 0, "Extra decay when not running");
static int usched_bsd4_batch_time = 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_batch_time, CTLFLAG_RW,
    &usched_bsd4_batch_time, 0, "Minimum batch counter value");
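
/*
 * The _kern knobs above are runtime tunables; for example (assuming a
 * standard sysctl(8) userland):
 *
 *	sysctl kern.usched_bsd4_rrinterval	- show the rr interval
 *	sysctl kern.usched_bsd4_decay=8		- restore the default decay
 */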
/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
	int i;

	spin_init(&bsd4_spin);
	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&bsd4_queues[i]);
		TAILQ_INIT(&bsd4_rtqueues[i]);
		TAILQ_INIT(&bsd4_idqueues[i]);
	}
	atomic_clear_cpumask(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)
/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel has already depressed our LWKT priority so we must not switch
 * until we have either assigned or disposed of the thread.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	thread_t td;
	struct lwp *olp;

	/*
	 * Make sure we aren't sitting on a tsleep queue.
	 */
	td = lp->lwp_thread;
	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	bsd4_recalculate_estcpu(lp);
	/*
	 * If a reschedule was requested give another thread the
	 * chance.
	 */
	if (user_resched_wanted()) {
		clear_user_resched();
		bsd4_release_curproc(lp);
	}

	/*
	 * Loop until we are the current user thread
	 */
	gd = mycpu;
	dd = &bsd4_pcpu[gd->gd_cpuid];
	do {
		/*
		 * Process any pending events and higher priority threads.
		 */
		lwkt_yield();
		/*
		 * Become the currently scheduled user thread for this cpu
		 * if we can do so trivially.
		 *
		 * We can steal another thread's current thread designation
		 * on this cpu since if we are running that other thread
		 * must not be, so we can safely deschedule it.
		 */
		if (dd->uschedcp == lp) {
			/*
			 * We are already the current lwp (hot path).
			 */
			dd->upri = lp->lwp_priority;
		} else if (dd->uschedcp == NULL) {
			/*
			 * We can trivially become the current lwp.
			 */
			atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
		} else if (dd->upri > lp->lwp_priority) {
			/*
			 * We can steal the current cpu's lwp designation
			 * away simply by replacing it.  The other thread
			 * will stall when it tries to return to userland.
			 */
			olp = dd->uschedcp;
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			lwkt_deschedule(olp->lwp_thread);
			bsd4_setrunqueue(olp);
		} else {
			/*
			 * We cannot become the current lwp, place the lp
			 * on the bsd4 run-queue and deschedule ourselves.
			 *
			 * When we are reactivated we will have another
			 * chance.
			 */
			lwkt_deschedule(lp->lwp_thread);
			bsd4_setrunqueue(lp);
			lwkt_switch();
			/*
			 * Reload after a switch or setrunqueue/switch possibly
			 * moved us to another cpu.
			 */
			gd = mycpu;
			dd = &bsd4_pcpu[gd->gd_cpuid];
		}
	} while (dd->uschedcp != lp);

	crit_exit_quick(td);
	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
}
/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	if (dd->uschedcp == lp) {
		crit_enter();
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
		dd->uschedcp = NULL;	/* don't let lp be selected */
		dd->upri = PRIBASE_NULL;
		atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
		bsd4_select_curproc(gd);
		crit_exit();
	}
}
/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 */
static void
bsd4_select_curproc(globaldata_t gd)
{
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	crit_enter_gd(gd);
	spin_lock(&bsd4_spin);
	if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
		atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		spin_unlock(&bsd4_spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
		atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
		spin_unlock(&bsd4_spin);
		lwkt_schedule(&dd->helper_thread);
	} else {
		spin_unlock(&bsd4_spin);
	}
	crit_exit_gd(gd);
}
/*
 * BSD4_SETRUNQUEUE
 *
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 *
 * The thread may be the current thread as a special case.
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
#ifdef SMP
	int cpuid;
	cpumask_t mask;
	cpumask_t tmpmask;
#endif

	/*
	 * First validate the process state relative to the current cpu.
	 * We don't need the spinlock for this, just a critical section.
	 * We are in control of the process.
	 */
	crit_enter();
	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
	/*
	 * Note: gd and dd are relative to the target thread's last cpu,
	 * NOT our current cpu.
	 */
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(dd->uschedcp != lp);

#ifndef SMP
	/*
	 * If we are not SMP we do not have a scheduler helper to kick
	 * and must directly activate the process if none are scheduled.
	 *
	 * This is really only an issue when bootstrapping init since
	 * the caller in all other cases will be a user process, and
	 * even if released (dd->uschedcp == NULL), that process will
	 * kickstart the scheduler when it returns to user mode from
	 * the kernel.
	 */
	if (dd->uschedcp == NULL) {
		atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		lwkt_schedule(lp->lwp_thread);
		crit_exit();
		return;
	}
#endif
#ifdef SMP
	/*
	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
	 * operation when the priority is recalculated, so TDF_MIGRATING
	 * may already be set.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);
#endif

	/*
	 * We lose control of lp the moment we release the spinlock after
	 * having placed lp on the queue.  i.e. another cpu could pick it
	 * up and it could exit, or its priority could be further adjusted,
	 * or something like that.
	 */
	spin_lock(&bsd4_spin);
	bsd4_setrunqueue_locked(lp);
#ifdef SMP
	/*
	 * Kick the scheduler helper on one of the other cpus
	 * and request a reschedule if appropriate.
	 *
	 * NOTE: We check all cpus whose rdyprocmask is set.  First we
	 *	 look for cpus without designated lps, then we look for
	 *	 cpus with designated lps with a worse priority than our
	 *	 new process.
	 */
	cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
	mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
	       smp_active_mask & usched_global_cpumask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		gd = globaldata_find(cpuid);
		dd = &bsd4_pcpu[cpuid];

		if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}

	/*
	 * Then cpus which might have a currently running lp
	 */
	mask = bsd4_curprocmask & bsd4_rdyprocmask &
	       lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		gd = globaldata_find(cpuid);
		dd = &bsd4_pcpu[cpuid];

		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}
	/*
	 * If we cannot find a suitable cpu we reload from bsd4_scancpu
	 * and round-robin.  Other cpus will pick up as they release their
	 * current lwps or become ready.
	 *
	 * Avoid a degenerate system lockup case if usched_global_cpumask
	 * is set to 0 or otherwise does not cover lwp_cpumask.
	 *
	 * We only kick the target helper thread in this case; we do not
	 * set the user resched flag.
	 */
	cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
	if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
		cpuid = 0;
	gd = globaldata_find(cpuid);
	dd = &bsd4_pcpu[cpuid];
found:
	if (gd == mycpu) {
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
			if (dd->uschedcp == NULL) {
				lwkt_schedule(&dd->helper_thread);
			} else {
				need_user_resched();
			}
		}
	} else {
		atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
		else
			lwkt_schedule(&dd->helper_thread);
	}
#else
	/*
	 * Request a reschedule if appropriate.
	 */
	spin_unlock(&bsd4_spin);
	if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
		need_user_resched();
	}
#endif
	crit_exit();
}
/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 */
static void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
	 */
	if (++dd->rrcount >= usched_bsd4_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}

	/*
	 * Adjust estcpu upward using a real time equivalent calculation.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
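
	/*
	 * Rate check (illustrative, assuming the stock ESTCPUFREQ of 50Hz):
	 * each tick adds ESTCPUMAX / ESTCPUFREQ + 1 = 16384 / 50 + 1 = 328
	 * units, so a thread that runs flat out saturates estcpu in roughly
	 * one second of cpu time.
	 */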
	/*
	 * Spinlocks also hold a critical section so there should not be
	 * any active spinlocks held when we get here.
	 */
	KKASSERT(gd->gd_spinlocks_wr == 0);

	bsd4_resetpriority(lp);

	/*
	 * NOTE: If we can't call bsd4_resetpriority for some reason we must
	 *	 fall back to calling need_user_resched().
	 */
}
/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
bsd4_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	sysclock_t ttlticks;
	int estcpu;
	int decay_factor;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
		lp->lwp_batch -= ESTCPUFREQ;
		if (lp->lwp_batch < 0)
			lp->lwp_batch = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The ttlticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		ttlticks = (cpbase - lp->lwp_cpbase) /
			   gd->gd_schedclock.periodic;
		if (ttlticks < 0) {
			ttlticks = 0;
			lp->lwp_cpbase = cpbase;
		}
		if (ttlticks == 0)
			return;
		updatepcpu(lp, lp->lwp_cpticks, ttlticks);
		/*
		 * Calculate the percentage of one cpu used factoring in ncpus
		 * and the load and adjust estcpu.  Handle degenerate cases
		 * by adding 1 to bsd4_runqcount.
		 *
		 * estcpu is scaled by ESTCPUMAX.
		 *
		 * bsd4_runqcount is the excess number of user processes
		 * that cannot be immediately scheduled to cpus.  We want
		 * to count these as running to avoid range compression
		 * in the base calculation (which is the actual percentage
		 * of one cpu).
		 */
		estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
			 (bsd4_runqcount + ncpus) / (ncpus * ttlticks);
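
		/*
		 * Worked example (illustrative): an lwp that used 40 of the
		 * last 50 ticks on a 2-cpu box with 2 excess runnable lwps
		 * scores
		 *
		 *	estcpu = (40 * 16384) * (2 + 2) / (2 * 50) = 26214
		 *
		 * Values above ESTCPUMAX are possible here; the ESTCPULIM()
		 * in the decay step below keeps the final lwp_estcpu in range.
		 */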
		/*
		 * If estcpu is > 50% we become more batch-like
		 * If estcpu is <= 50% we become less batch-like
		 *
		 * It takes 30 cpu seconds to traverse the entire range.
		 */
		if (estcpu > ESTCPUMAX / 2) {
			lp->lwp_batch += ttlticks;
			if (lp->lwp_batch > BATCHMAX)
				lp->lwp_batch = BATCHMAX;
		} else {
			lp->lwp_batch -= ttlticks;
			if (lp->lwp_batch < 0)
				lp->lwp_batch = 0;
		}

		if (usched_debug == lp->lwp_proc->p_pid) {
			kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
				lp->lwp_proc->p_pid, lp,
				estcpu, lp->lwp_estcpu,
				lp->lwp_batch,
				lp->lwp_cpticks, ttlticks);
		}
		/*
		 * Adjust lp->lwp_estcpu.  The decay factor determines how
		 * quickly lwp_estcpu collapses to its realtime calculation.
		 * A slower collapse gives us a more accurate number but
		 * can cause a cpu hog to eat too much cpu before the
		 * scheduler decides to downgrade it.
		 *
		 * NOTE: p_nice is accounted for in bsd4_resetpriority(),
		 *	 and not here, but we must still ensure that a
		 *	 cpu-bound nice -20 process does not completely
		 *	 override a cpu-bound nice +20 process.
		 *
		 * NOTE: We must use ESTCPULIM() here to deal with any
		 *	 overshoot.
		 */
		decay_factor = usched_bsd4_decay;
		if (decay_factor < 1)
			decay_factor = 1;
		if (decay_factor > 1024)
			decay_factor = 1024;

		lp->lwp_estcpu = ESTCPULIM(
			(lp->lwp_estcpu * decay_factor + estcpu) /
			(decay_factor + 1));

		if (usched_debug == lp->lwp_proc->p_pid)
			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
		bsd4_resetpriority(lp);
		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
	}
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
	bsd4_pcpu_t dd;
	int newpriority;
	u_char newrqtype;
	int reschedcpu;
	int checkpri;
	int estcpu;
	/*
	 * Calculate the new priority and queue type
	 */
	spin_lock(&bsd4_spin);

	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		newpriority = PRIBASE_REALTIME +
			      (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		/*
		 * Detune estcpu based on batchiness.  lwp_batch ranges
		 * from 0 to BATCHMAX.  Limit estcpu for the sake of
		 * the priority calculation to between 50% and 100%.
		 */
		estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
			 (BATCHMAX * 2);

		/*
		 * p_nice piece		Adds (0-40) * 2		0-80
		 * estcpu		Adds 16384  * 4 / 512	0-128
		 */
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}
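
	/*
	 * Worked example for the normal-class formula above (illustrative):
	 * nice 0 and a detuned estcpu of 8192 (50%) give
	 *
	 *	newpriority = 20 * 4 / 2 + 8192 * 4 / 512 = 40 + 64 = 104
	 *	newpriority = 104 * 128 / (41*4/2 + 16384*4/512)
	 *		    = 104 * 128 / 210 = 63
	 *
	 * so the lwp lands near the middle of the 128 normal-class
	 * priorities before PRIBASE_NORMAL is added.
	 */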
	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		lp->lwp_priority = newpriority;
		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
			bsd4_remrunqueue_locked(lp);
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			bsd4_setrunqueue_locked(lp);
			checkpri = 1;
		} else {
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			checkpri = 0;
		}
		reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
	} else {
		lp->lwp_priority = newpriority;
		reschedcpu = -1;
		checkpri = 1;
	}
	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 *
	 * The LWP may be owned by a CPU different from the current one,
	 * in which case dd->uschedcp may be modified without an MP lock
	 * or a spinlock held.  The worst that happens is that the code
	 * below causes a spurious need_user_resched() on the target CPU
	 * and dd->upri to be wrong for a short period of time, both of
	 * which are harmless.
	 *
	 * If checkpri is 0 we are adjusting the priority of the current
	 * process, possibly higher (less desirable), so ignore the upri
	 * check which will fail in that case.
	 */
	if (reschedcpu >= 0) {
		dd = &bsd4_pcpu[reschedcpu];
		if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
		    (checkpri == 0 ||
		     (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
#ifdef SMP
			if (reschedcpu == mycpu->gd_cpuid) {
				spin_unlock(&bsd4_spin);
				need_user_resched();
			} else {
				spin_unlock(&bsd4_spin);
				atomic_clear_cpumask(&bsd4_rdyprocmask,
						     CPUMASK(reschedcpu));
				lwkt_send_ipiq(lp->lwp_thread->td_gd,
					       need_user_resched_remote, NULL);
			}
#else
			spin_unlock(&bsd4_spin);
			need_user_resched();
#endif
		} else {
			spin_unlock(&bsd4_spin);
		}
	} else {
		spin_unlock(&bsd4_spin);
	}
}
static void
bsd4_yield(struct lwp *lp)
{
#if 0
	/* FUTURE (or something similar) */
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
		break;
	default:
		break;
	}
#endif
	need_user_resched();
}
/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batchy than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
	/*
	 * Put the child 4 queue slots (out of 32) higher than the parent
	 * (less desirable than the parent).
	 */
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
	/*
	 * The batch status of children always starts out centerline
	 * and will inch-up or inch-down as appropriate.  It takes roughly
	 * ~15 seconds of >50% cpu to hit the limit.
	 */
	lp->lwp_batch = BATCHMAX / 2;

	/*
	 * Dock the parent a cost for the fork, protecting us from fork
	 * bombs.  If the parent is forking quickly make the child more
	 * batchy.
	 */
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
}
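
/*
 * Dock arithmetic for bsd4_forking() above (illustrative): ESTCPUPPQ / 16
 * is 512 / 16 = 32 estcpu units per fork, so roughly 16 rapid forks cost
 * the parent a full priority queue's worth (ESTCPUPPQ) of estcpu.
 */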
/*
 * Called when a parent waits for a child.
 */
static void
bsd4_exiting(struct lwp *lp, struct proc *child_proc)
{
}

/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *	    left intact through the entire routine.
 */
static struct lwp *
chooseproc_locked(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;
	cpumask_t cpumask;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

again:
	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		which2 = &rtqbits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		which2 = &tsqbits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		which2 = &idqbits;
	} else {
		return NULL;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	while ((lp->lwp_cpumask & cpumask) == 0) {
		lp = TAILQ_NEXT(lp, lwp_procq);
		if (lp == NULL) {
			*which2 &= ~(1 << pri);
			goto again;
		}
	}
	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return NULL;
	}

	/*
	 * If the chosen lwp does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (lp->lwp_thread->td_gd != mycpu &&
	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
	) {
		if (chklp->lwp_thread->td_gd == mycpu) {
			++choose_affinity;
			lp = chklp;
		}
	}

	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	return lp;
}
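
/*
 * Margin note (illustrative): with PPQ == 4 the chklp test in
 * chooseproc_locked() keeps the caller's current lwp unless the queued
 * candidate is better by at least a full queue, e.g. a chklp priority of
 * 60 is retained against candidates down to priority 57 and only
 * displaced at 56 or better (lower is better).
 */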
static void
need_user_resched_remote(void *dummy)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	need_user_resched();
	lwkt_schedule(&dd->helper_thread);
}
/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	--bsd4_runqcount;
	KKASSERT(bsd4_runqcount >= 0);
	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
			("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}
/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.  Determine if the addition requires
 * a reschedule on a cpu and return the cpuid or -1.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	++bsd4_runqcount;

	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("setrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Add to the correct queue and set the appropriate bit.  If no
	 * lower priority (i.e. better) processes are in the queue then
	 * we want a reschedule, calculate the best cpu for the job.
	 *
	 * Always run reschedules on the LWP's original cpu.
	 */
	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	*which |= 1 << pri;
}
/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().
 *
 * UP systems do not need the helper since there is only one cpu.
 *
 * We can't use the idle thread for this because we might block.
 * Additionally, doing things this way allows us to HLT idle cpus
 * on an SMP system.
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	bsd4_pcpu_t tmpdd;
	struct lwp *nlp;
	cpumask_t mask;
	cpumask_t tmpmask;
	int cpuid;
	int tmpid;

	gd = mycpu;
	cpuid = gd->gd_cpuid;	/* doesn't change */
	mask = gd->gd_cpumask;	/* doesn't change */
	dd = &bsd4_pcpu[cpuid];

	/*
	 * Since we are woken up only when no user processes are scheduled
	 * on a cpu, we can run at an ultra low priority.
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);
	for (;;) {
		/*
		 * We use the LWKT deschedule-interlock trick to avoid racing
		 * bsd4_rdyprocmask.  This means we cannot block through to the
		 * manual lwkt_switch() call we make below.
		 */
		crit_enter_gd(gd);
		lwkt_deschedule_self(gd->gd_curthread);
		spin_lock(&bsd4_spin);
		atomic_set_cpumask(&bsd4_rdyprocmask, mask);

		clear_user_resched();	/* This satisfied the reschedule request */
		dd->rrcount = 0;	/* Reset the round-robin counter */
		if ((bsd4_curprocmask & mask) == 0) {
			/*
			 * No thread is currently scheduled.
			 */
			KKASSERT(dd->uschedcp == NULL);
			if ((nlp = chooseproc_locked(NULL)) != NULL) {
				atomic_set_cpumask(&bsd4_curprocmask, mask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				spin_unlock(&bsd4_spin);
			}
		} else if (bsd4_runqcount) {
			if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				/*
				 * CHAINING CONDITION TRAIN
				 *
				 * We could not deal with the scheduler wakeup
				 * request on this cpu, locate a ready scheduler
				 * with no current lp assignment and chain to it.
				 *
				 * This ensures that a wakeup race which fails due
				 * to priority test does not leave other unscheduled
				 * cpus idle when the runqueue is not empty.
				 */
				tmpmask = ~bsd4_curprocmask & bsd4_rdyprocmask &
					  smp_active_mask;
				if (tmpmask) {
					tmpid = BSFCPUMASK(tmpmask);
					tmpdd = &bsd4_pcpu[tmpid];
					atomic_clear_cpumask(&bsd4_rdyprocmask,
							     CPUMASK(tmpid));
					spin_unlock(&bsd4_spin);
					lwkt_schedule(&tmpdd->helper_thread);
				} else {
					spin_unlock(&bsd4_spin);
				}
			}
		} else {
			/*
			 * The runq is empty.
			 */
			spin_unlock(&bsd4_spin);
		}

		/*
		 * We're descheduled unless someone scheduled us.  Switch away.
		 * Exiting the critical section will cause splz() to be called
		 * for us if interrupts and such are pending.
		 */
		crit_exit_gd(gd);
		lwkt_switch();
	}
}
/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;

	kprintf("start scheduler helpers on cpus:");

	for (i = 0; i < ncpus; ++i) {
		bsd4_pcpu_t dd = &bsd4_pcpu[i];
		cpumask_t mask = CPUMASK(i);

		if ((mask & smp_active_mask) == 0)
			continue;

		kprintf(" %d", i);

		lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
			    TDF_NOSTART, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has already
		 * been enabled in rqinit().
		 */
		if (i)
			atomic_clear_cpumask(&bsd4_curprocmask, mask);
		atomic_set_cpumask(&bsd4_rdyprocmask, mask);
		dd->upri = PRIBASE_NULL;
	}
	kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	sched_thread_cpu_init, NULL)