/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.30 2004/03/20 19:16:24 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

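/*
 * Worked example (added for illustration): with a 5 second sampling
 * interval, a 1 minute window covers 60/5 = 12 samples, so the per-sample
 * decay factor is exp(-5/60) = exp(-1/12) ~= 0.9200.  The 5 and 15 minute
 * windows give exp(-1/60) and exp(-1/180) the same way.
 */
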
static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

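/*
 * Example (added for illustration): the sysctl exports the quantum in
 * microseconds.  With hz = 100 (tick = 10000 us) and the default
 * sched_quantum = hz/10 = 10 ticks, reading kern.quantum returns 100000;
 * writing kern.quantum=50000 sets sched_quantum to 5 ticks.
 */
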
int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING! The MP lock is not held on ipi message remotes.
 */
#ifdef SMP
static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
}
#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	timeout(roundrobin, NULL, sched_quantum);
}

#ifdef SMP
void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}
#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUFREQ per second, but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.
 *
 * We want to decay estcpu by 18% per second, but we have to scale the
 * decay to the load to avoid overpowering the estcpu aggregation.  To
 * stabilize the equation under low loads we make everything relative to
 * a load average of 1.0:
 *
 *	estcpu -= estcpu * 0.18 / loadav			base equation
 *	estcpu -= (estcpu + ESTCPUFREQ) * 0.18 / (loadav + 1)	supplemented
 *
 * Note: 0.18 ~= 100/555
 */
#define decay_cpu(loadav,estcpu)	\
    (((estcpu + ESTCPUFREQ) * (100 * FSCALE / 555)) / ((loadav) + FSCALE))

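/*
 * Worked example (added for illustration; assumes FSCALE = 2048 and,
 * hypothetically, ESTCPUFREQ = 50): with loadav = 1.0 (2048 fixed point)
 * and estcpu = 100,
 *
 *	decay_cpu = ((100 + 50) * (100 * 2048 / 555)) / (2048 + 2048)
 *		  = (150 * 369) / 4096 ~= 13
 *
 * which matches the supplemented equation: (100 + 50) * 0.18 / 2 = 13.5.
 */
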
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

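/*
 * Sanity check (added for illustration): schedcpu() multiplies p_pctcpu
 * by ccpu once per second, so after 60 seconds the old contribution is
 * scaled by exp(-1/20)^60 = exp(-3) ~= 0.0498, i.e. about 95% of the
 * history has decayed away, matching the comment above.
 */
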
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	unsigned int ndecay;
	int s;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

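/*
 * Example (added for illustration): a hypothetical wait channel address
 * 0xc0de1234 hashes to bucket (0xc0de1234 >> 8) & 127 = 0xc0de12 & 0x7f
 * = 0x12 = 18.  Any two channels within the same 256-byte block collide
 * into the same bucket, which is harmless: the wakeup code rechecks
 * td_wchan against the exact identifier.
 */
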
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * If the process has P_CURPROC set mi_switch() will not re-queue it to
 * the userland scheduler queues because we are in a SSLEEP state.  If
 * we are not the current process then we have to remove ourselves from
 * the scheduler queues.
 *
 * YYY priority now unused
 */

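/*
 * Usage sketch (added for illustration; `sc' and `sc_busy' are
 * hypothetical driver fields).  The caller typically loops, re-testing
 * its predicate after each wakeup:
 *
 *	while (sc->sc_busy) {
 *		error = tsleep(sc, PCATCH, "devwait", 5 * hz);
 *		if (error != 0)
 *			return (error);
 *	}
 *
 * error is 0 when awakened, EWOULDBLOCK on the 5 second timeout, and
 * EINTR or ERESTART if a signal was caught via PCATCH.  The matching
 * wakeup(sc) makes all threads sleeping on the address `sc' runnable.
 */
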
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int s, sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout_handle thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	s = splhigh();
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	crit_enter();
	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p)
		p->p_slptime = 0;
	lwkt_deschedule_self();
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo)
		thandle = timeout(endtsleep, (void *)td, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self();
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove ourself
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		KKASSERT(td->td_release || (p->p_flag & P_CURPROC) == 0);
		mi_switch();
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	crit_exit();
	if (p)
		p->p_flag &= ~P_SINTR;
	splx(s);
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		untimeout(endtsleep, (void *)td, thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	int s;

	s = splhigh();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;
	int s;

	s = splhigh();
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x",
		    p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				setrunqueue(p);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int s;
	int id = LOOKUP(ident);

	s = splhigh();
	qp = &slpque[id];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
		}
	}
	splx(s);
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;	/* XXX */
	struct rlimit *rlim;
	u_int64_t ttime;
	int x;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit match is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.  If we
	 * are in a SSTOPped state we deschedule ourselves.  YYY this needs
	 * to be cleaned up, remember that LWKTs stay on their run queue
	 * which works differently than the user scheduler which removes
	 * the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self();
	lwkt_switch();

	splx(x);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;
	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.  If P_CURPROC is not set and we are in SRUN the process is on the
 * run queue (If P_INMEM is not set then it isn't because it is swapped).
 */
void
clrrunnable(struct proc *p, int stat)
{
	int s;

	s = splhigh();
	if (p->p_flag & P_ONRUNQ)
		remrunqueue(p);
	p->p_stat = stat;
	splx(s);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.
	 */
	newpriority = NICE_ADJUST(p->p_nice - PRIO_MIN) +
	    p->p_estcpu / ESTCPURAMP;
	newpriority = min(newpriority, MAXPRI);
	npq = newpriority / PPQ;
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	struct loadavg *avg;
	struct proc *p;
	int i, nrun;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
		case SIDL:
			nrun++;
			break;
		}
	}
	for (i = 0; i < 3; i++) {
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

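/*
 * Example (added for illustration): with hz = 100 the delay is
 * 400 + (random() % 201) ticks, i.e. uniformly 4.0 to 6.0 seconds with
 * a mean of 5 seconds, which is the sampling interval the cexp[]
 * constants above were derived for.
 */
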
/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */

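/*
 * Mechanics sketch (added for illustration): each statclock-driven call
 * below adds one tick to p_estcpu (clamped by ESTCPULIM), so a process
 * running continuously gains roughly ESTCPUFREQ estcpu per second, while
 * schedcpu() decays it via decay_cpu() once per second.  The priority is
 * only recomputed when p_estcpu crosses a PPQ boundary, keeping the
 * common tick path cheap.
 */
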
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;
		p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
		if ((p->p_estcpu % PPQ) == 0 && try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}

static void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}