/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.46 2005/06/26 22:03:22 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>
static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus2, ncpus2_shift, ncpus2_mask;
int	safepri;

static struct callout loadav_callout;
static struct callout roundrobin_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
/*
 * Adjust the scheduler quantum.  The quantum is specified in microseconds.
 * Note that 'tick' is in microseconds per tick.
 */
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
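
/*
 * Illustrative sketch, not compiled: the tick/microsecond round trip
 * performed by sysctl_kern_quantum() above, assuming hz == 100 (so
 * tick == 10000us) and the default quantum of (hz + 24) / 25 == 4 ticks.
 */
#if 0
static void
quantum_example(void)
{
	int quantum_ticks = (hz + 24) / 25;		/* 4 ticks */
	int quantum_usec = quantum_ticks * tick;	/* 40000us, as exported */

	KKASSERT(quantum_usec / tick == quantum_ticks);	/* exact round trip */
	KKASSERT(hogticks == 2 * quantum_ticks);	/* cpu-hog threshold */
}
#endif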
int
roundrobin_interval(void)
{
	return (sched_quantum);
}
/*
 * Force a switch among equal priority processes once per quantum
 * (hz/25 ticks, i.e. every 40ms, by default; see sleepinit() below).
 *
 * WARNING!  The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif
/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUFREQ per second (50hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUFREQ / load
 *	decay = ESTCPUFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

#define cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
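
/*
 * Illustrative sketch, not compiled: the fixed-point arithmetic behind
 * cload() and the schedcpu() decay calculation below, using the typical
 * constants named above (FSCALE 2048, FSHIFT 11, ESTCPUFREQ 50).
 */
#if 0
static void
cload_example(void)
{
	fixpt_t half = FSCALE / 2;	/* load average 0.5, scaled */
	fixpt_t two = FSCALE * 2;	/* load average 2.0, scaled */

	KKASSERT(cload(half) == FSCALE);	/* clamped up to load 1.0 */
	KKASSERT(cload(two) == two);		/* loads >= 1.0 pass through */

	/* 50 cpticks scaled by a 2.0 load yields 100 "normalized" ticks */
	KKASSERT(((50 * cload(two)) >> FSHIFT) == 100);
}
#endif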
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT	11
/*
 * Recompute process priorities, once a second.
 */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;

		/* prevent state changes and protect run queue */
		crit_enter();

		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100) ?
			((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t)p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		/*
		 * A single cpu-bound process with a system load of 1.0 will
		 * increment cpticks by ESTCPUFREQ per second.  Scale
		 * cpticks by the load to normalize it relative to
		 * ESTCPUFREQ.  This gives us an indication as to what
		 * proportional percentage of available cpu the process has
		 * used with a nominal range of 0 to ESTCPUFREQ.
		 *
		 * It should be noted that since the load average is a
		 * trailing indicator, a jump in the load will cause this
		 * calculation to be higher than normal.  This is desirable
		 * because it penalizes the processes responsible for the
		 * jump in load.
		 */
		ndecay = (int)((p->p_cpticks * cload(loadfac)) >> FSHIFT);

		/*
		 * Reduce p_estcpu based on the amount of cpu that could
		 * have been used but wasn't.  Convert ndecay from the
		 * amount used to the amount not used, and scale with our
		 * nice value.
		 *
		 * The nice scaling determines how much the nice value
		 * affects the cpu the process gets.
		 */
		ndecay = ESTCPUFREQ - ndecay;
		ndecay -= p->p_nice * (ESTCPUMAX / 16) / PRIO_MAX;
		if (ndecay < 0)
			ndecay = 0;
		if (ndecay > p->p_estcpu / 2)
			ndecay = p->p_estcpu / 2;
		p->p_estcpu -= ndecay;
		p->p_usched->resetpriority(p);
		p->p_cpticks = 0;
		crit_exit();
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}
/*
 * Recalculate the priority of a process after it has slept for a while.
 * The decay is ESTCPUFREQ worth of estcpu per second slept, so with the
 * typical constants above (ESTCPUFREQ 50, ESTCPUMAX ~376) roughly eight
 * seconds of sleep decays p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	int ndecay;

	ndecay = p->p_slptime * ESTCPUFREQ;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	p->p_usched->resetpriority(p);
}
/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
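
/*
 * Illustrative sketch, not compiled: how LOOKUP() buckets wait-channel
 * addresses.  Only bits 8..14 of the ident select the bucket, so every
 * address in the same 256-byte block hashes to the same slpque[] chain;
 * tsleep()/wakeup() therefore still compare td_wchan exactly after
 * indexing.  The addresses below are arbitrary example values.
 */
#if 0
static void
lookup_example(void)
{
	KKASSERT(LOOKUP((void *)0x1200) == 0x12);
	KKASSERT(LOOKUP((void *)0x12ff) == 0x12);	/* same 256-byte block */
	KKASSERT(LOOKUP((void *)0x1300) == 0x13);	/* next block, next bucket */
	KKASSERT(LOOKUP((void *)0x9200) == 0x12);	/* wraps modulo TABLESIZE */
}
#endif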
/*
 * General scheduler initialization.  We force a reschedule 25 times
 * a second by default.
 */
void
sleepinit(void)
{
	int i;

	sched_quantum = (hz + 24) / 25;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	int oldpri;
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri & TDPRI_MASK;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		p->p_usched->release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove ourselves
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}
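
/*
 * Illustrative sketch, not compiled: the canonical tsleep()/wakeup()
 * usage pattern for the API above.  `struct example_softc' and its
 * sc_ready field are hypothetical, not part of this file.  The waiter
 * re-tests its condition in a loop because wakeup() wakes every thread
 * sleeping on the identifier and another thread may consume the event
 * first; the critical section closes the test-then-sleep race against
 * an interrupt-driven example_post() (in DragonFly, critical sections
 * are per-thread state and survive blocking in tsleep()).
 */
#if 0
struct example_softc {
	int	sc_ready;
};

static int
example_wait(struct example_softc *sc)
{
	int error;

	crit_enter();
	while (sc->sc_ready == 0) {
		/* interruptible sleep with a 5 second timeout */
		error = tsleep(&sc->sc_ready, PCATCH, "exwait", 5 * hz);
		if (error) {		/* EWOULDBLOCK, EINTR, or ERESTART */
			crit_exit();
			return (error);
		}
	}
	sc->sc_ready = 0;
	crit_exit();
	return (0);
}

/* producer side: post the event and wake all sleepers */
static void
example_post(struct example_softc *sc)
{
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);
}
#endif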
/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}
/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}
/*
 * Make all processes sleeping on the specified identifier runnable.
 * A count of 0 wakes up an unlimited number of processes.
 */
static void
_wakeup(void *ident, int domain, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident && td->td_wdomain == domain) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
		}
	}
	crit_exit();
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0, 0);
}
void
wakeup_one(void *ident)
{
	_wakeup(ident, 0, 1);
}

void
wakeup_domain(void *ident, int domain)
{
	_wakeup(ident, domain, 0);
}

void
wakeup_domain_one(void *ident, int domain)
{
	_wakeup(ident, domain, 1);
}
/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit math is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in a SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler
	 * which removes the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}
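
/*
 * Illustrative sketch, not compiled: the RLIMIT_CPU arithmetic above.
 * td_sticks + td_uticks accumulate in microseconds, so a hard limit of
 * rlim_max == 60 (seconds) fires once 60000000us have been consumed.
 */
#if 0
static void
cpulimit_example(void)
{
	u_int64_t ttime = 61 * 1000000ULL;	/* 61 seconds of cpu time */
	rlim_t max = 60;			/* RLIMIT_CPU hard limit */

	KKASSERT(ttime / (rlim_t)1000000 >= max);	/* -> killproc() */
}
#endif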
/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	crit_enter();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;
	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.
	 */
	if (p->p_flag & P_INMEM)
		lwkt_schedule(p->p_thread);
	crit_exit();
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}
/*
 * Yield / synchronous reschedule.  This is a bit tricky because the trap
 * code might have set a lazy release on the switch function.  Setting
 * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
 * lwkt_switch(), and that we are given a greater chance of affinity with
 * our current cpu.
 *
 * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
 * run queue.  lwkt_switch() will also execute any assigned passive release
 * (which usually calls release_curproc()), allowing a same/higher priority
 * process to be designated as the current process.
 *
 * While it is possible for a lower priority process to be designated,
 * its call to lwkt_maybe_switch() in acquire_curproc() will likely
 * round-robin back to us and we will be able to re-acquire the current
 * process designation.
 */
void
uio_yield(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
	if (p) {
		p->p_flag |= P_PASSIVE_ACQ;
		lwkt_switch();
		p->p_flag &= ~P_PASSIVE_ACQ;
	} else {
		lwkt_switch();
	}
}
/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		p->p_usched->remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	struct loadavg *avg;
	struct proc *p;
	thread_t td;
	int i, nrun;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
			if ((td = p->p_thread) == NULL)
				break;
			if (td->td_flags & TDF_BLOCKED)
				break;
			/* fall through */
		case SIDL:
			nrun++;
			break;
		default:
			break;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
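
/*
 * Illustrative sketch, not compiled: one step of the exponential moving
 * average above, worked with concrete numbers.  Assumes FSCALE == 2048
 * and the 1-minute coefficient cexp[0] == 0.92 * FSCALE (~1884).
 */
#if 0
static void
loadav_example(void)
{
	fixpt_t avg = 1 * FSCALE;	/* current 1-minute load: 1.00 */
	int nrun = 3;			/* three runnable processes */

	/* new = old * exp(-5/60) + nrun * (1 - exp(-5/60)) */
	avg = (1884 * avg + nrun * FSCALE * (FSCALE - 1884)) >> FSHIFT;

	/*
	 * avg is now ~1.16 * FSCALE: the load moves 8% of the way from
	 * 1.00 toward 3.00 in one 5 second sample.
	 */
}
#endif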
/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&roundrobin_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}
/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
		if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0)
			p->p_usched->resetpriority(p);
	}
}
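
/*
 * Illustrative sketch, not compiled: the estcpu bookkeeping rates.
 * Assuming ESTCPUFREQ == 50, a cpu-bound process gains 50 estcpu per
 * second here, clipped by ESTCPULIM(), while schedcpu() above decays
 * estcpu once a second based on the load average.
 */
#if 0
static void
estcpu_example(void)
{
	int estcpu = 0;
	int i;

	/* one second of schedulerclock ticks for a cpu-bound process */
	for (i = 0; i < 50; i++)
		estcpu = ESTCPULIM(estcpu + 1);
	KKASSERT(estcpu == 50);		/* well below ESTCPUMAX (~376) */
}
#endif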