/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.17 2003/07/11 01:23:24 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>
static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	hogticks;
int	lbolt;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
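
/*
 * Illustrative userland sketch (not part of the kernel build): the
 * constants above are exp(-interval/period) for 5 second sampling,
 * i.e. exp(-5/60), exp(-5/300) and exp(-5/900).  This recomputes them
 * to show where the magic numbers come from.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	static const int period[3] = { 60, 300, 900 };	/* seconds */
	int i;

	for (i = 0; i < 3; i++)
		printf("cexp[%d] = %.16f\n", i, exp(-5.0 / period[i]));
	return (0);
}
#endif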
static void	endtsleep __P((void *));
static void	loadav __P((void *arg));
static void	maybe_resched __P((struct proc *chk));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));
static void	updatepri __P((struct proc *p));
static void	crit_panicints(void);
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 1)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}
SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
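
/*
 * Usage sketch (userland, illustrative): the handler above exports the
 * quantum in microseconds (sched_quantum ticks times `tick', the
 * microseconds per hardclock tick), so with hz == 100 the default
 * quantum of hz / 10 reads back as 100000.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int usec;
	size_t len = sizeof(usec);

	if (sysctlbyname("kern.quantum", &usec, &len, NULL, 0) == 0)
		printf("round-robin quantum: %d us\n", usec);
	usec = 50000;			/* request a 50ms quantum */
	sysctlbyname("kern.quantum", NULL, NULL, &usec, sizeof(usec));
	return (0);
}
#endif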
/*
 * Arrange to reschedule if necessary by checking to see if the current
 * process is on the highest priority user scheduling queue.  This may
 * be run from an interrupt so we have to follow any preemption chains
 * back to the original process.
 */
static void
maybe_resched(struct proc *chk)
{
	struct proc *cur = lwkt_preempted_proc();

	if (cur == NULL)
		return;

	/*
	 * Check the user queue (realtime, normal, idle).  Lower numbers
	 * indicate higher priority queues.  Lower numbers are also better
	 * for p_priority.
	 */
	if (chk->p_rtprio.type < cur->p_rtprio.type) {
		need_resched();
	} else if (chk->p_rtprio.type == cur->p_rtprio.type) {
		if (chk->p_rtprio.type == RTP_PRIO_NORMAL) {
			if (chk->p_priority / PPQ < cur->p_priority / PPQ)
				need_resched();
		} else {
			if (chk->p_rtprio.prio < cur->p_rtprio.prio)
				need_resched();
		}
	}
}
int
roundrobin_interval(void)
{
	return (sched_quantum);
}
/*
 * Force switch among equal priority processes every 100ms.
 */
static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
}
static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();

	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
	timeout(roundrobin, NULL, sched_quantum);
}
void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}
/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
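
/*
 * Illustrative check of the derivation above (userland sketch; FSHIFT
 * value assumed from <sys/param.h>): iterating decay_cpu() 5 * loadav
 * times on a maximal p_estcpu leaves roughly 10-13%, matching the
 * power table (5.68, 10.32, 14.94, 19.55).
 */
#if 0
#include <stdio.h>

#define FSHIFT	11			/* assumed, from <sys/param.h> */
#define FSCALE	(1 << FSHIFT)
#define loadfactor(loadav)	(2 * (loadav))
#define decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	int loadav, i;

	for (loadav = 1; loadav <= 4; loadav++) {
		long loadfac = loadfactor((long)loadav * FSCALE);
		long cpu = 255;		/* max p_estcpu */

		for (i = 0; i < 5 * loadav; i++)
			cpu = decay_cpu(loadfac, cpu);
		printf("loadav %d: %ld/255 (%.1f%%) remains after %d seconds\n",
		    loadav, cpu, cpu * 100.0 / 255, 5 * loadav);
	}
	return (0);
}
#endif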
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
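
/*
 * Sketch (userland, illustrative; FSHIFT assumed to be 11 as in
 * <sys/param.h>) comparing the shift approximation with the general
 * formula for stathz == 100: a process that consumed all cpticks of
 * the last second gains about (1 - exp(-1/20)) * FSCALE of p_pctcpu
 * either way.
 */
#if 0
#include <math.h>
#include <stdio.h>

#define FSHIFT		11		/* assumed, from <sys/param.h> */
#define FSCALE		(1 << FSHIFT)
#define CCPU_SHIFT	11

int
main(void)
{
	int cpticks = 100;	/* a full second at stathz == 100 */
	long ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
	long fast = (long)cpticks << (FSHIFT - CCPU_SHIFT);
	long slow = ((FSCALE - ccpu) * (cpticks * FSCALE / 100)) >> FSHIFT;

	printf("fast %ld, slow %ld, exact %.1f\n",
	    fast, slow, (1 - exp(-1.0 / 20)) * FSCALE);
	return (0);
}
#endif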
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	struct proc *curp;
	int realstathz;
	int s;

	curp = lwkt_preempted_proc();	/* YYY temporary hack */

	realstathz = stathz ? stathz : hz;
	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (realstathz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}
/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac) {
		p->p_estcpu = 0;
	} else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}
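
/*
 * Worked example (userland sketch, FSHIFT assumed to be 11): with a
 * steady load average of 1, loadfac == 2 * FSCALE and decay_cpu()
 * multiplies by 2/3 each second, so a maximal p_estcpu of 255 reaches
 * zero after about 13 seconds of sleep, in line with the
 * six-times-loadfactor (6 * 2 == 12) bound cited above.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11			/* assumed, from <sys/param.h> */
#define FSCALE	(1 << FSHIFT)
#define loadfactor(loadav)	(2 * (loadav))
#define decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	long loadfac = loadfactor(1L * FSCALE);
	unsigned int newcpu = 255;
	int seconds = 0;

	while (newcpu) {
		newcpu = decay_cpu(loadfac, newcpu);
		seconds++;
	}
	printf("p_estcpu reached 0 after %d seconds asleep\n", seconds);
	return (0);
}
#endif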
/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
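
/*
 * Sketch of the bucket hash (userland, illustrative): wait channels are
 * addresses of kernel structures, so dropping the low 8 bits and masking
 * with TABLESIZE - 1 spreads them over the table.  The addresses below
 * are hypothetical and land in distinct buckets.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TABLESIZE	128
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

int
main(void)
{
	uintptr_t chans[] = { 0xc0de1000, 0xc0de1100, 0xc0de2240 };
	int i;

	for (i = 0; i < 3; i++)
		printf("wchan %#lx -> slpque[%ld]\n",
		    (unsigned long)chans[i], (long)LOOKUP(chans[i]));
	return (0);
}
#endif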
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * If the process has P_CURPROC set mi_switch() will not re-queue it to
 * the userland scheduler queues because we are in a SSLEEP state.  If
 * we are not the current process then we have to remove ourselves from
 * the scheduler queues.
 *
 * YYY priority now unused
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority;
	const char *wmesg;
	int timo;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int s, sig = 0, catch = priority & PCATCH;
	int id = LOOKUP(ident);
	struct callout_handle thandle;
	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	s = splhigh();
	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p)
		p->p_slptime = 0;
	lwkt_deschedule_self();
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo)
		thandle = timeout(endtsleep, (void *)td, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p && catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (td->td_wchan) {
				unsleep(td);
				lwkt_schedule_self();
			}
			p->p_stat = SRUN;
			goto resume;
		}
		if (td->td_wchan == NULL) {
			catch = 0;
			goto resume;
		}
	}
	if (p) {
		/*
		 * If we are not the current process we have to remove ourself
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		KKASSERT(td->td_release || (p->p_flag & P_CURPROC) == 0);
		mi_switch();
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	splx(s);
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		untimeout(endtsleep, (void *)td, thandle);
	}
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			return (EINTR);
		return (ERESTART);
	}
	return (0);
}
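
/*
 * Typical tsleep() usage (sketch; `sc' and its fields are hypothetical):
 * block until wakeup(&sc->sc_event) or for at most one second, letting
 * signals interrupt the wait.
 */
#if 0
	int error;

	while (sc->sc_busy) {
		error = tsleep(&sc->sc_event, PCATCH, "widget", hz);
		if (error == EWOULDBLOCK)
			break;			/* timed out */
		if (error == EINTR || error == ERESTART)
			return (error);		/* interrupted by a signal */
		/* error == 0: explicit wakeup, re-test the condition */
	}
#endif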
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified xwait structure.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * If the passed generation number is different from the generation number
 * in the xwait, return immediately.
 */
int
xsleep(struct xwait *w, int priority, const char *wmesg, int timo, int *gen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int s, sig, catch = priority & PCATCH;
	struct callout_handle thandle;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KASSERT(p != NULL, ("xsleep1"));
	KASSERT(w != NULL && p->p_stat == SRUN, ("xsleep"));

	s = splhigh();
	/*
	 * If the generation number does not match we return immediately.
	 */
	if (*gen != w->gen) {
		*gen = w->gen;
		splx(s);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		return (0);
	}

	p->p_wchan = w;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_flag |= P_XSLEEP;
	TAILQ_INSERT_TAIL(&w->waitq, p, p_procq);
	if (timo)
		thandle = timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (p->p_wchan) {
				unsleep(p->p_thread);
				lwkt_schedule_self();
			}
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == NULL) {
			catch = 0;
			goto resume;
		}
	} else {
		sig = 0;
	}
	clrrunnable(p, SSLEEP);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
resume:
	*gen = w->gen;	/* update generation number */
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo) {
		untimeout(endtsleep, (void *)p, thandle);
	}
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
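
/*
 * Generation-number usage (sketch, hypothetical names): snapshot the
 * generation before testing the condition; if an xwakeup() slips in
 * between the test and the sleep, xsleep() returns immediately instead
 * of missing the event.
 */
#if 0
	int error, gen = w->gen;

	while (!condition_met(sc)) {
		error = xsleep(w, PCATCH, "xwait", 0, &gen);
		if (error)			/* signal or timeout */
			return (error);
	}
#endif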
/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	splx(s);
}
/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	struct proc *p = td->td_proc;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		if (p && (p->p_flag & P_XSLEEP)) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else {
			TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		}
		td->td_wchan = NULL;
	}
	splx(s);
}
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;
	int s;

	s = splhigh();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				setrunqueue(p);
				maybe_resched(p);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
			/* END INLINE EXPANSION */
		}
	}
	splx(s);
}
/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int s;
	int id = LOOKUP(ident);

	s = splhigh();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					if (p->p_flag & P_CURPROC)
						maybe_resched(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	splx(s);
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}
/*
 * Make a process sleeping on the specified identifier runnable.
 */
void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}
/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;	/* XXX */
	struct rlimit *rlim;
	int x;
	u_int64_t ttime;
	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();
	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit match is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
				p->p_limit->p_cpulimit =
				    (rlim_t)1000000 * rlim->rlim_cur;
			}
		}
	}
	/*
	 * Pick a new current process and record its start time.  If we
	 * are in a SSTOPped state we deschedule ourselves.  YYY this needs
	 * to be cleaned up, remember that LWKTs stay on their run queue
	 * which works differently than the user scheduler which removes
	 * the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self();
	lwkt_switch();

	splx(x);
}
/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;
	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	} else {
		maybe_resched(p);
	}
}
/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.  If P_CURPROC is not set and we are in SRUN the process is on the
 * run queue (If P_INMEM is not set then it isn't because it is swapped).
 */
void
clrrunnable(struct proc *p, int stat)
{
	int s;

	s = splhigh();
	if (p->p_stat == SRUN) {
		if (p->p_flag & P_ONRUNQ)
			remrunqueue(p);
	}
	p->p_stat = stat;
	splx(s);
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * YYY real time / idle procs do not use p_priority XXX
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;
	int opq;
	int npq;

	if (p->p_rtprio.type != RTP_PRIO_NORMAL)
		return;

	newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * p->p_nice;
	newpriority = min(newpriority, MAXPRI);
	npq = newpriority / PPQ;
	crit_enter();
	opq = p->p_priority / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = newpriority;
	}
	crit_exit();
	maybe_resched(p);
}
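
/*
 * Worked example of the formula above (constants assumed from the i386
 * param.h of this era: PUSER 50, INVERSE_ESTCPU_WEIGHT 8, NICE_WEIGHT 2,
 * PPQ 4, MAXPRI 127): p_estcpu == 80 and p_nice == 5 give
 *
 *	newpriority = 50 + 80/8 + 2*5 = 70,	run queue 70/4 == 17
 *
 * i.e. every INVERSE_ESTCPU_WEIGHT points of p_estcpu cost one priority
 * point, and every PPQ priority points move the process down one queue.
 */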
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;

	avg = &averunnable;
	nrun = 0;
	LIST_FOREACH(p, &allproc, p_list) {
		switch (p->p_stat) {
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
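
/*
 * Sketch (userland, illustrative; FSHIFT assumed to be 11): with a
 * steady nrun the recurrence above converges to nrun * FSCALE, e.g.
 * two runnable processes drive the 1-minute average toward 2.00.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11			/* assumed, from <sys/param.h> */
#define FSCALE	(1 << FSHIFT)

int
main(void)
{
	unsigned long cexp0 = 0.9200444146293232 * FSCALE; /* exp(-1/12) */
	unsigned long ldavg = 0;
	int nrun = 2, i;

	for (i = 0; i < 60; i++)	/* 60 samples == 5 minutes */
		ldavg = (cexp0 * ldavg +
		    nrun * FSCALE * (FSCALE - cexp0)) >> FSHIFT;
	printf("1-minute load after 5 minutes: %.2f\n",
	    (double)ldavg / FSCALE);
	return (0);
}
#endif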
/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}
/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
schedclock(struct proc *p)
{
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0)
		resetpriority(p);
}
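
/*
 * Rough numbers for the ramp (stathz of 128 and INVERSE_ESTCPU_WEIGHT
 * of 8 assumed): a CPU-bound process gains one p_estcpu per statclock
 * tick, so it loses one priority point every 8 ticks (about 1/16 of a
 * second) and drops a full PPQ-wide run queue roughly four times a
 * second, while schedcpu() only decays p_estcpu once per second.
 */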
static void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splstatclock();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}