/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c     8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.10 2003/06/28 02:09:52 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

u_char  curpriority;
int     hogticks;
int     lbolt;
int     sched_quantum;          /* Roundrobin scheduling quantum in ticks. */
int     ncpus;

static struct callout loadav_callout;

struct loadavg averunnable =
        { {0, 0, 0}, FSCALE };  /* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
        0.9200444146293232 * FSCALE,    /* exp(-1/12) */
        0.9834714538216174 * FSCALE,    /* exp(-1/60) */
        0.9944598480048967 * FSCALE,    /* exp(-1/180) */
};
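
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * each cexp[] entry is exp(-interval/period) for the 5 second sampling
 * interval against the 1, 5, and 15 minute averaging windows, e.g.
 * exp(-5/60) == exp(-1/12) for the 1 minute average.  A userland
 * program like this can regenerate the constants if the sampling
 * interval ever changes.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
        double period[3] = { 60.0, 300.0, 900.0 };      /* averaging windows */
        double interval = 5.0;                          /* sampling interval */
        int i;

        for (i = 0; i < 3; i++)
                printf("%.16f * FSCALE,\n", exp(-interval / period[i]));
        return (0);
}
#endif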

static int      curpriority_cmp __P((struct proc *p));
static void     endtsleep __P((void *));
static void     loadav __P((void *arg));
static void     maybe_resched __P((struct proc *chk));
static void     roundrobin __P((void *arg));
static void     schedcpu __P((void *arg));
static void     updatepri __P((struct proc *p));

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
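
/*
 * Illustrative userland sketch (not part of the original file, not
 * compiled): kern.quantum is exported in microseconds; the handler
 * above converts to and from scheduler ticks via `tick' and rejects
 * requests smaller than one tick.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int quantum;
        size_t len = sizeof(quantum);

        if (sysctlbyname("kern.quantum", &quantum, &len, NULL, 0) == 0)
                printf("quantum: %d us\n", quantum);

        quantum = 20000;        /* request a 20ms quantum */
        if (sysctlbyname("kern.quantum", NULL, NULL, &quantum,
            sizeof(quantum)) != 0)
                perror("kern.quantum");
        return (0);
}
#endif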

/*-
 * Compare priorities.  Return:
 *  <0: priority of p < current priority
 *   0: priority of p == current priority
 *  >0: priority of p > current priority
 * The priorities are the normal priorities or the normal realtime priorities
 * if p is on the same scheduler as curproc.  Otherwise the process on the
 * more realtimeish scheduler has lowest priority.  As usual, a higher
 * priority really means a lower priority.
 */
static int
curpriority_cmp(p)
        struct proc *p;
{
        int c_class, p_class;

        c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
        p_class = RTP_PRIO_BASE(p->p_rtprio.type);
        if (p_class != c_class)
                return (p_class - c_class);
        if (p_class == RTP_PRIO_NORMAL)
                return (((int)p->p_priority - (int)curpriority) / PPQ);
        return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
}

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(chk)
        struct proc *chk;
{
        struct proc *p = curproc; /* XXX */

        /*
         * XXX idle scheduler still broken because a process stays on the
         * idle scheduler during waits (such as when getting FS locks).  If
         * a standard process becomes runaway cpu-bound, the system can lock
         * up due to idle-scheduler processes in wakeup never getting any cpu.
         */
        if (p == NULL) {
#if 0
                need_resched();
#endif
        } else if (chk == p) {
                /* We may need to yield if our priority has been raised. */
                if (curpriority_cmp(chk) > 0)
                        need_resched();
        } else if (curpriority_cmp(chk) < 0)
                need_resched();
}

int
roundrobin_interval(void)
{
        return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
        void *arg;
{
#ifndef SMP
        struct proc *p = curproc; /* XXX */
#endif

#ifdef SMP
        need_resched();
        forward_roundrobin();
#else
        if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
                need_resched();
#endif

        timeout(roundrobin, NULL, sched_quantum);
}

/*
 * Constants for digital decay and forget:
 *      90% of (p_estcpu) usage in 5 * loadav time
 *      95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *      for (i = 0; i < (5 * loadavg); i++)
 *              p_estcpu *= decay;
 * will compute
 *      p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *      decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *      decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *      decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *      b = 2 * loadavg
 * then
 *      decay = b / (b + 1)
 *
 * We now need to prove two things:
 *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *      For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *      therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *      For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *      therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *      ln(.1) =~ -2.30
 *
 * Proof of (1):
 *      Solve (factor)**(power) =~ .1 given power (5*loadav):
 *          solving for factor,
 *          ln(factor) =~ (-2.30/5*loadav), or
 *          factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *              exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *      Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *          solving for power,
 *          power*ln(b/(b+1)) =~ -2.30, or
 *          power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)      (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
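
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * with a load average of 1.0 (loadfac == 2 * FSCALE) each decay_cpu()
 * application multiplies by exactly 2/3, so ~5.68 applications leave
 * 10% of the original value, matching the "power" table above.
 */
#if 0
static void
decay_example(void)
{
        fixpt_t loadfac = loadfactor(FSCALE);   /* loadav == 1.0 fixed pt */
        unsigned int cpu = 1000;
        int i;

        for (i = 0; i < 6; i++)                 /* ~5.68 applications */
                cpu = decay_cpu(loadfac, cpu);
        /* cpu: 1000 -> 666 -> 444 -> 296 -> 197 -> 131 -> 87, ~10% left */
}
#endif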

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE;        /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT      11
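
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * iterating schedcpu()'s percentage update for a fully CPU-bound
 * process (p_cpticks == realstathz == 100 every second) converges on
 * p_pctcpu close to FSCALE, which ps(1) displays as ~100%.  Assumes
 * the usual FSHIFT == 11 (FSCALE == 2048), making the shift by
 * (FSHIFT - CCPU_SHIFT) a no-op.
 */
#if 0
static void
pctcpu_example(void)
{
        fixpt_t pctcpu = 0;
        int sec;

        for (sec = 0; sec < 120; sec++) {
                pctcpu = (pctcpu * ccpu) >> FSHIFT;     /* decay old usage */
                pctcpu += (fixpt_t)100 << (FSHIFT - CCPU_SHIFT); /* new ticks */
        }
        /* pctcpu has converged near FSCALE (2048), i.e. ~100% */
}
#endif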

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
        void *arg;
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        register struct proc *p;
        register int realstathz, s;

        realstathz = stathz ? stathz : hz;
        LIST_FOREACH(p, &allproc, p_list) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                 */
                p->p_swtime++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 */
                if (p->p_slptime > 1)
                        continue;
                s = splhigh();  /* prevent state changes and protect run queue */
                /*
                 * p_pctcpu is only for ps.
                 */
#if     (FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (realstathz == 100)?
                        ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                        100 * (((fixpt_t) p->p_cpticks)
                                << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                        (p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
                resetpriority(p);
                if (p->p_priority >= PUSER) {
                        if ((p != curproc) &&
#ifdef SMP
                            p->p_oncpu == 0xff &&       /* idle */
#endif
                            p->p_stat == SRUN &&
                            (p->p_flag & P_INMEM) &&
                            (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
                                remrunqueue(p);
                                p->p_priority = p->p_usrpri;
                                setrunqueue(p);
                        } else {
                                p->p_priority = p->p_usrpri;
                        }
                }
                splx(s);
        }
        wakeup((caddr_t)&lbolt);
        timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
        register struct proc *p;
{
        register unsigned int newcpu = p->p_estcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        if (p->p_slptime > 5 * loadfac)
                p->p_estcpu = 0;
        else {
                p->p_slptime--; /* the first time was done in schedcpu */
                while (newcpu && --p->p_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                p->p_estcpu = newcpu;
        }
        resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE       128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)       (((intptr_t)(x) >> 8) & (TABLESIZE - 1))
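
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * LOOKUP() discards the low 8 address bits and hashes on the next 7,
 * so wait channels in the same 256-byte region share a bucket.  The
 * collisions are harmless because the queue scan in _wakeup() still
 * compares td_wchan against the exact ident.
 */
#if 0
static void
lookup_example(void)
{
        /* assume buf happens to be 256-byte aligned */
        static char buf[1024];

        LOOKUP(&buf[0]);        /* some bucket n */
        LOOKUP(&buf[100]);      /* same bucket n: same 256-byte region */
        LOOKUP(&buf[256]);      /* bucket (n + 1) & (TABLESIZE - 1) */
}
#endif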

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
        int i;

        sched_quantum = hz/10;
        hogticks = 2 * sched_quantum;
        for (i = 0; i < TABLESIZE; i++)
                TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 */
int
tsleep(ident, priority, wmesg, timo)
        void *ident;
        int priority, timo;
        const char *wmesg;
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;           /* may be NULL */
        int s, sig = 0, catch = priority & PCATCH;
        int id = LOOKUP(ident);
        struct callout_handle thandle;

        /*
         * NOTE: removed KTRPOINT, it could cause races due to blocking
         * even in stable.  Just scrap it for now.
         */
        s = splhigh();

        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return (0);
        }
        KASSERT(ident != NULL, ("tsleep: no ident"));
        KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
                ident, wmesg, p->p_stat));

        td->td_wchan = ident;
        td->td_wmesg = wmesg;
        if (p) {
                p->p_slptime = 0;
                p->p_priority = priority & PRIMASK;
        }
        lwkt_deschedule_self();
        TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
        if (timo)
                thandle = timeout(endtsleep, (void *)td, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (p) {
                if (catch) {
                        p->p_flag |= P_SINTR;
                        if ((sig = CURSIG(p))) {
                                if (td->td_wchan)
                                        unsleep(td);
                                p->p_stat = SRUN;
                                goto resume;
                        }
                        if (p->p_wchan == 0) {
                                catch = 0;
                                goto resume;
                        }
                } else {
                        sig = 0;
                }
                p->p_stat = SSLEEP;
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch();
        } else {
                lwkt_switch();
        }
resume:
        if (p) {
                curpriority = p->p_usrpri;
                p->p_flag &= ~P_SINTR;
        }
        splx(s);
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                if (sig == 0)
                        return (EWOULDBLOCK);
        } else if (timo) {
                untimeout(endtsleep, (void *)td, thandle);
        }
        if (p) {
                if (catch && (sig != 0 || (sig = CURSIG(p)))) {
                        if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                return (EINTR);
                        return (ERESTART);
                }
        }
        return (0);
}
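
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the canonical consumer-side tsleep() loop.  The condition is
 * re-tested after every wakeup because wakeup() is a broadcast, and
 * the caller raises the spl so the test and the sleep are atomic with
 * respect to the interrupt that sets the condition.  `mydev_softc'
 * and `sc_ready' are hypothetical driver state.
 */
#if 0
static int
tsleep_example(struct mydev_softc *sc)
{
        int error = 0, s;

        s = splhigh();                  /* close the test/sleep race */
        while (sc->sc_ready == 0) {
                /* sleep at most 5 seconds, interruptibly */
                error = tsleep(&sc->sc_ready, PWAIT | PCATCH, "scwait",
                    5 * hz);
                if (error)              /* EWOULDBLOCK, EINTR or ERESTART */
                        break;
        }
        splx(s);
        return (error);
}
#endif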

#if 0

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified xwait structure.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * If the passed generation number is different from the generation number
 * in the xwait, return immediately.
 */
int
xsleep(struct xwait *w, int priority, const char *wmesg, int timo, int *gen)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int s, sig, catch = priority & PCATCH;
        struct callout_handle thandle;

#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(p->p_tracep, 1, 0);
#endif
        s = splhigh();

        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return (0);
        }
        KASSERT(p != NULL, ("xsleep1"));
        KASSERT(w != NULL && p->p_stat == SRUN, ("xsleep"));

        /*
         * If the generation number does not match we return immediately.
         */
        if (*gen != w->gen) {
                *gen = w->gen;
                splx(s);
#ifdef KTRACE
                if (KTRPOINT(td, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
#endif
                return(0);
        }

        p->p_wchan = w;
        p->p_wmesg = wmesg;
        p->p_slptime = 0;
        p->p_priority = priority & PRIMASK;
        p->p_flag |= P_XSLEEP;
        TAILQ_INSERT_TAIL(&w->waitq, p, p_procq);
        if (timo)
                thandle = timeout(endtsleep, (void *)p, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (catch) {
                p->p_flag |= P_SINTR;
                if ((sig = CURSIG(p))) {
                        if (p->p_wchan)
                                unsleep(p);
                        p->p_stat = SRUN;
                        goto resume;
                }
                if (p->p_wchan == NULL) {
                        catch = 0;
                        goto resume;
                }
        } else
                sig = 0;
        p->p_stat = SSLEEP;
        p->p_stats->p_ru.ru_nvcsw++;
        mi_switch();
resume:
        curpriority = p->p_usrpri;
        *gen = w->gen;  /* update generation number */
        splx(s);
        p->p_flag &= ~P_SINTR;
        if (p->p_flag & P_TIMEOUT) {
                p->p_flag &= ~P_TIMEOUT;
                if (sig == 0) {
#ifdef KTRACE
                        if (KTRPOINT(td, KTR_CSW))
                                ktrcsw(p->p_tracep, 0, 0);
#endif
                        return (EWOULDBLOCK);
                }
        } else if (timo)
                untimeout(endtsleep, (void *)p, thandle);
        if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
                if (KTRPOINT(td, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
#endif
                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                        return (EINTR);
                return (ERESTART);
        }
#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
#endif
        return (0);
}

#endif

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
        thread_t td = arg;
        struct proc *p;
        int s;

        s = splhigh();
        if (td->td_wchan) {
                td->td_flags |= TDF_TIMEOUT;
                if ((p = td->td_proc) != NULL) {
                        if (p->p_stat == SSLEEP)
                                setrunnable(p);
                        else
                                unsleep(td);
                } else {
                        unsleep(td);
                        lwkt_schedule(td);
                }
        }
        splx(s);
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{
        int s;

        s = splhigh();
        if (td->td_wchan) {
#if 0
                if (p->p_flag & P_XSLEEP) {
                        struct xwait *w = p->p_wchan;
                        TAILQ_REMOVE(&w->waitq, p, p_procq);
                        p->p_flag &= ~P_XSLEEP;
                } else
#endif
                TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
                td->td_wchan = NULL;
        }
        splx(s);
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
        struct proc *p;
        int s;

        s = splhigh();
        ++w->gen;
        while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
                TAILQ_REMOVE(&w->waitq, p, p_procq);
                KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
                    ("xwakeup: wchan mismatch for %p (%p/%p) %08x",
                    p, p->p_wchan, w, p->p_flag & P_XSLEEP));
                p->p_wchan = NULL;
                p->p_flag &= ~P_XSLEEP;
                if (p->p_stat == SSLEEP) {
                        /* OPTIMIZED EXPANSION OF setrunnable(p); */
                        if (p->p_slptime > 1)
                                updatepri(p);
                        p->p_slptime = 0;
                        p->p_stat = SRUN;
                        if (p->p_flag & P_INMEM) {
                                setrunqueue(p);
                                maybe_resched(p);
                        } else {
                                p->p_flag |= P_SWAPINREQ;
                                wakeup((caddr_t)&proc0);
                        }
                }
        }
        splx(s);
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
        struct slpquehead *qp;
        struct thread *td;
        struct thread *ntd;
        struct proc *p;
        int s;
        int id = LOOKUP(ident);

        s = splhigh();
        qp = &slpque[id];
restart:
        for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
                ntd = TAILQ_NEXT(td, td_threadq);
                if (td->td_wchan == ident) {
                        TAILQ_REMOVE(qp, td, td_threadq);
                        td->td_wchan = NULL;
                        if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & P_INMEM) {
                                        setrunqueue(p);
                                        maybe_resched(p);
                                } else {
                                        p->p_flag |= P_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                        } else if (p == NULL) {
                                lwkt_schedule(td);
                        }
                        if (--count == 0)
                                break;
                        goto restart;
                }
        }
        splx(s);
}

void
wakeup(void *ident)
{
        _wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
        _wakeup(ident, 1);
}
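
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the producer side matching the tsleep() loop shown earlier.
 * wakeup() is a broadcast; wakeup_one() is preferable when any single
 * waiter can consume the event, avoiding a thundering herd.
 * `mydev_softc' is the same hypothetical driver state.
 */
#if 0
static void
wakeup_example(struct mydev_softc *sc)
{
        int s;

        s = splhigh();
        sc->sc_ready = 1;               /* publish the condition first ... */
        wakeup(&sc->sc_ready);          /* ... then wake every sleeper */
        splx(s);
}
#endif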

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;   /* XXX */
        struct rlimit *rlim;
        int x;
        u_int64_t ttime;

        /*
         * XXX this spl is almost unnecessary.  It is partly to allow for
         * sloppy callers that don't do it (issignal() via CURSIG() is the
         * main offender).  It is partly to work around a bug in the i386
         * cpu_switch() (the ipl is not preserved).  We ran for years
         * without it.  I think there was only an interrupt latency problem.
         * The main caller, tsleep(), does an splx() a couple of instructions
         * after calling here.  The buggy caller, issignal(), usually calls
         * here at spl0() and sometimes returns at splhigh().  The process
         * then runs for a little too long at splhigh().  The ipl gets fixed
         * when the process returns to user mode (or earlier).
         *
         * It would probably be better to always call here at spl0(). Callers
         * are prepared to give up control to another process, so they must
         * be prepared to be interrupted.  The clock stuff here may not
         * actually need splstatclock().
         */
        x = splstatclock();
        clear_resched();

#ifdef SIMPLELOCK_DEBUG
        if (p->p_simple_locks)
                printf("sleep: holding simple lock\n");
#endif

        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.  Time spent in interrupts is not
         * included.  YYY 64 bit math is expensive.  Ick.
         */
        ttime = td->td_sticks + td->td_uticks;
        if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
            ttime > p->p_limit->p_cpulimit) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
                        killproc(p, "exceeded maximum CPU limit");
                } else {
                        psignal(p, SIGXCPU);
                        if (rlim->rlim_cur < rlim->rlim_max) {
                                /* XXX: we should make a private copy */
                                rlim->rlim_cur += 5;
                        }
                }
        }

        /*
         * Pick a new current process and record its start time.
         * YYY lwkt_switch() will run the heavy weight process restoration
         * code, which removes the target thread and process from their
         * respective run queues to temporarily mimic 5.x behavior.
         * YYY the userland scheduler should pick only one user process
         * at a time to run per cpu.
         */
        cnt.v_swtch++;
        lwkt_switch();
        remrunqueue(p);

        splx(x);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
        int s;

        s = splhigh();
        switch (p->p_stat) {
        case 0:
        case SRUN:
        case SZOMB:
        default:
                panic("setrunnable");
        case SSTOP:
        case SSLEEP:
                unsleep(p->p_thread);   /* e.g. when sending signals */
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;
        if (p->p_flag & P_INMEM)
                setrunqueue(p);
        splx(s);
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_flag & P_INMEM) == 0) {
                p->p_flag |= P_SWAPINREQ;
                wakeup((caddr_t)&proc0);
        } else
                maybe_resched(p);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
        register struct proc *p;
{
        register unsigned int newpriority;

        if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
                newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * p->p_nice;
                newpriority = min(newpriority, MAXPRI);
                p->p_usrpri = newpriority;
        }
        maybe_resched(p);
}
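
/*
 * Illustrative worked example (not part of the original file), assuming
 * the stock 4.x constants PUSER == 50, INVERSE_ESTCPU_WEIGHT == 8 and
 * NICE_WEIGHT == 2: a process with p_estcpu == 80 and p_nice == 0 gets
 *      newpriority = 50 + 80/8 + 2*0 == 60,
 * while reniceing it to +10 pushes that to 80.  Accumulated cpu time
 * and niceness both raise (i.e. worsen) the user-mode priority value,
 * up to the MAXPRI clamp.
 */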

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
        int i, nrun;
        struct loadavg *avg;
        struct proc *p;

        avg = &averunnable;
        nrun = 0;
        LIST_FOREACH(p, &allproc, p_list) {
                switch (p->p_stat) {
                case SRUN:
                case SIDL:
                        nrun++;
                }
        }
        for (i = 0; i < 3; i++)
                avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
                    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

        /*
         * Schedule the next update to occur after 5 seconds, but add a
         * random variation to avoid synchronisation with processes that
         * run at regular intervals.
         */
        callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
            loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
        void *dummy;
{

        callout_init(&loadav_callout);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
        loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(p)
        struct proc *p;
{

        p->p_cpticks++;
        p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
        if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(p);
                if (p->p_priority >= PUSER)
                        p->p_priority = p->p_usrpri;
        }
}