ANSIfication and style cleanups. Non-operational.
[dragonfly.git] / sys / kern / kern_synch.c
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.31 2004/03/30 19:14:11 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

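/*
 * Illustrative note (not part of the original source): the handler above
 * exports the scheduling quantum in microseconds, converting via `tick'
 * (microseconds per clock tick).  For example, assuming hz = 100 (so
 * tick = 10000), the default quantum of hz/10 = 10 ticks reads back as
 * 100000, and a hypothetical
 *
 *	sysctl kern.quantum=20000
 *
 * would set sched_quantum = 20000 / 10000 = 2 ticks and hogticks = 4.
 */
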
int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING! The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	timeout(roundrobin, NULL, sched_quantum);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUVFREQ / load
 *	decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

#define	cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
#define	decay_cpu(loadav,estcpu)	\
	((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))

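/*
 * Illustrative note (not part of the original source): cload() clamps the
 * fixed-point load average to at least FSCALE (a load of 1.0), and
 * decay_cpu() then scales estcpu by (ESTCPUVFREQ / ESTCPUMAX) / load.
 * Plugging estcpu = ESTCPUMAX into the macro yields a per-second decay of
 * roughly ESTCPUVFREQ / load, which is exactly the break-even rate the
 * comment above derives.
 */
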
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

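/*
 * Illustrative note (not part of the original source): schedcpu() runs once
 * per second and multiplies p_pctcpu by ccpu = exp(-1/20) on each pass, so
 * after 60 passes the remaining fraction is exp(-60/20) = exp(-3) ~= 0.05,
 * which is where the "decay 95% in 60 seconds" figure above comes from.
 */
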
/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int s;
	unsigned int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

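/*
 * Illustrative note (not part of the original source): LOOKUP() is a trivial
 * hash from a wait-channel address to one of the 128 sleep queues.  For a
 * hypothetical ident of 0xc0a1b2c4, (0xc0a1b2c4 >> 8) & 127 = 0xb2 & 0x7f =
 * 0x32, so that sleeper would land on slpque[50].
 */
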
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

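/*
 * Illustrative note (not part of the original source): with the common
 * hz = 100 the defaults set here work out to sched_quantum = 100/10 = 10
 * ticks (the 100ms round-robin interval mentioned earlier) and
 * hogticks = 2 * sched_quantum = 20 ticks.
 */
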
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.  Returns
 * 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
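/*
 * Illustrative usage (not part of the original source): a driver waiting on
 * a hypothetical event might sleep interruptibly for up to one second with
 *
 *	error = tsleep(&sc->sc_event, PCATCH, "scwait", hz);
 *
 * and would see 0 after a wakeup(&sc->sc_event), EWOULDBLOCK on timeout, or
 * EINTR/ERESTART if a signal was delivered first.
 */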
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int s, sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout_handle thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	s = splhigh();
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	crit_enter();
	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self();
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo)
		thandle = timeout(endtsleep, (void *)td, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self();
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove ourself
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	crit_exit();
	if (p)
		p->p_flag &= ~P_SINTR;
	splx(s);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		untimeout(endtsleep, (void *)td, thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	splx(s);
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{
	int s;

	s = splhigh();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	splx(s);
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;
	int s;

	s = splhigh();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				setrunqueue(p);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	splx(s);
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int s;
	int id = LOOKUP(ident);

	s = splhigh();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	splx(s);
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}

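/*
 * Illustrative note (not part of the original source): _wakeup()'s count
 * argument of 0 effectively never satisfies the `--count == 0' test, so
 * wakeup() makes every thread sleeping on the identifier runnable, while
 * wakeup_one() passes 1 and stops after the first match.
 */
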
/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;	/* XXX */
	struct rlimit *rlim;
	int x;
	u_int64_t ttime;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit math is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.  If we
	 * are in a SSTOPped state we deschedule ourselves.  YYY this needs
	 * to be cleaned up, remember that LWKTs stay on their run queue
	 * which works differently than the user scheduler which removes
	 * the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self();
	lwkt_switch();

	splx(x);
}

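/*
 * Illustrative note (not part of the original source): the resource check in
 * mi_switch() above compares the accumulated system + user time (ttime,
 * apparently kept in microseconds given the division by 1000000) against
 * p_cpulimit, and only scales down to seconds when deciding between
 * killproc() and SIGXCPU against the RLIMIT_CPU soft/hard limits.
 */
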
/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.
	 */
	newpriority = NICE_ADJUST(p->p_nice - PRIO_MIN) +
			p->p_estcpu / ESTCPURAMP;
	newpriority = min(newpriority, MAXPRI);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}

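/*
 * Illustrative note (not part of the original source): for RTP_PRIO_NORMAL
 * processes the computed priority is bucketed into run queues PPQ entries
 * wide.  A process already on a run queue is dequeued and requeued only when
 * its bucket (opq vs npq) actually changes; otherwise resetpriority() just
 * rewrites p_priority in place.
 */
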
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

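/*
 * Illustrative note (not part of the original source): each pass computes an
 * exponential moving average in fixed point,
 *
 *	ldavg = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * which is ldavg' = ldavg * e^(-t/T) + nrun * (1 - e^(-t/T)) for a nominal
 * t = 5 second sampling interval and T = 60, 300 and 900 seconds, matching
 * the cexp[] constants above (e.g. cexp[0] = exp(-5/60) = exp(-1/12)).
 */
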
/* ARGSUSED */
static void
sched_setup(void *dummy)
{

	callout_init(&loadav_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu + ESTCPUVFREQ / ESTCPUFREQ);
		if (try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}
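
/*
 * Illustrative note (not part of the original source): each schedulerclock
 * tick adds ESTCPUVFREQ / ESTCPUFREQ to p_estcpu, so over one second
 * (ESTCPUFREQ ticks) a fully cpu-bound process accumulates roughly
 * ESTCPUVFREQ worth of estcpu, clamped by ESTCPULIM(); schedcpu() then
 * decays it once a second via decay_cpu() as derived earlier in this file.
 */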

static
void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}