/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.47 2005/06/27 18:37:57 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;
int	safepri;

static struct callout loadav_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
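
/*
 * Worked example (editor's sketch, not from the original source): with a
 * sample every 5 seconds, each loadav() update below computes the
 * fixed-point exponential moving average
 *
 *	avg = (cexp[i] * avg + nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 *
 * i.e. avg' = avg * e^(-5/T) + nrun * (1 - e^(-5/T)) for T = 60, 300 and
 * 900 seconds.  For a constant nrun the average converges to nrun; the
 * 1-minute figure closes about 63% (1 - e^-1) of the gap to a new steady
 * state within one minute.
 */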

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

/*
 * Adjust the scheduler quantum.  The quantum is specified in microseconds.
 * Note that 'tick' is in microseconds per tick.
 */
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 *
 * Decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing.
 */
#define	CCPU_SHIFT	11

static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
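
/*
 * Worked derivation (editor's note, based on the constant above): schedcpu()
 * multiplies p_pctcpu by ccpu = exp(-1/20) once per second for a process
 * that has slept longer than a second.  After 60 such updates the remaining
 * fraction is exp(-60/20) = exp(-3) ~= 0.0498, which is where the
 * "decay 95% in 60 seconds" figure comes from.
 */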

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
static int fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t are guaranteed to hold
 * several seconds worth of count.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	struct proc *p;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		crit_enter();
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 */
		if (p->p_slptime <= 1) {
			p->p_usched->recalculate(p);
		} else {
			p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		}
		crit_exit();
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 */
void
updatepcpu(struct proc *p, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		p->p_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		p->p_pctcpu = (acc * ttlticks + p->p_pctcpu * remticks) /
			      ESTCPUFREQ;
	}
}
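
/*
 * Illustrative note (editor's sketch): when fewer than ESTCPUFREQ ticks of
 * samples are available, the fresh sample and the old p_pctcpu are blended
 * in proportion to how much of the window each covers.  For example, with
 * ttlticks == ESTCPUFREQ / 2 the new measurement and the previous value
 * are weighted equally.
 */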

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
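
/*
 * Worked example (editor's illustration): LOOKUP(0xc0a12345) discards the
 * low 8 bits (0xc0a12345 >> 8 == 0xc0a123) and keeps the low 7 bits of the
 * result (0xc0a123 & 127 == 0x23), selecting sleep queue bucket 35.
 */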

/*
 * General scheduler initialization.  We force a reschedule 25 times
 * a second by default.  For example, with hz = 100 this yields a
 * 4-tick round-robin quantum.
 */
void
sleepinit(void)
{
	int i;

	sched_quantum = (hz + 24) / 25;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	int oldpri;
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri & TDPRI_MASK;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		p->p_usched->release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove
		 * ourselves from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

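/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical sleep/wakeup pairing on a hypothetical 'pending' flag.  The
 * sleeping side re-checks its condition in a loop; the producing side sets
 * the condition and calls wakeup() on the same address.
 *
 *	while (pending == 0) {
 *		error = tsleep(&pending, PCATCH, "pwait", 5 * hz);
 *		if (error == EWOULDBLOCK)
 *			break;		(timed out after ~5 seconds)
 *		if (error)
 *			return (error);	(EINTR or ERESTART from a signal)
 *	}
 *
 *	pending = 1;
 *	wakeup(&pending);
 */
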
/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int domain, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident && td->td_wdomain == domain) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.  We do NOT call setrunqueue().
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	crit_exit();
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 0, 1);
}

void
wakeup_domain(void *ident, int domain)
{
	_wakeup(ident, domain, 0);
}

void
wakeup_domain_one(void *ident, int domain)
{
	_wakeup(ident, domain, 1);
}
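
/*
 * Editor's note (derived from _wakeup() above): wakeup() and
 * wakeup_domain() pass a count of 0, which in practice never trips the
 * "--count == 0" test, so every thread sleeping on the ident is awakened;
 * the *_one() variants pass 1 and stop after the first matching thread,
 * which is appropriate when any single waiter can consume the event.
 */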

/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit match is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
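		/*
		 * Editor's note: ttime is accumulated in microseconds,
		 * while the RLIMIT_CPU rlim_cur/rlim_max values are in
		 * seconds, hence the division by 1000000 below.
		 */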
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in a SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler
	 * which removes the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	crit_enter();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.  We do not clear p_slptime or call
	 * setrunqueue().
	 */
	if (p->p_flag & P_INMEM) {
		lwkt_schedule(p->p_thread);
	} else {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	crit_exit();
}

/*
 * Yield / synchronous reschedule.  This is a bit tricky because the trap
 * code might have set a lazy release on the switch function.  Setting
 * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
 * switch, and that we are given a greater chance of affinity with our
 * current cpu.
 *
 * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
 * run queue.  lwkt_switch() will also execute any assigned passive release
 * (which usually calls release_curproc()), allowing a same/higher priority
 * process to be designated as the current process.
 *
 * While it is possible for a lower priority process to be designated,
 * its call to lwkt_maybe_switch() in acquire_curproc() will likely
 * round-robin back to us and we will be able to re-acquire the current
 * process designation.
 */
void
uio_yield(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
	if (p) {
		p->p_flag |= P_PASSIVE_ACQ;
		lwkt_switch();
		p->p_flag &= ~P_PASSIVE_ACQ;
	} else {
		lwkt_switch();
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		p->p_usched->remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	thread_t td;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
			if ((td = p->p_thread) == NULL)
				break;
			if (td->td_flags & TDF_BLOCKED)
				break;
			/* fall through */
		case SIDL:
			nrun++;
			break;
		default:
			break;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds on average,
	 * picking a random point in the 4-6 second range to avoid
	 * synchronisation with processes that run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	schedcpu(NULL);
	loadav(NULL);
}