984263bc MD |
1 | /*- |
2 | * Copyright (c) 1982, 1986, 1990, 1991, 1993 | |
3 | * The Regents of the University of California. All rights reserved. | |
4 | * (c) UNIX System Laboratories, Inc. | |
5 | * All or some portions of this file are derived from material licensed | |
6 | * to the University of California by American Telephone and Telegraph | |
7 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with | |
8 | * the permission of UNIX System Laboratories, Inc. | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | |
18 | * 3. All advertising materials mentioning features or use of this software | |
19 | * must display the following acknowledgement: | |
20 | * This product includes software developed by the University of | |
21 | * California, Berkeley and its contributors. | |
22 | * 4. Neither the name of the University nor the names of its contributors | |
23 | * may be used to endorse or promote products derived from this software | |
24 | * without specific prior written permission. | |
25 | * | |
26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
36 | * SUCH DAMAGE. | |
37 | * | |
38 | * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 | |
39 | * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $ | |
2daf83b0 | 40 | * $DragonFly: src/sys/kern/kern_synch.c,v 1.78 2007/02/22 15:49:08 corecode Exp $ |
984263bc MD |
41 | */ |
42 | ||
43 | #include "opt_ktrace.h" | |
44 | ||
45 | #include <sys/param.h> | |
46 | #include <sys/systm.h> | |
47 | #include <sys/proc.h> | |
48 | #include <sys/kernel.h> | |
49 | #include <sys/signalvar.h> | |
50 | #include <sys/resourcevar.h> | |
51 | #include <sys/vmmeter.h> | |
52 | #include <sys/sysctl.h> | |
344ad853 | 53 | #include <sys/lock.h> |
984263bc MD |
54 | #ifdef KTRACE |
55 | #include <sys/uio.h> | |
56 | #include <sys/ktrace.h> | |
57 | #endif | |
f1d1c3fa | 58 | #include <sys/xwait.h> |
9afb0ffd | 59 | #include <sys/ktr.h> |
984263bc | 60 | |
bf765287 MD |
61 | #include <sys/thread2.h> |
62 | #include <sys/spinlock2.h> | |
63 | ||
984263bc | 64 | #include <machine/cpu.h> |
984263bc MD |
65 | #include <machine/smp.h> |
66 | ||
fc17ad60 MD |
67 | TAILQ_HEAD(tslpque, thread); |
68 | ||
402ed7e1 | 69 | static void sched_setup (void *dummy); |
984263bc MD |
70 | SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL) |
71 | ||
984263bc MD |
72 | int hogticks; |
73 | int lbolt; | |
344ad853 | 74 | int lbolt_syncer; |
984263bc | 75 | int sched_quantum; /* Roundrobin scheduling quantum in ticks. */ |
17a9f566 | 76 | int ncpus; |
90100055 | 77 | int ncpus2, ncpus2_shift, ncpus2_mask; |
e43a034f | 78 | int safepri; |
984263bc MD |
79 | |
80 | static struct callout loadav_callout; | |
35f9d051 | 81 | static struct callout schedcpu_callout; |
fc17ad60 | 82 | MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues"); |
984263bc | 83 | |
9afb0ffd MD |
84 | #if !defined(KTR_TSLEEP) |
85 | #define KTR_TSLEEP KTR_ALL | |
86 | #endif | |
87 | KTR_INFO_MASTER(tsleep); | |
88 | KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter", 0); | |
89 | KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 0, "tsleep exit", 0); | |
90 | KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 0, "wakeup enter", 0); | |
91 | KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 0, "wakeup exit", 0); | |
92 | #define logtsleep(name) KTR_LOG(tsleep_ ## name) | |
93 | ||
984263bc MD |
94 | struct loadavg averunnable = |
95 | { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ | |
96 | /* | |
97 | * Constants for averages over 1, 5, and 15 minutes | |
98 | * when sampling at 5 second intervals. | |
99 | */ | |
100 | static fixpt_t cexp[3] = { | |
101 | 0.9200444146293232 * FSCALE, /* exp(-1/12) */ | |
102 | 0.9834714538216174 * FSCALE, /* exp(-1/60) */ | |
103 | 0.9944598480048967 * FSCALE, /* exp(-1/180) */ | |
104 | }; | |
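/*
 * Worked example (illustrative only; assumes FSHIFT == 11, i.e.
 * FSCALE == 2048, the usual BSD default): the 1-minute constant is
 * exp(-5/60) == exp(-1/12) ~= 0.92004, or about 1884 in fixed point.
 * loadav() below then updates the 1-minute figure every 5 seconds as
 *
 *	ldavg = (1884 * ldavg + nrun * 2048 * (2048 - 1884)) >> 11;
 *
 * which, for a constant number of runnable lwps, converges toward
 * nrun * FSCALE; userland divides by FSCALE to display the value.
 */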
105 | ||
402ed7e1 | 106 | static void endtsleep (void *); |
344ad853 | 107 | static void unsleep_and_wakeup_thread(struct thread *td); |
402ed7e1 | 108 | static void loadav (void *arg); |
402ed7e1 | 109 | static void schedcpu (void *arg); |
984263bc | 110 | |
a46fac56 MD |
111 | /* |
112 | * Adjust the scheduler quantum. The quantum is specified in microseconds. | |
113 | * Note that 'tick' is in microseconds per tick. | |
114 | */ | |
984263bc MD |
115 | static int |
116 | sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) | |
117 | { | |
118 | int error, new_val; | |
119 | ||
120 | new_val = sched_quantum * tick; | |
121 | error = sysctl_handle_int(oidp, &new_val, 0, req); | |
122 | if (error != 0 || req->newptr == NULL) | |
123 | return (error); | |
124 | if (new_val < tick) | |
125 | return (EINVAL); | |
126 | sched_quantum = new_val / tick; | |
127 | hogticks = 2 * sched_quantum; | |
128 | return (0); | |
129 | } | |
130 | ||
131 | SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW, | |
132 | 0, sizeof sched_quantum, sysctl_kern_quantum, "I", ""); | |
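/*
 * Worked example (illustrative only; assumes hz == 100, so 'tick' is
 * 10000 microseconds): writing 20000 to kern.quantum runs the handler
 * above with new_val == 20000, giving
 *
 *	sched_quantum = 20000 / 10000 = 2 ticks
 *	hogticks      = 2 * 2        = 4 ticks (40 ms)
 *
 * Reading kern.quantum reports sched_quantum * tick, i.e. the quantum
 * converted back to microseconds.
 */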
133 | ||
984263bc MD |
134 | /* |
135 | * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the | |
136 | * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below | |
137 | * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). | |
138 | * | |
139 | * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: | |
dcc99b62 | 140 | * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 100 (fixed pt, *11* bits). |
984263bc MD |
141 | * |
142 | * If you don't want to bother with the faster/more-accurate formula, you | |
143 | * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate | |
144 | * (more general) method of calculating the %age of CPU used by a process. | |
dcc99b62 | 145 | * |
08f2f1bb | 146 | * decay 95% of `lwp_pctcpu' in 60 seconds; see CCPU_SHIFT before changing |
dcc99b62 MD |
147 | */ |
148 | #define CCPU_SHIFT 11 | |
149 | ||
150 | static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ | |
151 | SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); | |
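/*
 * Worked example (illustrative only; assumes FSHIFT == 11): for an lwp
 * that has been asleep for more than a second, schedcpu_stats() below
 * applies lwp_pctcpu = (lwp_pctcpu * ccpu) >> FSHIFT once per second.
 * Since ccpu/FSCALE ~= exp(-1/20), after 60 seconds the old value has
 * been multiplied by exp(-3) ~= 0.05, i.e. roughly 95% of it has
 * decayed away, matching the "decay 95% ... in 60 seconds" note above.
 */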
152 | ||
153 | /* | |
154 | * kernel uses `FSCALE', userland (SHOULD) use kern.fscale | |
984263bc | 155 | */ |
460426e6 | 156 | int fscale __unused = FSCALE; /* exported to systat */ |
dcc99b62 | 157 | SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, ""); |
984263bc MD |
158 | |
159 | /* | |
0a3f9b47 | 160 | * Recompute process priorities, once a second. |
dcc99b62 MD |
161 | * |
162 | * Since the userland schedulers are typically event oriented, if the | |
163 | * estcpu calculation at wakeup() time is not sufficient to make a | |
164 | * process runnable relative to other processes in the system we have | |
165 | * a 1-second recalc to help out. | |
166 | * | |
167 | * This code also allows us to store sysclock_t data in the process structure | |
168 | * without fear of an overrun, since a sysclock_t is guaranteed to hold |
169 | * several seconds' worth of count. |
8fa76237 MD |
170 | * |
171 | * WARNING! callouts can preempt normal threads. However, they will not | |
172 | * preempt a thread holding a spinlock so we *can* safely use spinlocks. | |
984263bc | 173 | */ |
8fa76237 MD |
174 | static int schedcpu_stats(struct proc *p, void *data __unused); |
175 | static int schedcpu_resource(struct proc *p, void *data __unused); | |
176 | ||
984263bc | 177 | static void |
26a0694b | 178 | schedcpu(void *arg) |
984263bc | 179 | { |
8fa76237 MD |
180 | allproc_scan(schedcpu_stats, NULL); |
181 | allproc_scan(schedcpu_resource, NULL); | |
182 | wakeup((caddr_t)&lbolt); | |
183 | wakeup((caddr_t)&lbolt_syncer); | |
184 | callout_reset(&schedcpu_callout, hz, schedcpu, NULL); | |
185 | } | |
186 | ||
187 | /* | |
188 | * General process statistics once a second | |
189 | */ | |
190 | static int | |
191 | schedcpu_stats(struct proc *p, void *data __unused) | |
192 | { | |
08f2f1bb SS |
193 | struct lwp *lp; |
194 | ||
8fa76237 MD |
195 | crit_enter(); |
196 | p->p_swtime++; | |
c7e98b2f SS |
197 | FOREACH_LWP_IN_PROC(lp, p) { |
198 | if (lp->lwp_stat == LSSLEEP) | |
199 | lp->lwp_slptime++; | |
4b5f931b | 200 | |
c7e98b2f SS |
201 | /* |
202 | * Only recalculate processes that are active or have slept | |
203 | * less than 2 seconds. The schedulers understand this. |
204 | */ | |
205 | if (lp->lwp_slptime <= 1) { | |
206 | p->p_usched->recalculate(lp); | |
207 | } else { | |
208 | lp->lwp_pctcpu = (lp->lwp_pctcpu * ccpu) >> FSHIFT; | |
209 | } | |
8fa76237 MD |
210 | } |
211 | crit_exit(); | |
212 | return(0); | |
213 | } | |
a46fac56 | 214 | |
8fa76237 | 215 | /* |
84204577 | 216 | * Resource checks. XXX break out since ksignal/killproc can block, |
8fa76237 MD |
217 | * limiting us to one process killed per second. There is probably |
218 | * a better way. | |
219 | */ | |
220 | static int | |
221 | schedcpu_resource(struct proc *p, void *data __unused) | |
222 | { | |
223 | u_int64_t ttime; | |
08f2f1bb | 224 | struct lwp *lp; |
8fa76237 MD |
225 | |
226 | crit_enter(); | |
227 | if (p->p_stat == SIDL || | |
416d05d7 | 228 | p->p_stat == SZOMB || |
c7e98b2f | 229 | p->p_limit == NULL |
8fa76237 | 230 | ) { |
e43a034f | 231 | crit_exit(); |
8fa76237 | 232 | return(0); |
984263bc | 233 | } |
344ad853 | 234 | |
c7e98b2f SS |
235 | ttime = 0; |
236 | FOREACH_LWP_IN_PROC(lp, p) { | |
237 | ttime += lp->lwp_thread->td_sticks; | |
238 | ttime += lp->lwp_thread->td_uticks; | |
239 | } | |
8fa76237 MD |
240 | |
241 | switch(plimit_testcpulimit(p->p_limit, ttime)) { | |
242 | case PLIMIT_TESTCPU_KILL: | |
243 | killproc(p, "exceeded maximum CPU limit"); | |
244 | break; | |
245 | case PLIMIT_TESTCPU_XCPU: | |
246 | if ((p->p_flag & P_XCPU) == 0) { | |
247 | p->p_flag |= P_XCPU; | |
84204577 | 248 | ksignal(p, SIGXCPU); |
344ad853 | 249 | } |
8fa76237 MD |
250 | break; |
251 | default: | |
c0b8a06d | 252 | break; |
344ad853 | 253 | } |
8fa76237 MD |
254 | crit_exit(); |
255 | return(0); | |
984263bc MD |
256 | } |
257 | ||
258 | /* | |
dcc99b62 MD |
259 | * This is only used by ps. Generate a cpu usage percentage over |
260 | * a period of one second. |
52eedfb5 MD |
261 | * |
262 | * MPSAFE | |
984263bc | 263 | */ |
dcc99b62 | 264 | void |
553ea3c8 | 265 | updatepcpu(struct lwp *lp, int cpticks, int ttlticks) |
984263bc | 266 | { |
dcc99b62 MD |
267 | fixpt_t acc; |
268 | int remticks; | |
269 | ||
270 | acc = (cpticks << FSHIFT) / ttlticks; | |
271 | if (ttlticks >= ESTCPUFREQ) { | |
553ea3c8 | 272 | lp->lwp_pctcpu = acc; |
dcc99b62 MD |
273 | } else { |
274 | remticks = ESTCPUFREQ - ttlticks; | |
553ea3c8 | 275 | lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) / |
dcc99b62 | 276 | ESTCPUFREQ; |
a46fac56 | 277 | } |
984263bc MD |
278 | } |
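/*
 * Worked example (illustrative only; assumes FSHIFT == 11 and
 * ESTCPUFREQ == 50): an lwp that accumulated cpticks == 25 over
 * ttlticks == 50 ticks yields acc == (25 << 11) / 50 == 1024, i.e.
 * 50%, and because ttlticks >= ESTCPUFREQ the old lwp_pctcpu is simply
 * replaced.  Had only ttlticks == 25 elapsed, the result would instead
 * be a 50/50 blend of the new sample and the previous lwp_pctcpu.
 */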
279 | ||
280 | /* | |
281 | * We're only looking at 7 bits of the address; everything is | |
282 | * aligned to 4, lots of things are aligned to greater powers | |
283 | * of 2. Shift right by 8, i.e. drop the bottom 256 worth. | |
284 | */ | |
285 | #define TABLESIZE 128 | |
984263bc MD |
286 | #define LOOKUP(x) (((intptr_t)(x) >> 8) & (TABLESIZE - 1)) |
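/*
 * Worked example (the address is made up): an ident of 0x00c0ffee
 * hashes to ((0x00c0ffee >> 8) & 127) == (0xc0ff & 127) == 127, so any
 * tsleep()/wakeup() on that ident uses bucket 127 of the per-cpu
 * gd_tsleep_hash[] array and entry 127 of slpque_cpumasks[].
 */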
287 | ||
fc17ad60 MD |
288 | static cpumask_t slpque_cpumasks[TABLESIZE]; |
289 | ||
984263bc | 290 | /* |
a46fac56 | 291 | * General scheduler initialization. We force a reschedule 25 times |
fc17ad60 MD |
292 | * a second by default. Note that cpu0 is initialized in early boot and |
293 | * cannot make any high level calls. | |
294 | * | |
295 | * Each cpu has its own sleep queue. | |
984263bc | 296 | */ |
984263bc | 297 | void |
fc17ad60 | 298 | sleep_gdinit(globaldata_t gd) |
984263bc | 299 | { |
fc17ad60 | 300 | static struct tslpque slpque_cpu0[TABLESIZE]; |
9c1fad94 | 301 | int i; |
984263bc | 302 | |
fc17ad60 MD |
303 | if (gd->gd_cpuid == 0) { |
304 | sched_quantum = (hz + 24) / 25; | |
305 | hogticks = 2 * sched_quantum; | |
306 | ||
307 | gd->gd_tsleep_hash = slpque_cpu0; | |
308 | } else { | |
77652cad | 309 | gd->gd_tsleep_hash = kmalloc(sizeof(slpque_cpu0), |
fc17ad60 MD |
310 | M_TSLEEP, M_WAITOK | M_ZERO); |
311 | } | |
312 | for (i = 0; i < TABLESIZE; ++i) | |
313 | TAILQ_INIT(&gd->gd_tsleep_hash[i]); | |
984263bc MD |
314 | } |
315 | ||
316 | /* | |
317 | * General sleep call. Suspends the current process until a wakeup is | |
318 | * performed on the specified identifier. The process will then be made | |
319 | * runnable with the specified priority. Sleeps at most timo/hz seconds | |
377d4740 | 320 | * (0 means no timeout). If flags includes PCATCH flag, signals are checked |
984263bc MD |
321 | * before and after sleeping, else signals are not checked. Returns 0 if |
322 | * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a | |
323 | * signal needs to be delivered, ERESTART is returned if the current system | |
324 | * call should be restarted if possible, and EINTR is returned if the system | |
325 | * call should be interrupted by the signal (return EINTR). | |
26a0694b | 326 | * |
0a3f9b47 MD |
327 | * Note that if we are a process, we release_curproc() before messing with |
328 | * the LWKT scheduler. | |
a46fac56 MD |
329 | * |
330 | * During autoconfiguration or after a panic, a sleep will simply | |
331 | * lower the priority briefly to allow interrupts, then return. | |
984263bc MD |
332 | */ |
333 | int | |
6656cd91 | 334 | tsleep(void *ident, int flags, const char *wmesg, int timo) |
984263bc | 335 | { |
dadab5e9 | 336 | struct thread *td = curthread; |
08f2f1bb | 337 | struct lwp *lp = td->td_lwp; |
0cfcada1 | 338 | struct proc *p = td->td_proc; /* may be NULL */ |
fc17ad60 | 339 | globaldata_t gd; |
344ad853 MD |
340 | int sig; |
341 | int catch; | |
342 | int id; | |
343 | int error; | |
e43a034f | 344 | int oldpri; |
076fecef | 345 | struct callout thandle; |
984263bc | 346 | |
0cfcada1 MD |
347 | /* |
348 | * NOTE: removed KTRPOINT; it could cause races due to blocking |
349 | * even in stable. Just scrap it for now. |
350 | */ | |
984263bc MD |
351 | if (cold || panicstr) { |
352 | /* | |
353 | * After a panic, or during autoconfiguration, | |
354 | * just give interrupts a chance, then just return; | |
355 | * don't run any other procs or panic below, | |
356 | * in case this is the idle process and already asleep. | |
357 | */ | |
e43a034f MD |
358 | splz(); |
359 | oldpri = td->td_pri & TDPRI_MASK; | |
360 | lwkt_setpri_self(safepri); | |
361 | lwkt_switch(); | |
362 | lwkt_setpri_self(oldpri); | |
984263bc MD |
363 | return (0); |
364 | } | |
9afb0ffd | 365 | logtsleep(tsleep_beg); |
fc17ad60 MD |
366 | gd = td->td_gd; |
367 | KKASSERT(td != &gd->gd_idlethread); /* you must be kidding! */ | |
344ad853 MD |
368 | |
369 | /* | |
370 | * NOTE: all of this occurs on the current cpu, including any | |
371 | * callout-based wakeups, so a critical section is a sufficient | |
372 | * interlock. | |
373 | * | |
374 | * The entire sequence through to where we actually sleep must | |
375 | * run without breaking the critical section. | |
376 | */ | |
377 | id = LOOKUP(ident); | |
378 | catch = flags & PCATCH; | |
379 | error = 0; | |
380 | sig = 0; | |
381 | ||
37af14fe | 382 | crit_enter_quick(td); |
344ad853 | 383 | |
0cfcada1 | 384 | KASSERT(ident != NULL, ("tsleep: no ident")); |
164b8401 SS |
385 | KASSERT(lp == NULL || lp->lwp_stat == LSRUN, ("tsleep %p %s %d", |
386 | ident, wmesg, lp->lwp_stat)); | |
0cfcada1 | 387 | |
344ad853 MD |
388 | /* |
389 | * Setup for the current process (if this is a process). | |
390 | */ | |
08f2f1bb | 391 | if (lp) { |
344ad853 MD |
392 | if (catch) { |
393 | /* | |
394 | * Early termination if PCATCH was set and a | |
395 | * signal is pending, interlocked with the | |
396 | * critical section. | |
397 | * | |
398 | * Early termination only occurs when tsleep() is | |
164b8401 | 399 | * entered while in a normal LSRUN state. |
344ad853 | 400 | */ |
08f2f1bb | 401 | if ((sig = CURSIG(lp)) != 0) |
344ad853 MD |
402 | goto resume; |
403 | ||
7c1212ec MD |
404 | /* |
405 | * Early termination if PCATCH was set and a | |
406 | * mailbox signal was possibly delivered prior to | |
407 | * the system call even being made, in order to | |
408 | * allow the user to interlock without having to | |
409 | * make additional system calls. | |
410 | */ | |
411 | if (p->p_flag & P_MAILBOX) | |
412 | goto resume; | |
413 | ||
344ad853 | 414 | /* |
84204577 | 415 | * Causes ksignal to wake us up if a signal arrives. |
344ad853 | 416 | */ |
9a379a4a | 417 | lp->lwp_flag |= LWP_SINTR; |
344ad853 MD |
418 | } |
419 | ||
420 | /* | |
421 | * Make sure the current process has been untangled from | |
422 | * the userland scheduler and initialize slptime to start | |
423 | * counting. | |
424 | */ | |
0a3f9b47 MD |
425 | if (flags & PNORESCHED) |
426 | td->td_flags |= TDF_NORESCHED; | |
08f2f1bb SS |
427 | p->p_usched->release_curproc(lp); |
428 | lp->lwp_slptime = 0; | |
0a3f9b47 | 429 | } |
fc17ad60 MD |
430 | |
431 | /* | |
344ad853 | 432 | * Move our thread to the correct queue and setup our wchan, etc. |
fc17ad60 | 433 | */ |
37af14fe | 434 | lwkt_deschedule_self(td); |
344ad853 | 435 | td->td_flags |= TDF_TSLEEPQ; |
fc17ad60 MD |
436 | TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[id], td, td_threadq); |
437 | atomic_set_int(&slpque_cpumasks[id], gd->gd_cpumask); | |
344ad853 MD |
438 | |
439 | td->td_wchan = ident; | |
440 | td->td_wmesg = wmesg; | |
441 | td->td_wdomain = flags & PDOMAIN_MASK; | |
442 | ||
443 | /* | |
444 | * Setup the timeout, if any | |
445 | */ | |
076fecef MD |
446 | if (timo) { |
447 | callout_init(&thandle); | |
448 | callout_reset(&thandle, timo, endtsleep, td); | |
449 | } | |
344ad853 | 450 | |
984263bc | 451 | /* |
344ad853 | 452 | * Beddy bye bye. |
984263bc | 453 | */ |
08f2f1bb | 454 | if (lp) { |
26a0694b | 455 | /* |
52eedfb5 | 456 | * Ok, we are sleeping. Place us in the LSSLEEP state. |
26a0694b | 457 | */ |
9388413d | 458 | KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0); |
164b8401 | 459 | lp->lwp_stat = LSSLEEP; |
08f2f1bb | 460 | lp->lwp_ru.ru_nvcsw++; |
344ad853 | 461 | lwkt_switch(); |
ab554892 MD |
462 | |
463 | /* | |
164b8401 | 464 | * And when we are woken up, put us back in LSRUN. If we |
ab554892 MD |
465 | * slept for over a second, recalculate our estcpu. |
466 | */ | |
164b8401 | 467 | lp->lwp_stat = LSRUN; |
08f2f1bb SS |
468 | if (lp->lwp_slptime) |
469 | p->p_usched->recalculate(lp); | |
470 | lp->lwp_slptime = 0; | |
0cfcada1 MD |
471 | } else { |
472 | lwkt_switch(); | |
473 | } | |
344ad853 | 474 | |
fc17ad60 MD |
475 | /* |
476 | * Make sure we haven't switched cpus while we were asleep. It's | |
344ad853 | 477 | * not supposed to happen. Clean up our temporary flags. |
fc17ad60 MD |
478 | */ |
479 | KKASSERT(gd == td->td_gd); | |
0a3f9b47 | 480 | td->td_flags &= ~TDF_NORESCHED; |
344ad853 MD |
481 | |
482 | /* | |
483 | * Clean up the timeout. |
484 | */ | |
485 | if (timo) { | |
486 | if (td->td_flags & TDF_TIMEOUT) { | |
487 | td->td_flags &= ~TDF_TIMEOUT; | |
488 | if (sig == 0) | |
489 | error = EWOULDBLOCK; | |
490 | } else { | |
491 | callout_stop(&thandle); | |
492 | } | |
0cfcada1 | 493 | } |
344ad853 MD |
494 | |
495 | /* | |
496 | * Since td_threadq is used both for our run queue AND for the | |
497 | * tsleep hash queue, we can't still be on it at this point because | |
498 | * we've gotten cpu back. | |
499 | */ | |
afbfc034 | 500 | KASSERT((td->td_flags & TDF_TSLEEPQ) == 0, ("tsleep: impossible thread flags %08x", td->td_flags)); |
344ad853 MD |
501 | td->td_wchan = NULL; |
502 | td->td_wmesg = NULL; | |
503 | td->td_wdomain = 0; | |
504 | ||
505 | /* | |
7c1212ec MD |
506 | * Figure out the correct error return. If interrupted by a |
507 | * signal we want to return EINTR or ERESTART. | |
508 | * | |
509 | * If P_MAILBOX is set no automatic system call restart occurs | |
510 | * and we return EINTR. P_MAILBOX is meant to be used as an | |
511 | * interlock, the user must poll it prior to any system call | |
512 | * that it wishes to interlock a mailbox signal against since | |
513 | * the flag is cleared on *any* system call that sleeps. | |
344ad853 MD |
514 | */ |
515 | resume: | |
0cfcada1 | 516 | if (p) { |
7c1212ec MD |
517 | if (catch && error == 0) { |
518 | if ((p->p_flag & P_MAILBOX) && sig == 0) { | |
344ad853 | 519 | error = EINTR; |
08f2f1bb | 520 | } else if (sig != 0 || (sig = CURSIG(lp))) { |
7c1212ec MD |
521 | if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) |
522 | error = EINTR; | |
523 | else | |
524 | error = ERESTART; | |
525 | } | |
984263bc | 526 | } |
9a379a4a SS |
527 | lp->lwp_flag &= ~(LWP_BREAKTSLEEP | LWP_SINTR); |
528 | p->p_flag &= ~P_MAILBOX; | |
984263bc | 529 | } |
9afb0ffd | 530 | logtsleep(tsleep_end); |
344ad853 MD |
531 | crit_exit_quick(td); |
532 | return (error); | |
984263bc MD |
533 | } |
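/*
 * Usage sketch (illustrative only; 'sc' and its sc_event field are
 * made up): a typical caller sleeps on an ident with PCATCH and a
 * timeout, then dispatches on the return values documented above.
 *
 *	error = tsleep(&sc->sc_event, PCATCH, "scwait", 5 * hz);
 *	if (error == 0)
 *		...		(woken by wakeup(&sc->sc_event))
 *	else if (error == EWOULDBLOCK)
 *		...		(the 5 second timeout expired)
 *	else if (error == EINTR || error == ERESTART)
 *		...		(interrupted by a signal)
 */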
534 | ||
16523a43 MD |
535 | /* |
536 | * This is a dandy function that allows us to interlock tsleep/wakeup | |
537 | * operations with unspecified upper level locks, such as lockmgr locks, | |
538 | * simply by holding a critical section. The sequence is: | |
539 | * | |
540 | * (enter critical section) | |
541 | * (acquire upper level lock) | |
542 | * tsleep_interlock(blah) | |
543 | * (release upper level lock) | |
544 | * tsleep(blah, ...) | |
545 | * (exit critical section) | |
546 | * | |
547 | * Basically this function sets our cpumask for the ident which informs | |
548 | * other cpus that our cpu 'might' be waiting (or about to wait on) the | |
549 | * hash index related to the ident. The critical section prevents another | |
550 | * cpu's wakeup() from being processed on our cpu until we are actually | |
551 | * able to enter the tsleep(). Thus, no race occurs between our attempt | |
552 | * to release a resource and sleep, and another cpu's attempt to acquire | |
553 | * a resource and call wakeup. | |
554 | * | |
555 | * There isn't much of a point to this function unless you call it while | |
556 | * holding a critical section. | |
557 | */ | |
bf765287 MD |
558 | static __inline void |
559 | _tsleep_interlock(globaldata_t gd, void *ident) | |
560 | { | |
561 | int id = LOOKUP(ident); | |
562 | ||
563 | atomic_set_int(&slpque_cpumasks[id], gd->gd_cpumask); | |
564 | } | |
565 | ||
16523a43 MD |
566 | void |
567 | tsleep_interlock(void *ident) | |
568 | { | |
bf765287 MD |
569 | _tsleep_interlock(mycpu, ident); |
570 | } | |
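/*
 * Usage sketch (illustrative only; 'foo', its lock and its 'busy'
 * field are made up): the interlock sequence described above, using a
 * hypothetical lockmgr lock as the upper level lock.
 *
 *	crit_enter();
 *	lockmgr(&foo->lk, LK_EXCLUSIVE);
 *	while (foo->busy) {
 *		tsleep_interlock(foo);
 *		lockmgr(&foo->lk, LK_RELEASE);
 *		tsleep(foo, 0, "foowt", 0);
 *		lockmgr(&foo->lk, LK_EXCLUSIVE);
 *	}
 *	lockmgr(&foo->lk, LK_RELEASE);
 *	crit_exit();
 */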
571 | ||
572 | /* | |
573 | * Interlocked spinlock sleep. An exclusively held spinlock must | |
574 | * be passed to msleep(). The function will atomically release the | |
575 | * spinlock and tsleep on the ident, then reacquire the spinlock and | |
576 | * return. | |
577 | * | |
578 | * This routine is fairly important along the critical path, so optimize it | |
579 | * heavily. | |
580 | */ | |
581 | int | |
582 | msleep(void *ident, struct spinlock *spin, int flags, | |
583 | const char *wmesg, int timo) | |
584 | { | |
585 | globaldata_t gd = mycpu; | |
586 | int error; | |
16523a43 | 587 | |
bf765287 MD |
588 | crit_enter_gd(gd); |
589 | _tsleep_interlock(gd, ident); | |
590 | spin_unlock_wr_quick(gd, spin); | |
591 | error = tsleep(ident, flags, wmesg, timo); | |
592 | spin_lock_wr_quick(gd, spin); | |
593 | crit_exit_gd(gd); | |
594 | ||
595 | return (error); | |
16523a43 MD |
596 | } |
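/*
 * Usage sketch (illustrative only; 'q' and its fields are made up):
 * msleep() lets a consumer block while atomically dropping the
 * spinlock that protects the condition it is waiting on.
 *
 *	spin_lock_wr(&q->spin);
 *	while (TAILQ_EMPTY(&q->list))
 *		msleep(&q->list, &q->spin, 0, "qwait", 0);
 *	item = TAILQ_FIRST(&q->list);
 *	TAILQ_REMOVE(&q->list, item, entry);
 *	spin_unlock_wr(&q->spin);
 */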
597 | ||
984263bc | 598 | /* |
344ad853 | 599 | * Implement the timeout for tsleep. |
fc17ad60 | 600 | * |
9a379a4a | 601 | * We set LWP_BREAKTSLEEP to indicate that an event has occurred, but |
344ad853 MD |
602 | * we only call setrunnable if the process is not stopped. |
603 | * | |
604 | * This type of callout timeout is scheduled on the same cpu the process | |
605 | * is sleeping on. Also, at the moment, the MP lock is held. | |
984263bc MD |
606 | */ |
607 | static void | |
0cfcada1 | 608 | endtsleep(void *arg) |
984263bc | 609 | { |
0cfcada1 | 610 | thread_t td = arg; |
9a379a4a | 611 | struct lwp *lp; |
984263bc | 612 | |
344ad853 | 613 | ASSERT_MP_LOCK_HELD(curthread); |
37af14fe | 614 | crit_enter(); |
344ad853 MD |
615 | |
616 | /* | |
617 | * cpu interlock. Thread flags are only manipulated on | |
618 | * the cpu owning the thread. proc flags are only manipulated | |
619 | * by the holder of the MP lock. We have both. |
620 | */ | |
621 | if (td->td_flags & TDF_TSLEEPQ) { | |
0cfcada1 | 622 | td->td_flags |= TDF_TIMEOUT; |
344ad853 | 623 | |
9a379a4a SS |
624 | if ((lp = td->td_lwp) != NULL) { |
625 | lp->lwp_flag |= LWP_BREAKTSLEEP; | |
626 | if (lp->lwp_proc->p_stat != SSTOP) | |
627 | setrunnable(lp); | |
0cfcada1 | 628 | } else { |
344ad853 | 629 | unsleep_and_wakeup_thread(td); |
0cfcada1 | 630 | } |
984263bc | 631 | } |
37af14fe | 632 | crit_exit(); |
984263bc MD |
633 | } |
634 | ||
984263bc | 635 | /* |
344ad853 MD |
636 | * Unsleep and wakeup a thread. This function runs without the MP lock |
637 | * which means that it can only manipulate thread state on the owning cpu, | |
638 | * and cannot touch the process state at all. | |
984263bc | 639 | */ |
344ad853 | 640 | static |
8fb8bca6 | 641 | void |
344ad853 | 642 | unsleep_and_wakeup_thread(struct thread *td) |
8fb8bca6 | 643 | { |
344ad853 | 644 | globaldata_t gd = mycpu; |
fc17ad60 MD |
645 | int id; |
646 | ||
344ad853 MD |
647 | #ifdef SMP |
648 | if (td->td_gd != gd) { | |
649 | lwkt_send_ipiq(td->td_gd, (ipifunc1_t)unsleep_and_wakeup_thread, td); | |
650 | return; | |
651 | } | |
652 | #endif | |
9c1fad94 | 653 | crit_enter(); |
344ad853 MD |
654 | if (td->td_flags & TDF_TSLEEPQ) { |
655 | td->td_flags &= ~TDF_TSLEEPQ; | |
656 | id = LOOKUP(td->td_wchan); | |
657 | TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_threadq); | |
658 | if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL) | |
659 | atomic_clear_int(&slpque_cpumasks[id], gd->gd_cpumask); | |
660 | lwkt_schedule(td); | |
8fb8bca6 | 661 | } |
9c1fad94 | 662 | crit_exit(); |
8fb8bca6 | 663 | } |
8fb8bca6 EN |
664 | |
665 | /* | |
666 | * Make all processes sleeping on the specified identifier runnable. | |
fc17ad60 MD |
667 | * count may be zero or one only. |
668 | * | |
669 | * The domain encodes the sleep/wakeup domain AND the first cpu to check | |
670 | * (which is always the current cpu). As we iterate across cpus the wakeup is chained to further candidate cpus with IPI messages. |
344ad853 MD |
671 | * |
672 | * This call may run without the MP lock held. We can only manipulate thread | |
673 | * state on the cpu owning the thread. We CANNOT manipulate process state | |
674 | * at all. | |
8fb8bca6 EN |
675 | */ |
676 | static void | |
fc17ad60 | 677 | _wakeup(void *ident, int domain) |
984263bc | 678 | { |
fc17ad60 | 679 | struct tslpque *qp; |
0cfcada1 MD |
680 | struct thread *td; |
681 | struct thread *ntd; | |
fc17ad60 | 682 | globaldata_t gd; |
fc17ad60 MD |
683 | #ifdef SMP |
684 | cpumask_t mask; | |
685 | cpumask_t tmask; | |
686 | int startcpu; | |
687 | int nextcpu; | |
688 | #endif | |
689 | int id; | |
984263bc | 690 | |
37af14fe | 691 | crit_enter(); |
9afb0ffd | 692 | logtsleep(wakeup_beg); |
fc17ad60 MD |
693 | gd = mycpu; |
694 | id = LOOKUP(ident); | |
695 | qp = &gd->gd_tsleep_hash[id]; | |
984263bc | 696 | restart: |
0cfcada1 MD |
697 | for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) { |
698 | ntd = TAILQ_NEXT(td, td_threadq); | |
fc17ad60 MD |
699 | if (td->td_wchan == ident && |
700 | td->td_wdomain == (domain & PDOMAIN_MASK) | |
701 | ) { | |
344ad853 MD |
702 | KKASSERT(td->td_flags & TDF_TSLEEPQ); |
703 | td->td_flags &= ~TDF_TSLEEPQ; | |
0cfcada1 | 704 | TAILQ_REMOVE(qp, td, td_threadq); |
fc17ad60 MD |
705 | if (TAILQ_FIRST(qp) == NULL) { |
706 | atomic_clear_int(&slpque_cpumasks[id], | |
707 | gd->gd_cpumask); | |
708 | } | |
344ad853 | 709 | lwkt_schedule(td); |
fc17ad60 MD |
710 | if (domain & PWAKEUP_ONE) |
711 | goto done; | |
0cfcada1 | 712 | goto restart; |
984263bc MD |
713 | } |
714 | } | |
fc17ad60 MD |
715 | |
716 | #ifdef SMP | |
717 | /* | |
718 | * We finished checking the current cpu but there still may be | |
719 | * more work to do. Either wakeup_one was requested and no matching | |
720 | * thread was found, or a normal wakeup was requested and we have | |
721 | * to continue checking cpus. | |
722 | * | |
723 | * The cpu that started the wakeup sequence is encoded in the domain. | |
724 | * We use this information to determine which cpus still need to be | |
725 | * checked, locate a candidate cpu, and chain the wakeup | |
726 | * asynchronously with an IPI message. | |
727 | * | |
728 | * It should be noted that this scheme is actually less expensive than |
729 | * the old scheme when waking up multiple threads, since we send | |
730 | * only one IPI message per target candidate which may then schedule | |
731 | * multiple threads. Before, we could have wound up sending an IPI |
732 | * message for each thread on the target cpu (!= current cpu) that | |
733 | * needed to be woken up. | |
734 | * | |
735 | * NOTE: Wakeups occurring on remote cpus are asynchronous. This |
736 | * should be ok since we are passing idents in the IPI rather than |
737 | * thread pointers. | |
738 | */ | |
b336a9b1 MD |
739 | if ((domain & PWAKEUP_MYCPU) == 0 && |
740 | (mask = slpque_cpumasks[id]) != 0 | |
741 | ) { | |
fc17ad60 MD |
742 | /* |
743 | * Look for a cpu that might have work to do. Mask out cpus | |
744 | * which have already been processed. | |
745 | * | |
746 | * 31xxxxxxxxxxxxxxxxxxxxxxxxxxxxx0 | |
747 | * ^ ^ ^ | |
748 | * start currentcpu start | |
749 | * case2 case1 | |
750 | * * * * | |
751 | * 11111111111111110000000000000111 case1 | |
752 | * 00000000111111110000000000000000 case2 | |
753 | * | |
754 | * case1: We started at start_case1 and processed through | |
755 | * to the current cpu. We have to check any bits | |
756 | * after the current cpu, then check bits before | |
757 | * the starting cpu. | |
758 | * | |
759 | * case2: We have already checked all the bits from | |
760 | * start_case2 to the end, and from 0 to the current | |
761 | * cpu. We just have the bits from the current cpu | |
762 | * to start_case2 left to check. | |
763 | */ | |
764 | startcpu = PWAKEUP_DECODE(domain); | |
765 | if (gd->gd_cpuid >= startcpu) { | |
766 | /* | |
767 | * CASE1 | |
768 | */ | |
769 | tmask = mask & ~((gd->gd_cpumask << 1) - 1); | |
770 | if (mask & tmask) { | |
771 | nextcpu = bsfl(mask & tmask); | |
772 | lwkt_send_ipiq2(globaldata_find(nextcpu), | |
773 | _wakeup, ident, domain); | |
774 | } else { | |
775 | tmask = (1 << startcpu) - 1; | |
776 | if (mask & tmask) { | |
777 | nextcpu = bsfl(mask & tmask); | |
778 | lwkt_send_ipiq2( | |
779 | globaldata_find(nextcpu), | |
780 | _wakeup, ident, domain); | |
781 | } | |
782 | } | |
783 | } else { | |
784 | /* | |
785 | * CASE2 | |
786 | */ | |
787 | tmask = ~((gd->gd_cpumask << 1) - 1) & | |
788 | ((1 << startcpu) - 1); | |
789 | if (mask & tmask) { | |
790 | nextcpu = bsfl(mask & tmask); | |
791 | lwkt_send_ipiq2(globaldata_find(nextcpu), | |
792 | _wakeup, ident, domain); | |
793 | } | |
794 | } | |
795 | } | |
796 | #endif | |
797 | done: | |
9afb0ffd | 798 | logtsleep(wakeup_end); |
37af14fe | 799 | crit_exit(); |
984263bc MD |
800 | } |
801 | ||
b336a9b1 MD |
802 | /* |
803 | * Wakeup all threads tsleep()ing on the specified ident, on all cpus | |
804 | */ | |
984263bc | 805 | void |
0cfcada1 | 806 | wakeup(void *ident) |
984263bc | 807 | { |
fc17ad60 | 808 | _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid)); |
0cfcada1 | 809 | } |
984263bc | 810 | |
b336a9b1 MD |
811 | /* |
812 | * Wakeup one thread tsleep()ing on the specified ident, on any cpu. | |
813 | */ | |
0cfcada1 MD |
814 | void |
815 | wakeup_one(void *ident) | |
816 | { | |
fc17ad60 MD |
817 | /* XXX potentially round-robin the first responding cpu */ |
818 | _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid) | PWAKEUP_ONE); | |
da5fb9ef MD |
819 | } |
820 | ||
b336a9b1 MD |
821 | /* |
822 | * Wakeup threads tsleep()ing on the specified ident on the current cpu | |
823 | * only. | |
824 | */ | |
825 | void | |
826 | wakeup_mycpu(void *ident) | |
827 | { | |
828 | _wakeup(ident, PWAKEUP_MYCPU); | |
829 | } | |
830 | ||
831 | /* | |
832 | * Wakeup one thread tsleep()ing on the specified ident on the current cpu | |
833 | * only. | |
834 | */ | |
835 | void | |
836 | wakeup_mycpu_one(void *ident) | |
837 | { | |
838 | /* XXX potentially round-robin the first responding cpu */ | |
839 | _wakeup(ident, PWAKEUP_MYCPU|PWAKEUP_ONE); | |
840 | } | |
841 | ||
842 | /* | |
843 | * Wakeup all threads tsleep()ing on the specified ident on the specified cpu |
844 | * only. | |
845 | */ | |
846 | void | |
847 | wakeup_oncpu(globaldata_t gd, void *ident) | |
848 | { | |
1699d292 | 849 | #ifdef SMP |
b336a9b1 MD |
850 | if (gd == mycpu) { |
851 | _wakeup(ident, PWAKEUP_MYCPU); | |
852 | } else { | |
853 | lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU); | |
854 | } | |
1699d292 MD |
855 | #else |
856 | _wakeup(ident, PWAKEUP_MYCPU); | |
857 | #endif | |
b336a9b1 MD |
858 | } |
859 | ||
860 | /* | |
861 | * Wakeup one thread tsleep()ing on the specified ident on the specified cpu | |
862 | * only. | |
863 | */ | |
864 | void | |
865 | wakeup_oncpu_one(globaldata_t gd, void *ident) | |
866 | { | |
1699d292 | 867 | #ifdef SMP |
b336a9b1 MD |
868 | if (gd == mycpu) { |
869 | _wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE); | |
870 | } else { | |
871 | lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU | PWAKEUP_ONE); | |
872 | } | |
1699d292 MD |
873 | #else |
874 | _wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE); | |
875 | #endif | |
b336a9b1 MD |
876 | } |
877 | ||
878 | /* | |
879 | * Wakeup all threads waiting on the specified ident that slept using | |
880 | * the specified domain, on all cpus. | |
881 | */ | |
da5fb9ef MD |
882 | void |
883 | wakeup_domain(void *ident, int domain) | |
884 | { | |
fc17ad60 | 885 | _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid)); |
da5fb9ef MD |
886 | } |
887 | ||
b336a9b1 MD |
888 | /* |
889 | * Wakeup one thread waiting on the specified ident that slept using | |
890 | * the specified domain, on any cpu. | |
891 | */ | |
da5fb9ef MD |
892 | void |
893 | wakeup_domain_one(void *ident, int domain) | |
894 | { | |
fc17ad60 MD |
895 | /* XXX potentially round-robin the first responding cpu */ |
896 | _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE); | |
984263bc MD |
897 | } |
898 | ||
899 | /* | |
344ad853 MD |
900 | * setrunnable() |
901 | * | |
902 | * Make a process runnable. The MP lock must be held on call. This only | |
903 | * has an effect if we are in LSSLEEP. We only break out of the |
9a379a4a | 904 | * tsleep if LWP_BREAKTSLEEP is set, otherwise we just fix up the state. |
37af14fe | 905 | * |
344ad853 MD |
906 | * NOTE: With the MP lock held we can only safely manipulate the process |
907 | * structure. We cannot safely manipulate the thread structure. | |
984263bc MD |
908 | */ |
909 | void | |
9a379a4a | 910 | setrunnable(struct lwp *lp) |
984263bc | 911 | { |
344ad853 MD |
912 | crit_enter(); |
913 | ASSERT_MP_LOCK_HELD(curthread); | |
2daf83b0 SS |
914 | if (lp->lwp_stat == LSSTOP) |
915 | lp->lwp_stat = LSSLEEP; | |
916 | if (lp->lwp_stat == LSSLEEP && (lp->lwp_flag & LWP_BREAKTSLEEP)) | |
08f2f1bb | 917 | unsleep_and_wakeup_thread(lp->lwp_thread); |
344ad853 | 918 | crit_exit(); |
984263bc MD |
919 | } |
920 | ||
921 | /* | |
164b8401 SS |
922 | * The process is stopped due to some condition, usually because p_stat is |
923 | * set to SSTOP, but also possibly due to being traced. | |
fc17ad60 | 924 | * |
164b8401 | 925 | * NOTE! If the caller sets SSTOP, the caller must also clear P_WAITED |
344ad853 MD |
926 | * because the parent may check the child's status before the child actually |
927 | * gets to this routine. | |
928 | * | |
9a379a4a | 929 | * This routine is called with the current lwp only, typically just |
344ad853 MD |
930 | * before returning to userland. |
931 | * | |
9a379a4a | 932 | * Setting LWP_BREAKTSLEEP before entering the tsleep will cause a passive |
344ad853 | 933 | * SIGCONT to break out of the tsleep. |
984263bc MD |
934 | */ |
935 | void | |
9a379a4a | 936 | tstop(void) |
984263bc | 937 | { |
9a379a4a SS |
938 | struct lwp *lp = curthread->td_lwp; |
939 | ||
940 | lp->lwp_flag |= LWP_BREAKTSLEEP; | |
941 | tsleep(lp->lwp_proc, 0, "stop", 0); | |
26a0694b MD |
942 | } |
943 | ||
a77ac49d MD |
944 | /* |
945 | * Yield / synchronous reschedule. This is a bit tricky because the trap | |
946 | * code might have set a lazy release on the switch function. Setting | |
947 | * P_PASSIVE_ACQ will ensure that the lazy release executes when we call | |
948 | * switch, and that we are given a greater chance of affinity with our | |
949 | * current cpu. | |
950 | * | |
951 | * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt | |
952 | * run queue. lwkt_switch() will also execute any assigned passive release | |
953 | * (which usually calls release_curproc()), allowing a same/higher priority | |
954 | * process to be designated as the current process. | |
955 | * | |
956 | * While it is possible for a lower priority process to be designated, | |
957 | * its call to lwkt_maybe_switch() in acquire_curproc() will likely |
958 | * round-robin back to us and we will be able to re-acquire the current | |
959 | * process designation. | |
960 | */ | |
961 | void | |
962 | uio_yield(void) | |
963 | { | |
964 | struct thread *td = curthread; | |
965 | struct proc *p = td->td_proc; | |
966 | ||
967 | lwkt_setpri_self(td->td_pri & TDPRI_MASK); | |
968 | if (p) { | |
969 | p->p_flag |= P_PASSIVE_ACQ; | |
970 | lwkt_switch(); | |
971 | p->p_flag &= ~P_PASSIVE_ACQ; | |
972 | } else { | |
973 | lwkt_switch(); | |
974 | } | |
975 | } | |
976 | ||
984263bc MD |
977 | /* |
978 | * Compute a tenex style load average of a quantity on | |
979 | * 1, 5 and 15 minute intervals. | |
980 | */ | |
c7e98b2f | 981 | static int loadav_count_runnable(struct lwp *p, void *data); |
8fa76237 | 982 | |
984263bc MD |
983 | static void |
984 | loadav(void *arg) | |
985 | { | |
984263bc | 986 | struct loadavg *avg; |
8fa76237 | 987 | int i, nrun; |
984263bc | 988 | |
984263bc | 989 | nrun = 0; |
c7e98b2f | 990 | alllwp_scan(loadav_count_runnable, &nrun); |
8fa76237 MD |
991 | avg = &averunnable; |
992 | for (i = 0; i < 3; i++) { | |
984263bc MD |
993 | avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + |
994 | nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; | |
8fa76237 | 995 | } |
984263bc MD |
996 | |
997 | /* | |
998 | * Schedule the next update to occur after 5 seconds, but add a | |
999 | * random variation to avoid synchronisation with processes that | |
1000 | * run at regular intervals. | |
1001 | */ | |
cddfb7bb | 1002 | callout_reset(&loadav_callout, hz * 4 + (int)(krandom() % (hz * 2 + 1)), |
8fa76237 MD |
1003 | loadav, NULL); |
1004 | } | |
1005 | ||
1006 | static int | |
c7e98b2f | 1007 | loadav_count_runnable(struct lwp *lp, void *data) |
8fa76237 MD |
1008 | { |
1009 | int *nrunp = data; | |
1010 | thread_t td; | |
1011 | ||
164b8401 SS |
1012 | switch (lp->lwp_stat) { |
1013 | case LSRUN: | |
08f2f1bb | 1014 | if ((td = lp->lwp_thread) == NULL) |
8fa76237 MD |
1015 | break; |
1016 | if (td->td_flags & TDF_BLOCKED) | |
1017 | break; | |
8fa76237 MD |
1018 | ++*nrunp; |
1019 | break; | |
1020 | default: | |
1021 | break; | |
1022 | } | |
1023 | return(0); | |
984263bc MD |
1024 | } |
1025 | ||
1026 | /* ARGSUSED */ | |
1027 | static void | |
6656cd91 | 1028 | sched_setup(void *dummy) |
984263bc | 1029 | { |
984263bc | 1030 | callout_init(&loadav_callout); |
35f9d051 | 1031 | callout_init(&schedcpu_callout); |
984263bc MD |
1032 | |
1033 | /* Kick off timeout driven events by calling first time. */ | |
984263bc MD |
1034 | schedcpu(NULL); |
1035 | loadav(NULL); | |
1036 | } | |
1037 |