* Copyright (c) 1982, 1986, 1990, 1991, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
* $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>
#include <sys/ktrace.h>
#include <sys/serialize.h>
#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <vm/vm_extern.h>
TAILQ_HEAD(, thread) queue;
const volatile void *ident0;
const volatile void *ident1;
const volatile void *ident2;
const volatile void *ident3;
static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);
__read_mostly int tsleep_crypto_dump = 0;
__read_mostly int ncpus;
__read_mostly int ncpus_fit, ncpus_fit_mask; /* note: mask not cpumask_t */
__read_mostly int safepri;
__read_mostly int tsleep_now_works;
MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");
#define __DEALL(ident) __DEQUALIFY(void *, ident)
#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP KTR_ALL
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);
#define logtsleep1(name) KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val) KTR_LOG(tsleep_ ## name, val)
__exclusive_cache_line
struct loadavg averunnable =
{ {0, 0, 0}, FSCALE }; /* load average of runnable procs */
* Constants for averages over 1, 5, and 15 minutes
* when sampling at 5 second intervals.
static fixpt_t cexp[3] = {
0.9200444146293232 * FSCALE, /* exp(-1/12) */
0.9834714538216174 * FSCALE, /* exp(-1/60) */
0.9944598480048967 * FSCALE, /* exp(-1/180) */
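/*
 * Derivation of the constants above (for reference): with a sample
 * taken every 5 seconds, an averaging window of T seconds decays by
 * exp(-5/T) per sample, giving exp(-5/60) = exp(-1/12),
 * exp(-5/300) = exp(-1/60) and exp(-5/900) = exp(-1/180) for the
 * 1, 5 and 15 minute averages, each scaled by FSCALE for fixed-point
 * storage.
 */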
static void endtsleep (void *);
static void loadav (void *arg);
static void schedcpu (void *arg);
__read_mostly static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW,
&pctcpu_decay, 0, "");
* kernel uses `FSCALE', userland (SHOULD) use kern.fscale
__read_mostly int fscale __unused = FSCALE; /* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
* Issue a wakeup() from userland (debugging)
sysctl_wakeup(SYSCTL_HANDLER_ARGS)
if (req->newptr != NULL) {
if (priv_check(curthread, PRIV_ROOT))
error = SYSCTL_IN(req, &ident, sizeof(ident));
kprintf("issue wakeup %016jx\n", ident);
wakeup((void *)(intptr_t)ident);
if (req->oldptr != NULL) {
error = SYSCTL_OUT(req, &ident, sizeof(ident));
sysctl_wakeup_umtx(SYSCTL_HANDLER_ARGS)
if (req->newptr != NULL) {
if (priv_check(curthread, PRIV_ROOT))
error = SYSCTL_IN(req, &ident, sizeof(ident));
kprintf("issue wakeup %016jx, PDOMAIN_UMTX\n", ident);
wakeup_domain((void *)(intptr_t)ident, PDOMAIN_UMTX);
if (req->oldptr != NULL) {
error = SYSCTL_OUT(req, &ident, sizeof(ident));
SYSCTL_PROC(_debug, OID_AUTO, wakeup, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
sysctl_wakeup, "Q", "issue wakeup(addr)");
SYSCTL_PROC(_debug, OID_AUTO, wakeup_umtx, CTLTYPE_UQUAD|CTLFLAG_RW, 0, 0,
sysctl_wakeup_umtx, "Q", "issue wakeup(addr, PDOMAIN_UMTX)");
* Recompute process priorities, once a second.
* Since the userland schedulers are typically event-oriented, if the
* estcpu calculation at wakeup() time is not sufficient to make a
* process runnable relative to other processes in the system we have
* a 1-second recalc to help out.
* This code also allows us to store sysclock_t data in the process structure
* without fear of an overrun, since sysclock_t is guaranteed to hold
* several seconds' worth of count.
* WARNING! callouts can preempt normal threads. However, they will not
* preempt a thread holding a spinlock so we *can* safely use spinlocks.
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);
allproc_scan(schedcpu_stats, NULL, 1);
allproc_scan(schedcpu_resource, NULL, 1);
if (mycpu->gd_cpuid == 0) {
wakeup((caddr_t)&lbolt);
wakeup(lbolt_syncer);
callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
* General process statistics once a second
schedcpu_stats(struct proc *p, void *data __unused)
* Threads may not be completely set up if the process is in the SIDL state.
if (p->p_stat == SIDL)
if (lwkt_trytoken(&p->p_token) == FALSE) {
FOREACH_LWP_IN_PROC(lp, p) {
if (lp->lwp_stat == LSSLEEP) {
if (lp->lwp_slptime == 1)
p->p_usched->uload_update(lp);
* Only recalculate processes that are active or have slept
* less than 2 seconds. The schedulers understand this.
* Otherwise decay by 50% per second.
if (lp->lwp_slptime <= 1) {
p->p_usched->recalculate(lp);
decay = pctcpu_decay;
lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
lwkt_reltoken(&p->p_token);
* Resource checks. XXX break out since ksignal/killproc can block,
* limiting us to one process killed per second. There is probably
schedcpu_resource(struct proc *p, void *data __unused)
if (p->p_stat == SIDL)
if (lwkt_trytoken(&p->p_token) == FALSE) {
if (p->p_stat == SZOMB || p->p_limit == NULL) {
lwkt_reltoken(&p->p_token);
FOREACH_LWP_IN_PROC(lp, p) {
* We may have caught an lp in the middle of being
* created, lwp_thread can be NULL.
if (lp->lwp_thread) {
ttime += lp->lwp_thread->td_sticks;
ttime += lp->lwp_thread->td_uticks;
switch(plimit_testcpulimit(p, ttime)) {
case PLIMIT_TESTCPU_KILL:
killproc(p, "exceeded maximum CPU limit");
case PLIMIT_TESTCPU_XCPU:
if ((p->p_flags & P_XCPU) == 0) {
p->p_flags |= P_XCPU;
lwkt_reltoken(&p->p_token);
* This is only used by ps. Generate a CPU usage percentage over
* a period of one second.
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
acc = (cpticks << FSHIFT) / ttlticks;
if (ttlticks >= ESTCPUFREQ) {
lp->lwp_pctcpu = acc;
remticks = ESTCPUFREQ - ttlticks;
lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
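/*
 * Worked example (illustrative numbers only, assuming the usual BSD
 * FSHIFT of 11, i.e. FSCALE = 2048, and an ESTCPUFREQ of 50 ticks):
 * a thread that used cpticks = 10 of ttlticks = 20 gives
 * acc = (10 << 11) / 20 = 1024, i.e. 50% in fixed point.  Since
 * ttlticks < ESTCPUFREQ, the result is blended with the previous
 * lwp_pctcpu over the remaining remticks = 30 ticks,
 * (1024 * 20 + lwp_pctcpu * 30) / 50, rather than replacing it.
 */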
* Handy macros to calculate hash indices. LOOKUP() calculates the
* global cpumask hash index, TCHASHSHIFT() converts that into the
* By making the pcpu hash arrays smaller we save a significant amount
* of memory at very low cost. The real cost is in IPIs, which are handled
* by the much larger global cpumask hash table.
#define LOOKUP_PRIME 66555444443333333ULL
#define LOOKUP(x) ((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^ \
LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x) ((x) >> 4)
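/*
 * Illustration of how the two hashes relate (a restatement of the
 * macros above, not new behavior): a wait channel address is first
 * hashed into the global cpumask table,
 *
 *	cid = LOOKUP(ident);		(0 .. slpque_tablesize - 1)
 *	gid = TCHASHSHIFT(cid);		(cid >> 4)
 *
 * so 16 consecutive global hash slots map onto one pcpu sleep queue.
 */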
static uint32_t slpque_tablesize;
static cpumask_t *slpque_cpumasks;
SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
* This is a dandy function that allows us to interlock tsleep/wakeup
* operations with unspecified upper level locks, such as lockmgr locks,
* simply by holding a critical section. The sequence is:
* (acquire upper level lock)
* tsleep_interlock(blah)
* (release upper level lock)
* tsleep(blah, ...)
* Basically this function queues us on the tsleep queue without actually
* descheduling us. When tsleep() is later called with PINTERLOCKED it
* assumes the thread was already queued, otherwise it queues it there.
* Thus it is possible to receive the wakeup prior to going to sleep and
* the race conditions are covered.
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
thread_t td = gd->gd_curthread;
kprintf("tsleep_interlock: NULL ident %s\n", td->td_comm);
crit_enter_quick(td);
if (td->td_flags & TDF_TSLEEPQ) {
* Shortcut if unchanged
if (td->td_wchan == ident &&
td->td_wdomain == (flags & PDOMAIN_MASK)) {
* Remove current sleepq
cid = LOOKUP(td->td_wchan);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_REMOVE(&qp->queue, td, td_sleepq);
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
td->td_flags |= TDF_TSLEEPQ;
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
if (qp->ident0 != ident && qp->ident1 != ident &&
qp->ident2 != ident && qp->ident3 != ident) {
if (qp->ident0 == NULL)
else if (qp->ident1 == NULL)
else if (qp->ident2 == NULL)
else if (qp->ident3 == NULL)
qp->ident0 = (void *)(intptr_t)-1;
ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
td->td_wchan = ident;
td->td_wdomain = flags & PDOMAIN_MASK;
tsleep_interlock(const volatile void *ident, int flags)
_tsleep_interlock(mycpu, ident, flags);
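/*
 * Usage sketch (illustrative only; example_lk and example_cond are
 * hypothetical names, not part of this file).  The interlock pattern
 * with a lockmgr lock looks like:
 *
 *	lockmgr(&example_lk, LK_EXCLUSIVE);
 *	while (example_cond == 0) {
 *		tsleep_interlock(&example_cond, 0);
 *		lockmgr(&example_lk, LK_RELEASE);
 *		tsleep(&example_cond, PINTERLOCKED, "exwait", 0);
 *		lockmgr(&example_lk, LK_EXCLUSIVE);
 *	}
 *	lockmgr(&example_lk, LK_RELEASE);
 *
 * A wakeup(&example_cond) issued after the interlock but before the
 * tsleep() call is not lost, because the thread is already on its
 * sleep queue when the lock is dropped.
 */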
* Remove thread from sleepq. Must be called with a critical section held.
* The thread must not be migrating.
_tsleep_remove(thread_t td)
globaldata_t gd = mycpu;
KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
if (td->td_flags & TDF_TSLEEPQ) {
td->td_flags &= ~TDF_TSLEEPQ;
cid = LOOKUP(td->td_wchan);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_REMOVE(&qp->queue, td, td_sleepq);
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
tsleep_remove(thread_t td)
* General sleep call. Suspends the current process until a wakeup is
* performed on the specified identifier. The process will then be made
* runnable with the specified priority. Sleeps at most timo/hz seconds
* (0 means no timeout). If flags includes the PCATCH flag, signals are checked
* before and after sleeping, else signals are not checked. Returns 0 if
* awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
* signal needs to be delivered, ERESTART is returned if the current system
* call should be restarted if possible, and EINTR is returned if the system
* call should be interrupted by the signal (return EINTR).
* Note that if we are a process, we release_curproc() before messing with
* the LWKT scheduler.
* During autoconfiguration or after a panic, a sleep will simply
* lower the priority briefly to allow interrupts, then return.
* WARNING! This code can't block (short of switching away), or bad things
* will happen. No getting tokens, no blocking locks, etc.
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
struct thread *td = curthread;
struct lwp *lp = td->td_lwp;
struct proc *p = td->td_proc; /* may be NULL */
struct callout thandle;
* Currently a severe hack. Make sure any delayed wakeups
* are flushed before we sleep or we might deadlock on whatever
* event we are sleeping on.
if (td->td_flags & TDF_DELAYED_WAKEUP)
wakeup_end_delayed();
* NOTE: removed KTRPOINT, it could cause races due to blocking
* even in stable. Just scrap it for now.
if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
* After a panic, or before we actually have an operational
* softclock, just give interrupts a chance, then just return;
* don't run any other procs or panic below,
* in case this is the idle process and already asleep.
lwkt_setpri_self(safepri);
lwkt_setpri_self(oldpri);
logtsleep2(tsleep_beg, ident);
KKASSERT(td != &gd->gd_idlethread); /* you must be kidding! */
* NOTE: all of this occurs on the current cpu, including any
* callout-based wakeups, so a critical section is a sufficient
* The entire sequence through to where we actually sleep must
* run without breaking the critical section.
catch = flags & PCATCH;
crit_enter_quick(td);
KASSERT(ident != NULL, ("tsleep: no ident"));
KASSERT(lp == NULL ||
lp->lwp_stat == LSRUN || /* Obvious */
lp->lwp_stat == LSSTOP, /* Set in tstop */
ident, wmesg, lp->lwp_stat));
* We interlock the sleep queue if the caller has not already done
* it for us. This must be done before we potentially acquire any
* tokens or we can lose the wakeup.
if ((flags & PINTERLOCKED) == 0) {
_tsleep_interlock(gd, ident, flags);
* Setup for the current process (if this is a process). We must
* interlock with lwp_token to avoid remote wakeup races via
lwkt_gettoken(&lp->lwp_token);
* If the umbrella process is in the SCORE state then
* make sure that the thread is flagged going into a
* normal sleep to allow the core dump to proceed, otherwise
* the coredump can end up waiting forever. If the normal
* sleep is woken up, the thread will enter a stopped state
* upon return to userland.
* We do not want to interrupt or cause a thread exit at
* this juncture because that will mess up the state the
* coredump is trying to save.
if (p->p_stat == SCORE) {
lwkt_gettoken(&p->p_token);
if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
lwkt_reltoken(&p->p_token);
* Early termination if PCATCH was set and a
* signal is pending, interlocked with the
* Early termination only occurs when tsleep() is
* entered while in a normal LSRUN state.
if ((sig = CURSIG(lp)) != 0)
* Causes ksignal to wake us up if a signal is
* received (interlocked with lp->lwp_token).
lp->lwp_flags |= LWP_SINTR;
* Make sure the current process has been untangled from
* the userland scheduler and initialize slptime to start
* NOTE: td->td_wakefromcpu is pre-set by the release function
* for the dfly scheduler, and then adjusted by _wakeup()
p->p_usched->release_curproc(lp);
* For PINTERLOCKED operation, TDF_TSLEEPQ might not be set if
* a wakeup() was processed before the thread could go to sleep.
* If TDF_TSLEEPQ is set, make sure the ident matches the recorded
* ident. If it does not, then the thread slept in between the
* caller's initial tsleep_interlock() call and the caller's tsleep()
* Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
* to process incoming IPIs, thus draining incoming wakeups.
if ((td->td_flags & TDF_TSLEEPQ) == 0) {
logtsleep2(ilockfail, ident);
} else if (td->td_wchan != ident ||
td->td_wdomain != (flags & PDOMAIN_MASK)) {
logtsleep2(ilockfail, ident);
* scheduling is blocked while in a critical section. Coincide
* the descheduled-by-tsleep flag with the descheduling of the
* The timer callout is localized on our cpu and interlocked by
* our critical section.
lwkt_deschedule_self(td);
td->td_flags |= TDF_TSLEEP_DESCHEDULED;
td->td_wmesg = wmesg;
* Setup the timeout, if any. The timeout is only operable while
* the thread is flagged descheduled.
KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
callout_init_mp(&thandle);
callout_reset(&thandle, timo, endtsleep, td);
* Ok, we are sleeping. Place us in the SSLEEP state.
KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
* tstop() sets LSSTOP, so don't fiddle with that.
if (lp->lwp_stat != LSSTOP)
lp->lwp_stat = LSSLEEP;
lp->lwp_ru.ru_nvcsw++;
p->p_usched->uload_update(lp);
* And when we are woken up, put us back in LSRUN. If we
* slept for over a second, recalculate our estcpu.
lp->lwp_stat = LSRUN;
if (lp->lwp_slptime) {
p->p_usched->uload_update(lp);
p->p_usched->recalculate(lp);
* Make sure we haven't switched cpus while we were asleep. It's
* not supposed to happen. Cleanup our temporary flags.
KKASSERT(gd == td->td_gd);
* Cleanup the timeout. If the timeout has already occurred, thandle
* has already been stopped, otherwise stop thandle. If the timeout
* is running (the callout thread must be blocked trying to get
* lwp_token) then wait for us to get scheduled.
while (td->td_flags & TDF_TIMEOUT_RUNNING) {
/* else we won't get rescheduled! */
if (lp->lwp_stat != LSSTOP)
lp->lwp_stat = LSSLEEP;
lwkt_deschedule_self(td);
td->td_wmesg = "tsrace";
kprintf("td %p %s: timeout race\n", td, td->td_comm);
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
/* does not block when on same cpu */
callout_cancel(&thandle);
td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
* Make sure we have been removed from the sleepq. In most
* cases this will have been done for us already but it is
* possible for a scheduling IPI to be in-flight from a
* previous tsleep/tsleep_interlock() or due to a straight-out
* call to lwkt_schedule() (in the case of an interrupt thread),
* causing a spurious wakeup.
* Figure out the correct error return. If interrupted by a
* signal we want to return EINTR or ERESTART.
if (catch && error == 0) {
if (sig != 0 || (sig = CURSIG(lp))) {
if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
lp->lwp_flags &= ~LWP_SINTR;
* Unconditionally set us to LSRUN on resume. lwp_stat could
* be in a weird state due to the goto resume, particularly
* when tsleep() is called from tstop().
lp->lwp_stat = LSRUN;
lwkt_reltoken(&lp->lwp_token);
logtsleep1(tsleep_end);
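/*
 * Minimal usage sketch of the basic (non-interlocked) form, with a
 * hypothetical example_done flag protected by the caller's own
 * serialization.  The waiter:
 *
 *	while (example_done == 0)
 *		tsleep(&example_done, 0, "exdone", hz);
 *
 * and the producer:
 *
 *	example_done = 1;
 *	wakeup(&example_done);
 *
 * With a timo of hz the sleep also times out after roughly one second
 * and returns EWOULDBLOCK, which the loop above simply retries.
 */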
* Interlocked spinlock sleep. An exclusively held spinlock must
* be passed to ssleep(). The function will atomically release the
* spinlock and tsleep on the ident, then reacquire the spinlock and
* This routine is fairly important along the critical path, so optimize it
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
spin_unlock_quick(gd, spin);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
KKASSERT(gd == mycpu);
_spin_lock_quick(gd, spin, wmesg);
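/*
 * Usage sketch (hypothetical names): ssleep() folds the interlock
 * pattern into one call for spinlock-protected state:
 *
 *	spin_lock(&example_spin);
 *	while (example_cond == 0)
 *		ssleep(&example_cond, &example_spin, 0, "exspin", 0);
 *	spin_unlock(&example_spin);
 *
 * The spinlock is dropped while the thread sleeps and reacquired
 * before ssleep() returns.
 */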
lksleep(const volatile void *ident, struct lock *lock, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
lockmgr(lock, LK_RELEASE);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
lockmgr(lock, LK_EXCLUSIVE);
* Interlocked mutex sleep. An exclusively held mutex must be passed
* to mtxsleep(). The function will atomically release the mutex
* and tsleep on the ident, then reacquire the mutex and return.
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
mtx_lock_ex_quick(mtx);
* Interlocked serializer sleep. An exclusively held serializer must
* be passed to zsleep(). The function will atomically release
* the serializer and tsleep on the ident, then reacquire the serializer
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
ASSERT_SERIALIZED(slz);
_tsleep_interlock(gd, ident, flags);
lwkt_serialize_exit(slz);
ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
lwkt_serialize_enter(slz);
* Directly block on the LWKT thread by descheduling it. This
* is much faster than tsleep(), but the only legal way to wake
* us up is to directly schedule the thread.
* Setting TDF_SINTR will cause new signals to directly schedule us.
* This routine must be called while in a critical section.
lwkt_sleep(const char *wmesg, int flags)
thread_t td = curthread;
if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
td->td_flags |= TDF_BLOCKED;
td->td_wmesg = wmesg;
lwkt_deschedule_self(td);
td->td_flags &= ~TDF_BLOCKED;
if ((sig = CURSIG(td->td_lwp)) != 0) {
if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
td->td_flags |= TDF_BLOCKED | TDF_SINTR;
td->td_wmesg = wmesg;
lwkt_deschedule_self(td);
td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
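/*
 * Usage sketch (hypothetical): the waiter, already inside a critical
 * section, blocks directly on its own thread:
 *
 *	crit_enter();
 *	lwkt_sleep("exblkd", 0);
 *	crit_exit();
 *
 * and is resumed only when some other thread that holds the thread
 * pointer calls lwkt_schedule(td) directly; a wakeup() on an ident
 * will not find it because the thread is not on any sleep queue.
 */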
* Implement the timeout for tsleep.
* This type of callout timeout is scheduled on the same cpu the process
* is sleeping on. Also, at the moment, the MP lock is held.
* We are going to have to get the lwp_token, which means we might
* block. This can race a tsleep getting woken up by other means
* so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
* processing to complete (sorry tsleep!).
* We can safely set td_flags because td MUST be on the same cpu
KKASSERT(td->td_gd == mycpu);
td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;
* This can block but TDF_TIMEOUT_RUNNING will prevent the thread
* from exiting the tsleep on us. The flag is interlocked by virtue
* of lp being on the same cpu as we are.
if ((lp = td->td_lwp) != NULL)
lwkt_gettoken(&lp->lwp_token);
KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);
* callout timer should normally never be set in tstop()
* because it passes a timeout of 0. However, there is a
* case during thread exit (which SSTOP's all the threads)
* for which tstop() must break out and can (properly) leave
* the thread in LSSTOP.
KKASSERT(lp->lwp_stat != LSSTOP ||
(lp->lwp_mpflags & LWP_MP_WEXIT));
lwkt_reltoken(&lp->lwp_token);
KKASSERT(td->td_gd == mycpu);
td->td_flags &= ~TDF_TIMEOUT_RUNNING;
* Make all processes sleeping on the specified identifier runnable.
* count may be zero or one only.
* The domain encodes the sleep/wakeup domain, flags, plus the originating
* cpu.
* This call may run without the MP lock held. We can only manipulate thread
* state on the cpu owning the thread. We CANNOT manipulate process state
* _wakeup() can be passed to an IPI so we can't use (const volatile
_wakeup(void *ident, int domain)
logtsleep2(wakeup_beg, ident);
cid = LOOKUP(ident);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_sleepq);
if (td->td_wchan == ident &&
td->td_wdomain == (domain & PDOMAIN_MASK)
KKASSERT(td->td_gd == gd);
td->td_wakefromcpu = PWAKEUP_DECODE(domain);
if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
if (domain & PWAKEUP_ONE)
if (td->td_wchan == qp->ident0)
else if (td->td_wchan == qp->ident1)
else if (td->td_wchan == qp->ident2)
else if (td->td_wchan == qp->ident3)
wids |= 16; /* force ident0 to be retained (-1) */
* Because a bunch of cpumask array entries cover the same queue, it
* is possible for our bit to remain set in some of them and cause
* spurious wakeup IPIs later on. Make sure that the bit is cleared
* when a spurious IPI occurs to prevent further spurious IPIs.
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
if ((wids & 1) == 0) {
if ((wids & 16) == 0) {
KKASSERT(qp->ident0 == (void *)(intptr_t)-1);
if ((wids & 2) == 0)
if ((wids & 4) == 0)
if ((wids & 8) == 0)
* We finished checking the current cpu but there still may be
* more work to do. Either wakeup_one was requested and no matching
* thread was found, or a normal wakeup was requested and we have
* to continue checking cpus.
* It should be noted that this scheme is actually less expensive than
* the old scheme when waking up multiple threads, since we send
* only one IPI message per target candidate which may then schedule
* multiple threads. Before, we could have wound up sending an IPI
* message for each thread on the target cpu (!= current cpu) that
* needed to be woken up.
* NOTE: Wakeups occurring on remote cpus are asynchronous. This
* should be ok since we are passing idents in the IPI rather
* than thread pointers.
* NOTE: We MUST mfence (or use an atomic op) prior to reading
* the cpumask, as another cpu may have written to it in
* a fashion interlocked with whatever the caller did before
* calling wakeup(). Otherwise we might miss the interaction
* (kern_mutex.c can cause this problem).
* lfence is insufficient as it may allow a written state to
* reorder around the cpumask load.
if ((domain & PWAKEUP_MYCPU) == 0) {
const volatile void *id0;
mask = slpque_cpumasks[cid];
CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
while (CPUMASK_TESTNZERO(mask)) {
n = BSRCPUMASK(mask);
CPUMASK_NANDBIT(mask, n);
tgd = globaldata_find(n);
* Both ident0 compares must come from a single load
* to avoid ident0 update races crossing the two
* compares.
qp = &tgd->gd_tsleep_hash[gid];
if (id0 == (void *)(intptr_t)-1) {
lwkt_send_ipiq2(tgd, _wakeup, ident,
domain | PWAKEUP_MYCPU);
++tgd->gd_cnt.v_wakeup_colls;
} else if (id0 == ident ||
qp->ident1 == ident ||
qp->ident2 == ident ||
qp->ident3 == ident) {
lwkt_send_ipiq2(tgd, _wakeup, ident,
domain | PWAKEUP_MYCPU);
if (CPUMASK_TESTNZERO(mask)) {
lwkt_send_ipiq2_mask(mask, _wakeup, ident,
domain | PWAKEUP_MYCPU);
logtsleep1(wakeup_end);
* Wakeup all threads tsleep()ing on the specified ident, on all cpus
wakeup(const volatile void *ident)
globaldata_t gd = mycpu;
thread_t td = gd->gd_curthread;
if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
* If we are in a delayed wakeup section, record up to two wakeups in
* a per-CPU queue and issue them when we block or exit the delayed
if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
* Wakeup one thread tsleep()ing on the specified ident, on any cpu.
wakeup_one(const volatile void *ident)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
* Wakeup threads tsleep()ing on the specified ident on the current cpu
wakeup_mycpu(const volatile void *ident)
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
* Wakeup one thread tsleep()ing on the specified ident on the current cpu
wakeup_mycpu_one(const volatile void *ident)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
* Wakeup all threads tsleep()ing on the specified ident on the specified cpu
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
globaldata_t mygd = mycpu;
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
* Wakeup one thread tsleep()ing on the specified ident on the specified cpu
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
globaldata_t mygd = mycpu;
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
* Wakeup all threads waiting on the specified ident that slept using
* the specified domain, on all cpus.
wakeup_domain(const volatile void *ident, int domain)
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
* Wakeup one thread waiting on the specified ident that slept using
* the specified domain, on any cpu.
wakeup_domain_one(const volatile void *ident, int domain)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident),
PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
wakeup_start_delayed(void)
globaldata_t gd = mycpu;
gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
wakeup_end_delayed(void)
globaldata_t gd = mycpu;
if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
if (gd->gd_delayed_wakeup[0]) {
wakeup(gd->gd_delayed_wakeup[0]);
gd->gd_delayed_wakeup[0] = NULL;
if (gd->gd_delayed_wakeup[1]) {
wakeup(gd->gd_delayed_wakeup[1]);
gd->gd_delayed_wakeup[1] = NULL;
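/*
 * Usage sketch (hypothetical idents): a path that issues several
 * wakeups in quick succession can batch them:
 *
 *	wakeup_start_delayed();
 *	wakeup(&example_ident_a);	(recorded, not yet issued)
 *	wakeup(&example_ident_b);	(recorded, not yet issued)
 *	wakeup_end_delayed();		(both wakeups issued here)
 *
 * Only two idents are held per cpu; additional wakeup() calls inside
 * the section rotate through the slots, causing the oldest recorded
 * wakeup to be issued immediately, so nothing is dropped.
 */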
* Make a process runnable. lp->lwp_token must be held on call and this
* function must be called from the cpu owning lp.
* This only has an effect if we are in LSSTOP or LSSLEEP.
setrunnable(struct lwp *lp)
thread_t td = lp->lwp_thread;
ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
KKASSERT(td->td_gd == mycpu);
if (lp->lwp_stat == LSSTOP)
lp->lwp_stat = LSSLEEP;
if (lp->lwp_stat == LSSLEEP) {
} else if (td->td_flags & TDF_SINTR) {
* The process is stopped due to some condition, usually because p_stat is
* set to SSTOP, but also possibly due to being traced.
* Caller must hold p->p_token
* NOTE! If the caller sets SSTOP, the caller must also clear P_WAITED
* because the parent may check the child's status before the child actually
* gets to this routine.
* This routine is called with the current lwp only, typically just
* before returning to userland if the process state is detected as
* possibly being in a stopped state.
struct lwp *lp = curthread->td_lwp;
struct proc *p = lp->lwp_proc;
lwkt_gettoken(&lp->lwp_token);
* If LWP_MP_WSTOP is set, we were sleeping
* while our process was stopped. At this point
* we were already counted as stopped.
if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
* If we're the last thread to stop, signal
atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
wakeup(&p->p_nstopped);
if (p->p_nstopped == p->p_nthreads) {
* Token required to interlock kern_wait()
lwkt_gettoken(&q->p_token);
p->p_flags &= ~P_WAITED;
if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
ksignal(q, SIGCHLD);
lwkt_reltoken(&q->p_token);
* Wait here while in a stopped state, interlocked with lwp_token.
* We must break-out if the whole process is trying to exit.
while (STOPLWP(p, lp)) {
lp->lwp_stat = LSSTOP;
tsleep(p, 0, "stop", 0);
atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
lwkt_reltoken(&lp->lwp_token);
* Compute a tenex style load average of a quantity on
* 1, 5 and 15 minute intervals. This is a pcpu callout.
* We segment the lwp scan on a pcpu basis. This does NOT
* mean the associated lwps are on this cpu; it is done
* just to break the work up.
* The callout on cpu0 rolls up the stats from the other
static int loadav_count_runnable(struct lwp *p, void *data);
globaldata_t gd = mycpu;
struct loadavg *avg;
alllwp_scan(loadav_count_runnable, &nrun, 1);
gd->gd_loadav_nrunnable = nrun;
if (gd->gd_cpuid == 0) {
for (i = 0; i < ncpus; ++i)
nrun += globaldata_find(i)->gd_loadav_nrunnable;
for (i = 0; i < 3; i++) {
avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
(long)nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
* Schedule the next update to occur after 5 seconds, but add a
* random variation to avoid synchronisation with processes that
* run at regular intervals.
callout_reset(&gd->gd_loadav_callout,
hz * 4 + (int)(krandom() % (hz * 2 + 1)),
loadav_count_runnable(struct lwp *lp, void *data)
switch (lp->lwp_stat) {
if ((td = lp->lwp_thread) == NULL)
if (td->td_flags & TDF_BLOCKED)
* Regular data collection
collect_load_callback(int n)
int fscale = averunnable.fscale;
return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
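/*
 * For reference, consumers of the raw loadavg perform the inverse
 * conversion, e.g. load = (double)ldavg[i] / fscale as getloadavg(3)
 * does; with fscale = 2048, an ldavg[0] of 3072 corresponds to the
 * familiar 1.50, and the callback above reports it as 150.
 */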
sched_setup(void *dummy __unused)
globaldata_t save_gd = mycpu;
kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));
* Kick off timeout-driven events by calling them the first time. We
* split the work across available cpus to help scale it; it can eat
* a lot of cpu when there are a lot of processes
for (n = 0; n < ncpus; ++n) {
gd = globaldata_find(n);
lwkt_setcpu_self(gd);
callout_init_mp(&gd->gd_loadav_callout);
callout_init_mp(&gd->gd_schedcpu_callout);
lwkt_setcpu_self(save_gd);
* Extremely early initialization: dummy up the tables so we don't have
* to conditionalize for NULL in _wakeup() and tsleep_interlock(). Even
* though the system isn't blocking this early, these functions still
* try to access the hash table.
* This setup will be overridden once sched_dyninit() -> sleep_gdinit()
sleep_early_gdinit(globaldata_t gd)
static struct tslpque dummy_slpque;
static cpumask_t dummy_cpumasks;
slpque_tablesize = 1;
gd->gd_tsleep_hash = &dummy_slpque;
slpque_cpumasks = &dummy_cpumasks;
TAILQ_INIT(&dummy_slpque.queue);
* PCPU initialization. Called after KMALLOC is operational, by
* sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
* WARNING! The pcpu hash table is smaller than the global cpumask
* hash table, which can save us a lot of memory when maxproc
sleep_gdinit(globaldata_t gd)
* This shouldn't happen; that is, there shouldn't be any threads
* waiting on the dummy tsleep queue this early in the boot.
if (gd->gd_cpuid == 0) {
struct tslpque *qp = &gd->gd_tsleep_hash[0];
TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
* Note that we have to allocate one extra slot because we are
* shifting a modulo value. TCHASHSHIFT(slpque_tablesize - 1) can
* return the same value as TCHASHSHIFT(slpque_tablesize).
n = TCHASHSHIFT(slpque_tablesize) + 1;
hash_size = sizeof(struct tslpque) * n;
gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
KM_CPU(gd->gd_cpuid));
memset(gd->gd_tsleep_hash, 0, hash_size);
for (i = 0; i < n; ++i)
TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
* Dynamic initialization after the memory system is operational.
sched_dyninit(void *dummy __unused)
* Calculate table size for slpque hash. We want a prime number
* large enough to avoid overloading slpque_cpumasks when the
* system has a large number of sleeping processes, which will
* spam IPIs on wakeup().
* While it is true this is really a per-lwp factor, generally
* speaking the maxproc limit is a good metric to go by.
for (tblsize = maxproc | 1; ; tblsize += 2) {
if (tblsize % 3 == 0)
if (tblsize % 5 == 0)
tblsize2 = (tblsize / 2) | 1;
for (n = 7; n < tblsize2; n += 2) {
if (tblsize % n == 0)
* PIDs are currently limited to 6 digits. Cap the table size
if (tblsize > 2000003)
slpque_tablesize = tblsize;
slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
M_TSLEEP, M_WAITOK | M_ZERO);
sleep_gdinit(mycpu);