 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * @(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>
#include <sys/ktrace.h>
#include <sys/serialize.h>
#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <vm/vm_extern.h>
TAILQ_HEAD(, thread) queue;
const volatile void *ident0;
const volatile void *ident1;
const volatile void *ident2;
const volatile void *ident3;
static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);
int ncpus_fit, ncpus_fit_mask;		/* note: mask not cpumask_t */
int tsleep_crypto_dump = 0;
MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");
#define __DEALL(ident)	__DEQUALIFY(void *, ident)
#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);
#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
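 *
 * With a 5 second sampling interval a 1-minute average spans 60/5 = 12
 * samples, so the per-sample decay factor is exp(-1/12) ~= 0.92004;
 * likewise exp(-1/60) for 5 minutes and exp(-1/180) for 15 minutes.
 * loadav() below applies, in FSCALE fixed point, the recurrence
 * avg = avg * c + nrun * (1 - c), with c = cexp[i] / FSCALE.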
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
static void endtsleep (void *);
static void loadav (void *arg);
static void schedcpu (void *arg);
static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW, &pctcpu_decay, 0, "");
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
 * Recompute process priorities, once a second.
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t are guaranteed to hold
 * several seconds worth of count.
 * WARNING! callouts can preempt normal threads. However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);
allproc_scan(schedcpu_stats, NULL, 1);
allproc_scan(schedcpu_resource, NULL, 1);
if (mycpu->gd_cpuid == 0) {
wakeup((caddr_t)&lbolt);
wakeup(lbolt_syncer);
callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
 * General process statistics once a second
schedcpu_stats(struct proc *p, void *data __unused)
 * Threads may not be completely set up if process in SIDL state.
if (p->p_stat == SIDL)
if (lwkt_trytoken(&p->p_token) == FALSE) {
FOREACH_LWP_IN_PROC(lp, p) {
if (lp->lwp_stat == LSSLEEP) {
if (lp->lwp_slptime == 1)
p->p_usched->uload_update(lp);
 * Only recalculate processes that are active or have slept
 * less than 2 seconds. The schedulers understand this.
 * Otherwise decay by 50% per second.
if (lp->lwp_slptime <= 1) {
p->p_usched->recalculate(lp);
decay = pctcpu_decay;
lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
lwkt_reltoken(&p->p_token);
 * Resource checks. XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second. There is probably
schedcpu_resource(struct proc *p, void *data __unused)
if (p->p_stat == SIDL)
if (lwkt_trytoken(&p->p_token) == FALSE) {
if (p->p_stat == SZOMB || p->p_limit == NULL) {
lwkt_reltoken(&p->p_token);
FOREACH_LWP_IN_PROC(lp, p) {
 * We may have caught an lp in the middle of being
 * created, lwp_thread can be NULL.
if (lp->lwp_thread) {
ttime += lp->lwp_thread->td_sticks;
ttime += lp->lwp_thread->td_uticks;
switch(plimit_testcpulimit(p->p_limit, ttime)) {
case PLIMIT_TESTCPU_KILL:
killproc(p, "exceeded maximum CPU limit");
case PLIMIT_TESTCPU_XCPU:
if ((p->p_flags & P_XCPU) == 0) {
p->p_flags |= P_XCPU;
lwkt_reltoken(&p->p_token);
 * This is only used by ps. Generate a cpu usage percentage over
 * a period of one second.
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
acc = (cpticks << FSHIFT) / ttlticks;
if (ttlticks >= ESTCPUFREQ) {
lp->lwp_pctcpu = acc;
remticks = ESTCPUFREQ - ttlticks;
lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
 * Handy macros to calculate hash indices. LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost. The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
#define LOOKUP_PRIME	66555444443333333ULL
#define LOOKUP(x)	((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^ \
			 LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)	((x) >> 4)
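/*
 * Sketch of how an ident maps through the two-level hash, based on the
 * macros above: LOOKUP(ident) mixes the pointer bits with LOOKUP_PRIME and
 * reduces modulo slpque_tablesize to produce the global cpumask index
 * 'cid'; TCHASHSHIFT(cid) shifts off the low 4 bits to produce the much
 * smaller per-cpu queue index 'gid'.  Both tsleep_interlock() and _wakeup()
 * follow this pattern:
 *
 *	cid = LOOKUP(ident);
 *	gid = TCHASHSHIFT(cid);
 *	qp = &gd->gd_tsleep_hash[gid];
 */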
static uint32_t slpque_tablesize;
static cpumask_t *slpque_cpumasks;
SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section. The sequence is:
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us. When tsleep() is later called with PINTERLOCKED it
 * assumes the thread was already queued, otherwise it queues it there.
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
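 *
 * A minimal usage sketch (the 'lk' lockmgr lock and 'obj' wait channel
 * are hypothetical, shown only to illustrate the ordering):
 *
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	while (obj->busy) {
 *		tsleep_interlock(obj, 0);
 *		lockmgr(&lk, LK_RELEASE);
 *		tsleep(obj, PINTERLOCKED, "objwait", 0);
 *		lockmgr(&lk, LK_EXCLUSIVE);
 *	}
 *	lockmgr(&lk, LK_RELEASE);
 *
 * A wakeup(obj) issued between the tsleep_interlock() and the tsleep() is
 * not lost; tsleep() simply returns immediately in that case.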
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
thread_t td = gd->gd_curthread;
crit_enter_quick(td);
if (td->td_flags & TDF_TSLEEPQ) {
cid = LOOKUP(td->td_wchan);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_REMOVE(&qp->queue, td, td_sleepq);
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
td->td_flags |= TDF_TSLEEPQ;
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
if (qp->ident0 != ident && qp->ident1 != ident &&
qp->ident2 != ident && qp->ident3 != ident) {
if (qp->ident0 == NULL)
else if (qp->ident1 == NULL)
else if (qp->ident2 == NULL)
else if (qp->ident3 == NULL)
qp->ident0 = (void *)(intptr_t)-1;
ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
td->td_wchan = ident;
td->td_wdomain = flags & PDOMAIN_MASK;
tsleep_interlock(const volatile void *ident, int flags)
_tsleep_interlock(mycpu, ident, flags);
 * Remove thread from sleepq. Must be called with a critical section held.
 * The thread must not be migrating.
_tsleep_remove(thread_t td)
globaldata_t gd = mycpu;
KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
if (td->td_flags & TDF_TSLEEPQ) {
td->td_flags &= ~TDF_TSLEEPQ;
cid = LOOKUP(td->td_wchan);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
TAILQ_REMOVE(&qp->queue, td, td_sleepq);
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
tsleep_remove(thread_t td)
 * General sleep call. Suspends the current process until a wakeup is
 * performed on the specified identifier. The process will then be made
 * runnable with the specified priority. Sleeps at most timo/hz seconds
 * (0 means no timeout). If flags includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked. Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * WARNING! This code can't block (short of switching away), or bad things
 * will happen. No getting tokens, no blocking locks, etc.
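 *
 * Typical use, as a sketch (the wait channel 'chan' and the half-second
 * timeout are hypothetical; error handling shown only in outline):
 *
 *	error = tsleep(chan, PCATCH, "mywait", hz / 2);
 *	if (error == EWOULDBLOCK)
 *		... timed out ...
 *	else if (error == EINTR || error == ERESTART)
 *		... interrupted by a signal (PCATCH) ...
 *	else
 *		... awakened by a wakeup(chan) ...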
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
struct thread *td = curthread;
struct lwp *lp = td->td_lwp;
struct proc *p = td->td_proc;	/* may be NULL */
struct callout thandle;
 * Currently a severe hack. Make sure any delayed wakeups
 * are flushed before we sleep or we might deadlock on whatever
 * event we are sleeping on.
if (td->td_flags & TDF_DELAYED_WAKEUP)
wakeup_end_delayed();
 * NOTE: removed KTRPOINT, it could cause races due to blocking
 * even in stable. Just scrap it for now.
if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
 * After a panic, or before we actually have an operational
 * softclock, just give interrupts a chance, then just return;
 * don't run any other procs or panic below,
 * in case this is the idle process and already asleep.
lwkt_setpri_self(safepri);
lwkt_setpri_self(oldpri);
logtsleep2(tsleep_beg, ident);
KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
td->td_wakefromcpu = -1;		/* overwritten by _wakeup */
 * NOTE: all of this occurs on the current cpu, including any
 * callout-based wakeups, so a critical section is a sufficient
 * The entire sequence through to where we actually sleep must
 * run without breaking the critical section.
catch = flags & PCATCH;
crit_enter_quick(td);
KASSERT(ident != NULL, ("tsleep: no ident"));
KASSERT(lp == NULL ||
lp->lwp_stat == LSRUN ||	/* Obvious */
lp->lwp_stat == LSSTOP,		/* Set in tstop */
ident, wmesg, lp->lwp_stat));
 * We interlock the sleep queue if the caller has not already done
 * it for us. This must be done before we potentially acquire any
 * tokens or we can lose the wakeup.
if ((flags & PINTERLOCKED) == 0) {
_tsleep_interlock(gd, ident, flags);
 * Setup for the current process (if this is a process). We must
 * interlock with lwp_token to avoid remote wakeup races via
lwkt_gettoken(&lp->lwp_token);
 * If the umbrella process is in the SCORE state then
 * make sure that the thread is flagged going into a
 * normal sleep to allow the core dump to proceed, otherwise
 * the coredump can end up waiting forever. If the normal
 * sleep is woken up, the thread will enter a stopped state
 * upon return to userland.
 * We do not want to interrupt or cause a thread exit at
 * this juncture because that will mess up the state the
 * coredump is trying to save.
if (p->p_stat == SCORE &&
(lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
 * Early termination if PCATCH was set and a
 * signal is pending, interlocked with the
 * Early termination only occurs when tsleep() is
 * entered while in a normal LSRUN state.
if ((sig = CURSIG(lp)) != 0)
 * Causes ksignal to wake us up if a signal is
 * received (interlocked with lp->lwp_token).
lp->lwp_flags |= LWP_SINTR;
 * Make sure the current process has been untangled from
 * the userland scheduler and initialize slptime to start
 * NOTE: td->td_wakefromcpu is pre-set by the release function
 * for the dfly scheduler, and then adjusted by _wakeup()
p->p_usched->release_curproc(lp);
 * If the interlocked flag is set but our cpu bit in the slpqueue
 * is no longer set, then a wakeup was processed between the
 * tsleep_interlock() (ours or the callers), and here. This can
 * occur under numerous circumstances including when we release the
 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
 * to process incoming IPIs, thus draining incoming wakeups.
if ((td->td_flags & TDF_TSLEEPQ) == 0) {
logtsleep2(ilockfail, ident);
 * scheduling is blocked while in a critical section. Coincide
 * the descheduled-by-tsleep flag with the descheduling of the
 * The timer callout is localized on our cpu and interlocked by
 * our critical section.
lwkt_deschedule_self(td);
td->td_flags |= TDF_TSLEEP_DESCHEDULED;
td->td_wmesg = wmesg;
 * Setup the timeout, if any. The timeout is only operable while
 * the thread is flagged descheduled.
KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
callout_init_mp(&thandle);
callout_reset(&thandle, timo, endtsleep, td);
 * Ok, we are sleeping. Place us in the SSLEEP state.
KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
 * tstop() sets LSSTOP, so don't fiddle with that.
if (lp->lwp_stat != LSSTOP)
lp->lwp_stat = LSSLEEP;
lp->lwp_ru.ru_nvcsw++;
p->p_usched->uload_update(lp);
 * And when we are woken up, put us back in LSRUN. If we
 * slept for over a second, recalculate our estcpu.
lp->lwp_stat = LSRUN;
if (lp->lwp_slptime) {
p->p_usched->uload_update(lp);
p->p_usched->recalculate(lp);
 * Make sure we haven't switched cpus while we were asleep. It's
 * not supposed to happen. Cleanup our temporary flags.
KKASSERT(gd == td->td_gd);
 * Cleanup the timeout. If the timeout has already occurred thandle
 * has already been stopped, otherwise stop thandle. If the timeout
 * is running (the callout thread must be blocked trying to get
 * lwp_token) then wait for us to get scheduled.
while (td->td_flags & TDF_TIMEOUT_RUNNING) {
/* else we won't get rescheduled! */
if (lp->lwp_stat != LSSTOP)
lp->lwp_stat = LSSLEEP;
lwkt_deschedule_self(td);
td->td_wmesg = "tsrace";
kprintf("td %p %s: timeout race\n", td, td->td_comm);
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
/* does not block when on same cpu */
callout_stop(&thandle);
td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
 * Make sure we have been removed from the sleepq. In most
 * cases this will have been done for us already but it is
 * possible for a scheduling IPI to be in-flight from a
 * previous tsleep/tsleep_interlock() or due to a straight-out
 * call to lwkt_schedule() (in the case of an interrupt thread),
 * causing a spurious wakeup.
 * Figure out the correct error return. If interrupted by a
 * signal we want to return EINTR or ERESTART.
if (catch && error == 0) {
if (sig != 0 || (sig = CURSIG(lp))) {
if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
lp->lwp_flags &= ~LWP_SINTR;
 * Unconditionally set us to LSRUN on resume. lwp_stat could
 * be in a weird state due to the goto resume, particularly
 * when tsleep() is called from tstop().
lp->lwp_stat = LSRUN;
lwkt_reltoken(&lp->lwp_token);
logtsleep1(tsleep_end);
 * Interlocked spinlock sleep. An exclusively held spinlock must
 * be passed to ssleep(). The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * This routine is fairly important along the critical path, so optimize it
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
spin_unlock_quick(gd, spin);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
_spin_lock_quick(gd, spin, wmesg);
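/*
 * Usage sketch for the interlocked sleep family (ssleep shown; lksleep,
 * mtxsleep and zsleep follow the same pattern with their respective lock
 * types).  The 'sc' structure and its 'ready' condition are hypothetical:
 *
 *	spin_lock(&sc->spin);
 *	while (sc->ready == 0)
 *		ssleep(&sc->ready, &sc->spin, 0, "scwait", 0);
 *	spin_unlock(&sc->spin);
 *
 * The spinlock is exchanged for the sleep atomically, so a wakeup(&sc->ready)
 * issued by another thread while holding sc->spin cannot be missed.
 */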
lksleep(const volatile void *ident, struct lock *lock, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
lockmgr(lock, LK_RELEASE);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
lockmgr(lock, LK_EXCLUSIVE);
 * Interlocked mutex sleep. An exclusively held mutex must be passed
 * to mtxsleep(). The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
_tsleep_interlock(gd, ident, flags);
error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
mtx_lock_ex_quick(mtx);
 * Interlocked serializer sleep. An exclusively held serializer must
 * be passed to zsleep(). The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
const char *wmesg, int timo)
globaldata_t gd = mycpu;
ASSERT_SERIALIZED(slz);
_tsleep_interlock(gd, ident, flags);
lwkt_serialize_exit(slz);
ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
lwkt_serialize_enter(slz);
 * Directly block on the LWKT thread by descheduling it. This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 * This routine must be called while in a critical section.
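 *
 * Hypothetical pairing, for illustration only: the sleeping side runs
 *
 *	crit_enter();
 *	lwkt_sleep("lwkwait", 0);
 *	crit_exit();
 *
 * and the waking side, which must know the thread pointer, runs
 *
 *	lwkt_schedule(td);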
lwkt_sleep(const char *wmesg, int flags)
thread_t td = curthread;
if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
td->td_flags |= TDF_BLOCKED;
td->td_wmesg = wmesg;
lwkt_deschedule_self(td);
td->td_flags &= ~TDF_BLOCKED;
if ((sig = CURSIG(td->td_lwp)) != 0) {
if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
td->td_flags |= TDF_BLOCKED | TDF_SINTR;
td->td_wmesg = wmesg;
lwkt_deschedule_self(td);
td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
 * Implement the timeout for tsleep.
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on. Also, at the moment, the MP lock is held.
 * We are going to have to get the lwp_token, which means we might
 * block. This can race a tsleep getting woken up by other means
 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
 * processing to complete (sorry tsleep!).
 * We can safely set td_flags because td MUST be on the same cpu
KKASSERT(td->td_gd == mycpu);
td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;
 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
 * from exiting the tsleep on us. The flag is interlocked by virtue
 * of lp being on the same cpu as we are.
if ((lp = td->td_lwp) != NULL)
lwkt_gettoken(&lp->lwp_token);
KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);
 * callout timer should normally never be set in tstop()
 * because it passes a timeout of 0. However, there is a
 * case during thread exit (which SSTOP's all the threads)
 * for which tstop() must break out and can (properly) leave
 * the thread in LSSTOP.
KKASSERT(lp->lwp_stat != LSSTOP ||
(lp->lwp_mpflags & LWP_MP_WEXIT));
lwkt_reltoken(&lp->lwp_token);
KKASSERT(td->td_gd == mycpu);
td->td_flags &= ~TDF_TIMEOUT_RUNNING;
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * This call may run without the MP lock held. We can only manipulate thread
 * state on the cpu owning the thread. We CANNOT manipulate process state
 * _wakeup() can be passed to an IPI so we can't use (const volatile
_wakeup(void *ident, int domain)
logtsleep2(wakeup_beg, ident);
gid = TCHASHSHIFT(cid);
qp = &gd->gd_tsleep_hash[gid];
for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_sleepq);
if (td->td_wchan == ident &&
td->td_wdomain == (domain & PDOMAIN_MASK)
KKASSERT(td->td_gd == gd);
td->td_wakefromcpu = PWAKEUP_DECODE(domain);
if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
if (domain & PWAKEUP_ONE)
if (td->td_wchan == qp->ident0)
else if (td->td_wchan == qp->ident1)
else if (td->td_wchan == qp->ident2)
else if (td->td_wchan == qp->ident3)
wids |= 16;	/* force ident0 to be retained (-1) */
 * Because a bunch of cpumask array entries cover the same queue, it
 * is possible for our bit to remain set in some of them and cause
 * spurious wakeup IPIs later on. Make sure that the bit is cleared
 * when a spurious IPI occurs to prevent further spurious IPIs.
if (TAILQ_FIRST(&qp->queue) == NULL) {
ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
if ((wids & 1) == 0) {
if ((wids & 16) == 0)
 * We finished checking the current cpu but there still may be
 * more work to do. Either wakeup_one was requested and no matching
 * thread was found, or a normal wakeup was requested and we have
 * to continue checking cpus.
 * It should be noted that this scheme is actually less expensive than
 * the old scheme when waking up multiple threads, since we send
 * only one IPI message per target candidate which may then schedule
 * multiple threads. Before we could have wound up sending an IPI
 * message for each thread on the target cpu (!= current cpu) that
 * needed to be woken up.
 * NOTE: Wakeups occurring on remote cpus are asynchronous. This
 * should be ok since we are passing idents in the IPI rather
 * than thread pointers.
 * NOTE: We MUST mfence (or use an atomic op) prior to reading
 * the cpumask, as another cpu may have written to it in
 * a fashion interlocked with whatever the caller did before
 * calling wakeup(). Otherwise we might miss the interaction
 * (kern_mutex.c can cause this problem).
 * lfence is insufficient as it may allow a written state to
 * reorder around the cpumask load.
if ((domain & PWAKEUP_MYCPU) == 0) {
const volatile void *id0;
mask = slpque_cpumasks[cid];
CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
while (CPUMASK_TESTNZERO(mask)) {
n = BSRCPUMASK(mask);
CPUMASK_NANDBIT(mask, n);
tgd = globaldata_find(n);
qp = &tgd->gd_tsleep_hash[gid];
 * Both ident0 compares must come from a single load
 * to avoid ident0 update races crossing the two
if (id0 == (void *)(intptr_t)-1) {
lwkt_send_ipiq2(tgd, _wakeup, ident,
domain | PWAKEUP_MYCPU);
++tgd->gd_cnt.v_wakeup_colls;
} else if (id0 == ident ||
qp->ident1 == ident ||
qp->ident2 == ident ||
qp->ident3 == ident) {
lwkt_send_ipiq2(tgd, _wakeup, ident,
domain | PWAKEUP_MYCPU);
lwkt_send_ipiq2_mask(mask, _wakeup, ident,
domain | PWAKEUP_MYCPU);
logtsleep1(wakeup_end);
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus
wakeup(const volatile void *ident)
globaldata_t gd = mycpu;
thread_t td = gd->gd_curthread;
if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
 * If we are in a delayed wakeup section, record up to two wakeups in
 * a per-CPU queue and issue them when we block or exit the delayed
if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
wakeup_one(const volatile void *ident)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
wakeup_mycpu(const volatile void *ident)
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
wakeup_mycpu_one(const volatile void *ident)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
 * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
globaldata_t mygd = mycpu;
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
globaldata_t mygd = mycpu;
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
PWAKEUP_MYCPU | PWAKEUP_ONE);
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
wakeup_domain(const volatile void *ident, int domain)
_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
wakeup_domain_one(const volatile void *ident, int domain)
/* XXX potentially round-robin the first responding cpu */
_wakeup(__DEALL(ident),
PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
wakeup_start_delayed(void)
globaldata_t gd = mycpu;
gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
wakeup_end_delayed(void)
globaldata_t gd = mycpu;
if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
if (gd->gd_delayed_wakeup[0]) {
wakeup(gd->gd_delayed_wakeup[0]);
gd->gd_delayed_wakeup[0] = NULL;
if (gd->gd_delayed_wakeup[1]) {
wakeup(gd->gd_delayed_wakeup[1]);
gd->gd_delayed_wakeup[1] = NULL;
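/*
 * Sketch of a delayed wakeup section as handled above (the caller and the
 * 'foo'/'bar' wait channels are hypothetical).  Up to two wakeups issued
 * inside the section are deferred and batched when the section ends:
 *
 *	wakeup_start_delayed();
 *	... update the state the sleepers are waiting on ...
 *	wakeup(foo);
 *	wakeup(bar);
 *	wakeup_end_delayed();
 */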
 * Make a process runnable. lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 * This only has an effect if we are in LSSTOP or LSSLEEP.
setrunnable(struct lwp *lp)
thread_t td = lp->lwp_thread;
ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
KKASSERT(td->td_gd == mycpu);
if (lp->lwp_stat == LSSTOP)
lp->lwp_stat = LSSLEEP;
if (lp->lwp_stat == LSSLEEP) {
} else if (td->td_flags & TDF_SINTR) {
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 * Caller must hold p->p_token
 * NOTE! If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
struct lwp *lp = curthread->td_lwp;
struct proc *p = lp->lwp_proc;
lwkt_gettoken(&lp->lwp_token);
 * If LWP_MP_WSTOP is set, we were sleeping
 * while our process was stopped. At this point
 * we were already counted as stopped.
if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
 * If we're the last thread to stop, signal
atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
wakeup(&p->p_nstopped);
if (p->p_nstopped == p->p_nthreads) {
 * Token required to interlock kern_wait()
lwkt_gettoken(&q->p_token);
p->p_flags &= ~P_WAITED;
if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
ksignal(q, SIGCHLD);
lwkt_reltoken(&q->p_token);
 * Wait here while in a stopped state, interlocked with lwp_token.
 * We must break out if the whole process is trying to exit.
while (STOPLWP(p, lp)) {
lp->lwp_stat = LSSTOP;
tsleep(p, 0, "stop", 0);
atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
lwkt_reltoken(&lp->lwp_token);
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals. This is a pcpu callout.
 * We segment the lwp scan on a pcpu basis. This does NOT
 * mean the associated lwps are on this cpu, it is done
 * just to break the work up.
 * The callout on cpu0 rolls up the stats from the other
static int loadav_count_runnable(struct lwp *p, void *data);
globaldata_t gd = mycpu;
struct loadavg *avg;
alllwp_scan(loadav_count_runnable, &nrun, 1);
gd->gd_loadav_nrunnable = nrun;
if (gd->gd_cpuid == 0) {
for (i = 0; i < ncpus; ++i)
nrun += globaldata_find(i)->gd_loadav_nrunnable;
for (i = 0; i < 3; i++) {
avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
(long)nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 * Schedule the next update to occur after 5 seconds, but add a
 * random variation to avoid synchronisation with processes that
 * run at regular intervals.
callout_reset(&gd->gd_loadav_callout,
hz * 4 + (int)(krandom() % (hz * 2 + 1)),
loadav_count_runnable(struct lwp *lp, void *data)
switch (lp->lwp_stat) {
if ((td = lp->lwp_thread) == NULL)
if (td->td_flags & TDF_BLOCKED)
 * Regular data collection
collect_load_callback(int n)
int fscale = averunnable.fscale;
return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
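/*
 * Worked example of the conversion above (numbers chosen for illustration):
 * with fscale = 2048 and ldavg[0] = 1536 (a load average of 0.75), the
 * callback returns (1536 * 100 + 1024) / 2048 = 75, i.e. the load average
 * expressed as a rounded percentage for the collector.
 */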
sched_setup(void *dummy __unused)
globaldata_t save_gd = mycpu;
kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));
 * Kick off timeout driven events by calling them a first time. We
 * split the work across available cpus to help scale it;
 * it can eat a lot of cpu when there are a lot of processes
for (n = 0; n < ncpus; ++n) {
gd = globaldata_find(n);
lwkt_setcpu_self(gd);
callout_init_mp(&gd->gd_loadav_callout);
callout_init_mp(&gd->gd_schedcpu_callout);
lwkt_setcpu_self(save_gd);
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock(). Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
sleep_early_gdinit(globaldata_t gd)
static struct tslpque dummy_slpque;
static cpumask_t dummy_cpumasks;
slpque_tablesize = 1;
gd->gd_tsleep_hash = &dummy_slpque;
slpque_cpumasks = &dummy_cpumasks;
TAILQ_INIT(&dummy_slpque.queue);
 * PCPU initialization. Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 * WARNING! The pcpu hash table is smaller than the global cpumask
 * hash table, which can save us a lot of memory when maxproc
sleep_gdinit(globaldata_t gd)
 * This shouldn't happen; that is, there shouldn't be any threads
 * waiting on the dummy tsleep queue this early in the boot.
if (gd->gd_cpuid == 0) {
struct tslpque *qp = &gd->gd_tsleep_hash[0];
TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
 * Note that we have to allocate one extra slot because we are
 * shifting a modulo value. TCHASHSHIFT(slpque_tablesize - 1) can
 * return the same value as TCHASHSHIFT(slpque_tablesize).
n = TCHASHSHIFT(slpque_tablesize) + 1;
hash_size = sizeof(struct tslpque) * n;
gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
KM_CPU(gd->gd_cpuid));
memset(gd->gd_tsleep_hash, 0, hash_size);
for (i = 0; i < n; ++i)
TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
 * Dynamic initialization after the memory system is operational.
sched_dyninit(void *dummy __unused)
 * Calculate table size for slpque hash. We want a prime number
 * large enough to avoid overloading slpque_cpumasks when the
 * system has a large number of sleeping processes, which will
 * spam IPIs on wakeup().
 * While it is true this is really a per-lwp factor, generally
 * speaking the maxproc limit is a good metric to go by.
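 *
 * The loop below is a simple trial-division prime search: it starts at
 * maxproc | 1, rejects multiples of 3 and 5 outright, then tests odd
 * divisors up to half the candidate, advancing to the next odd candidate
 * on failure.  The result is capped (see below) so the table cannot grow
 * without bound.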
for (tblsize = maxproc | 1; ; tblsize += 2) {
if (tblsize % 3 == 0)
if (tblsize % 5 == 0)
tblsize2 = (tblsize / 2) | 1;
for (n = 7; n < tblsize2; n += 2) {
if (tblsize % n == 0)
 * PIDs are currently limited to 6 digits. Cap the table size
if (tblsize > 2000003)
slpque_tablesize = tblsize;
slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
M_TSLEEP, M_WAITOK | M_ZERO);
sleep_gdinit(mycpu);