/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_switch.c,v 1.3.2.1 2000/05/16 06:58:12 dillon Exp $
 * $DragonFly: src/sys/kern/Attic/kern_switch.c,v 1.21 2004/04/10 20:55:23 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <machine/ipl.h>
#include <machine/cpu.h>
#include <machine/smp.h>
/*
 * Debugging only.  YYY remove me!  Define to schedule user processes only
 * on the BSP.  Interrupts can still be taken on the APs.
 */
#undef ONLY_ONE_USER_CPU
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then an ffs() to find the first busy
 * queue.
 */
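
/*
 * Illustrative sketch (not part of the build): how a normal-class
 * priority is folded onto a queue index and how the bitmask locates
 * the first non-empty queue.  bsfl() returns the index of the lowest
 * set bit, so lower indexes (better priorities) win.
 * example_select_queue() is a hypothetical helper, not kernel API.
 */
#if 0
static int
example_select_queue(struct proc *p)
{
	int pri;

	pri = (p->p_priority & PRIMASK) >> 2;	/* 128 priorities -> 32 queues */
	TAILQ_INSERT_TAIL(&queues[pri], p, p_procq);
	queuebits |= 1 << pri;			/* mark queue non-empty */
	return (bsfl(queuebits));		/* index of first busy queue */
}
#endif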
static struct rq queues[NQS];
static struct rq rtqueues[NQS];
static struct rq idqueues[NQS];
static u_int32_t queuebits;
static u_int32_t rtqueuebits;
static u_int32_t idqueuebits;
static cpumask_t curprocmask = -1;	/* currently running a user process */
static cpumask_t rdyprocmask;		/* ready to accept a user process */
static int	 runqcount;
#ifdef SMP
static int	 scancpu;
#endif
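
/*
 * Illustrative sketch (not part of the build): how the two masks are
 * combined to find a cpu that is free from the user scheduler's point
 * of view (not running a designated user process, helper thread ready).
 * example_find_free_cpu() is a hypothetical helper, not kernel API.
 */
#if 0
static int
example_find_free_cpu(void)
{
	cpumask_t mask;

	mask = ~curprocmask & rdyprocmask;	/* free AND ready cpus */
	if (mask == 0)
		return (-1);			/* no such cpu */
	return (bsfl(mask));			/* lowest free cpu id */
}
#endif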
SYSCTL_INT(_debug, OID_AUTO, runqcount, CTLFLAG_RD, &runqcount, 0, "");

static int usched_stalls;
SYSCTL_INT(_debug, OID_AUTO, usched_stalls, CTLFLAG_RW,
	&usched_stalls, 0, "acquire_curproc() had to stall");
static int usched_stolen;
SYSCTL_INT(_debug, OID_AUTO, usched_stolen, CTLFLAG_RW,
	&usched_stolen, 0, "acquire_curproc() stole the designation");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
	&usched_optimal, 0, "acquire_curproc() was optimal");
static int remote_resched = 1;
static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched, CTLFLAG_RW,
	&remote_resched, 0, "Resched to another cpu");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
	&remote_resched_nonaffinity, 0, "Number of remote rescheds (no affinity)");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
	&remote_resched_affinity, 0, "Number of remote rescheds (affinity)");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
	&choose_affinity, 0, "chooseproc() was smart");
/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
	int i;

	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&queues[i]);
		TAILQ_INIT(&rtqueues[i]);
		TAILQ_INIT(&idqueues[i]);
	}
	curprocmask &= ~1;	/* allow user scheduling on cpu 0 */
}
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
/*
 * Returns 1 if curp is equal to or better than newp.  Note that
 * lower p_priority values == higher process priorities.  Assume curp
 * is in-context and cut it some slack to avoid ping ponging.
 */
static __inline int
test_resched(struct proc *curp, struct proc *newp)
{
	if (curp->p_priority - newp->p_priority < PPQ)
		return (1);
	return (0);
}
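
/*
 * Worked example (assuming PPQ == 128 / NQS == 4, i.e. four priorities
 * per queue): if curp->p_priority is 34 and newp->p_priority is 32, the
 * difference is 2 < PPQ, so curp is considered as good and keeps the
 * cpu.  newp must be better by at least a full queue (PPQ) before a
 * reschedule is considered, which prevents two processes of nearly
 * equal priority from ping ponging the designation.
 */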
/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chkp is non-NULL and chkp
 * has the same or higher priority than the process that would otherwise be
 * chosen, NULL is returned.
 */
static struct proc *
chooseproc(struct proc *chkp)
{
	struct proc *p;
	struct rq *q;
	u_int32_t *which;
	u_int32_t pri;

	if (rtqueuebits) {
		pri = bsfl(rtqueuebits);
		q = &rtqueues[pri];
		which = &rtqueuebits;
	} else if (queuebits) {
		pri = bsfl(queuebits);
		q = &queues[pri];
		which = &queuebits;
	} else if (idqueuebits) {
		pri = bsfl(idqueuebits);
		q = &idqueues[pri];
		which = &idqueuebits;
	} else {
		return (NULL);
	}
	p = TAILQ_FIRST(q);
	KASSERT(p, ("chooseproc: no proc on busy queue"));

	/*
	 * If the passed process is better than the selected process,
	 * return NULL.
	 */
	if (chkp && test_resched(chkp, p))
		return (NULL);

#ifdef SMP
	/*
	 * If the chosen process does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (p->p_thread->td_gd != mycpu &&
	    (chkp = TAILQ_NEXT(p, p_procq)) != NULL
	) {
		if (chkp->p_thread->td_gd == mycpu) {
			++choose_affinity;
			p = chkp;
		}
	}
#endif

	TAILQ_REMOVE(q, p, p_procq);
	--runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((p->p_flag & P_ONRUNQ) != 0, ("not on runq6!"));
	p->p_flag &= ~P_ONRUNQ;
	return (p);
}
/*
 * Called via an ipi message to reschedule on another cpu.
 */
static void
need_user_resched_remote(void *dummy)
{
	need_user_resched();
}
/*
 * setrunqueue() 'wakes up' a 'user' process, which can mean several things.
 *
 * If P_CP_RELEASED is set the user process is under the control of the
 * LWKT subsystem and we simply wake the thread up.  This is ALWAYS the
 * case when setrunqueue() is called from wakeup() and, in fact, wakeup()
 * asserts that P_CP_RELEASED is set.
 *
 * If P_CP_RELEASED is not set we place the process on the run queue and we
 * signal other cpus in the system that may need to be woken up to service
 * the new 'user' process.
 *
 * If P_PASSIVE_ACQ is set setrunqueue() will not wakeup potential target
 * cpus in an attempt to keep the process on the current cpu at least for
 * a little while to take advantage of locality of reference (e.g. fork/exec
 * or short fork/exit, and uio_yield()).
 *
 * CPU AFFINITY: cpu affinity is handled by attempting to either schedule
 * or (user level) preempt on the same cpu that a process was previously
 * scheduled to.  If we cannot do this but we are at enough of a higher
 * priority than the processes running on other cpus, we will allow the
 * process to be stolen by another cpu.
 *
 * WARNING! a thread can be acquired by another cpu the moment it is put
 * on the user scheduler's run queue AND we release the MP lock.  Since we
 * release the MP lock before switching out another cpu may begin stealing
 * our current thread before we are completely switched out!  The
 * lwkt_acquire() function will stall until TDF_RUNNING is cleared on the
 * thread before stealing it.
 *
 * The associated thread must NOT be scheduled.
 * The process must be runnable.
 * This must be called at splhigh().
 */
void
setrunqueue(struct proc *p)
{
	struct rq *q;
	struct globaldata *gd;
	int pri;
	int cpuid;
#ifdef SMP
	int count;
	cpumask_t mask;
#endif

	KASSERT(p->p_stat == SRUN, ("setrunqueue: proc not SRUN"));
	KASSERT((p->p_flag & P_ONRUNQ) == 0,
	    ("process %d already on runq! flag %08x", p->p_pid, p->p_flag));
	KKASSERT((p->p_thread->td_flags & TDF_RUNQ) == 0);
	/*
	 * If the process has been released from the userland scheduler we
	 * directly schedule its thread.  If the priority is sufficiently
	 * high request a user reschedule.  Note that lwkt_resched is not
	 * typically set for wakeups of userland threads that happen
	 * to be sitting in the kernel because their LWKT priorities will
	 * generally be the same.
	 */
	if (p->p_flag & P_CP_RELEASED) {
		lwkt_schedule(p->p_thread);
		gd = mycpu;
		if (gd->gd_uschedcp && test_resched(p, gd->gd_uschedcp))
			need_user_resched();
		return;
	}
	/*
	 * We have not been released, make sure that we are not the currently
	 * designated process.
	 */
	gd = p->p_thread->td_gd;
	KKASSERT(gd->gd_uschedcp != p);
	/*
	 * Check cpu affinity.  The associated thread is stable at the
	 * moment.  Note that we may be checking another cpu here so we
	 * have to be careful.  We are currently protected by the BGL.
	 */
	cpuid = gd->gd_cpuid;

	if ((curprocmask & (1 << cpuid)) == 0) {
		curprocmask |= 1 << cpuid;
		gd->gd_uschedcp = p;
		gd->gd_upri = p->p_priority;
		lwkt_schedule(p->p_thread);
		/* CANNOT TOUCH PROC OR TD AFTER SCHEDULE CALL TO REMOTE CPU */
#ifdef SMP
		if (gd != mycpu)
			++remote_resched_affinity;
#endif
		return;
	}
	/*
	 * gd and cpuid may still 'hint' at another cpu.  Even so we have
	 * to place this process on the userland scheduler's run queue for
	 * action by the target cpu.
	 */
	++runqcount;
	p->p_flag |= P_ONRUNQ;
	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		pri = (p->p_priority & PRIMASK) >> 2;
		q = &queues[pri];
		queuebits |= 1 << pri;
	} else if (p->p_rtprio.type == RTP_PRIO_REALTIME ||
		   p->p_rtprio.type == RTP_PRIO_FIFO) {
		pri = (u_int8_t)p->p_rtprio.prio;
		q = &rtqueues[pri];
		rtqueuebits |= 1 << pri;
	} else if (p->p_rtprio.type == RTP_PRIO_IDLE) {
		pri = (u_int8_t)p->p_rtprio.prio;
		q = &idqueues[pri];
		idqueuebits |= 1 << pri;
	} else {
		panic("setrunqueue: invalid rtprio type");
	}
	p->p_rqindex = pri;		/* remember the queue index */
	TAILQ_INSERT_TAIL(q, p, p_procq);
#ifdef SMP
	/*
	 * Either wakeup other cpus user thread scheduler or request
	 * preemption on other cpus (which will also wakeup a HLT).
	 *
	 * NOTE!  gd and cpuid may still be our 'hint', not our current
	 * cpu info.
	 */
	count = runqcount;

	/*
	 * Check cpu affinity for user preemption (when the curprocmask bit
	 * is set).  Note that gd_upri is a speculative field (we modify
	 * another cpu's gd_upri to avoid sending ipiq storms).
	 */
	if (gd == mycpu) {
		if ((p->p_thread->td_flags & TDF_NORESCHED) == 0 &&
		    p->p_priority - gd->gd_upri <= -PPQ) {
			gd->gd_upri = p->p_priority;
			need_user_resched();
			--count;
		}
	} else if (remote_resched) {
		if (p->p_priority - gd->gd_upri <= -PPQ) {
			gd->gd_upri = p->p_priority;
			lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
			--count;
			++remote_resched_affinity;
		}
	}
	/*
	 * No affinity, first schedule to any cpus that do not have a current
	 * process.  If there is a free cpu we always schedule to it.
	 */
	if (count &&
	    (mask = ~curprocmask & rdyprocmask & mycpu->gd_other_cpus) != 0 &&
	    (p->p_flag & P_PASSIVE_ACQ) == 0) {
		if (!mask)
			printf("PROC %d nocpu to schedule it on\n", p->p_pid);
		while (mask && count) {
			cpuid = bsfl(mask);
			KKASSERT((curprocmask & (1 << cpuid)) == 0);
			rdyprocmask &= ~(1 << cpuid);
			lwkt_schedule(&globaldata_find(cpuid)->gd_schedthread);
			--count;
			mask &= ~(1 << cpuid);
		}
	}
	/*
	 * If there are still runnable processes try to wakeup a random
	 * cpu that is running a much lower priority process in order to
	 * preempt on it.  Note that gd_upri is only a hint, so we can
	 * overwrite it from the wrong cpu.  If we can't find one, we
	 * are SOL.
	 *
	 * We depress the priority check so multiple cpu bound programs
	 * do not bounce between cpus.  Remember that the clock interrupt
	 * will also cause all cpus to reschedule.
	 *
	 * We must mask against rdyprocmask or we will race in the boot
	 * code (before all cpus have working scheduler helpers), plus
	 * some cpus might not be operational and/or not configured to
	 * handle user processes.
	 */
	if (count && remote_resched && ncpus > 1) {
		cpuid = scancpu;
		do {
			if (++cpuid == ncpus)
				cpuid = 0;
		} while (cpuid == mycpu->gd_cpuid);
		scancpu = cpuid;

		if (rdyprocmask & (1 << cpuid)) {
			gd = globaldata_find(cpuid);

			if (p->p_priority - gd->gd_upri <= -PPQ) {
				gd->gd_upri = p->p_priority;
				lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
				++remote_resched_nonaffinity;
			}
		}
	}
#else
	/*
	 * UP systems: user preemption check against the one and only cpu.
	 */
	if ((p->p_thread->td_flags & TDF_NORESCHED) == 0 &&
	    p->p_priority - gd->gd_upri <= -PPQ) {
		/* do not set gd_upri */
		need_user_resched();
	}
#endif
}
/*
 * remrunqueue() removes a given process from the run queue that it is on,
 * clearing the queue busy bit if it becomes empty.  This function is called
 * when a userland process is selected for LWKT scheduling.  Note that
 * LWKT scheduling is an abstraction of 'curproc'.. there could very well be
 * several userland processes whose threads are scheduled or otherwise in
 * a special state, and such processes are NOT on the userland scheduler's
 * run queue.
 *
 * This must be called at splhigh().
 */
void
remrunqueue(struct proc *p)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KASSERT((p->p_flag & P_ONRUNQ) != 0, ("not on runq4!"));
	p->p_flag &= ~P_ONRUNQ;
	--runqcount;
	KKASSERT(runqcount >= 0);
	pri = p->p_rqindex;
	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		q = &queues[pri];
		which = &queuebits;
	} else if (p->p_rtprio.type == RTP_PRIO_REALTIME ||
		   p->p_rtprio.type == RTP_PRIO_FIFO) {
		q = &rtqueues[pri];
		which = &rtqueuebits;
	} else if (p->p_rtprio.type == RTP_PRIO_IDLE) {
		q = &idqueues[pri];
		which = &idqueuebits;
	} else {
		panic("remrunqueue: invalid rtprio type");
	}
	TAILQ_REMOVE(q, p, p_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
		    ("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}
/*
 * Release the current process designation on p.  P MUST BE CURPROC.
 * Attempt to assign a new current process from the run queue.
 *
 * If passive is non-zero, gd_uschedcp may be left set to p, the
 * fact that P_CP_RELEASED is set will allow it to be overridden at any
 * time.
 *
 * If we do not have or cannot get the MP lock we just wakeup the userland
 * helper scheduler thread for this cpu.
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().  try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * useable.
 */
void
release_curproc(struct proc *p)
{
	int cpuid;
	struct proc *np;
	globaldata_t gd = mycpu;

#ifdef ONLY_ONE_USER_CPU
	KKASSERT(gd->gd_cpuid == 0 && p->p_thread->td_gd == gd);
#else
	KKASSERT(p->p_thread->td_gd == gd);
#endif

	cpuid = gd->gd_cpuid;
	if ((p->p_flag & P_CP_RELEASED) == 0) {
		p->p_flag |= P_CP_RELEASED;
		lwkt_setpri_self(TDPRI_KERN_USER);
	}
	if (gd->gd_uschedcp == p) {
		if (try_mplock()) {
			/*
			 * YYY when the MP lock is not assumed (see else) we
			 * will have to check that gd_uschedcp is still == p
			 * after acquisition of the MP lock
			 *
			 * Choose the next designated current user process.
			 * Note that we cannot schedule gd_schedthread
			 * if runqcount is 0 without creating a scheduling
			 * deadlock.
			 *
			 * We do not clear the user resched request here,
			 * we need to test it later when we re-acquire.
			 */
			if ((np = chooseproc(NULL)) != NULL) {
				curprocmask |= 1 << cpuid;
				gd->gd_upri = np->p_priority;
				gd->gd_uschedcp = np;
				lwkt_acquire(np->p_thread);
				lwkt_schedule(np->p_thread);
			} else if (runqcount && (rdyprocmask & (1 << cpuid))) {
				gd->gd_uschedcp = NULL;
				curprocmask &= ~(1 << cpuid);
				rdyprocmask &= ~(1 << cpuid);
				lwkt_schedule(&gd->gd_schedthread);
			} else {
				gd->gd_uschedcp = NULL;
				curprocmask &= ~(1 << cpuid);
			}
			rel_mplock();
		} else {
			KKASSERT(0);	/* MP LOCK ALWAYS HELD AT THE MOMENT */
			/* YYY uschedcp and curprocmask */
			gd->gd_uschedcp = NULL;
			curprocmask &= ~(1 << cpuid);
			if (runqcount && (rdyprocmask & (1 << cpuid))) {
				rdyprocmask &= ~(1 << cpuid);
				lwkt_schedule(&mycpu->gd_schedthread);
			}
		}
	}
}
/*
 * Acquire the current process designation on the CURRENT process only.
 * This function is called prior to returning to userland.  If the system
 * call or trap did not block and if no reschedule was requested it is
 * highly likely that p is still designated.
 *
 * If any reschedule (lwkt or user) was requested, release_curproc() has
 * already been called and gd_uschedcp will be NULL.  We must be sure not
 * to return without clearing both the lwkt and user ASTs.
 */
void
acquire_curproc(struct proc *p)
{
	int cpuid;
	struct proc *np;
	enum { ACQ_OPTIMAL, ACQ_STOLEN, ACQ_STALLED } state;
	globaldata_t gd = mycpu;

#ifdef ONLY_ONE_USER_CPU
	KKASSERT(gd->gd_cpuid == 0);
#endif

	/*
	 * Shortcut the common case where the system call / other kernel entry
	 * did not block or otherwise release our current process designation.
	 * If a reschedule was requested the process would have been released
	 * from <arch>/<arch>/trap.c and gd_uschedcp will be NULL.
	 */
	if (gd->gd_uschedcp == p && (p->p_flag & P_CP_RELEASED) == 0) {
		++usched_optimal;
		return;
	}

	KKASSERT(p == gd->gd_curthread->td_proc);
	clear_user_resched();
	state = ACQ_OPTIMAL;

	/*
	 * We drop our priority now.
	 *
	 * We must leave P_CP_RELEASED set.  This allows other kernel threads
	 * exiting to userland to steal our gd_uschedcp.
	 *
	 * NOTE: If P_CP_RELEASED is not set here, our priority was never
	 * raised and we therefore do not have to lower it.
	 */
	if (p->p_flag & P_CP_RELEASED)
		lwkt_setpri_self(TDPRI_USER_NORM);
	else
		p->p_flag |= P_CP_RELEASED;
	/*
	 * Obtain ownership of gd_uschedcp (the current process designation).
	 *
	 * Note: the while never actually loops; we use the construct for the
	 * initial condition test and the break statements.
	 */
	while (gd->gd_uschedcp != p) {
		/*
		 * Choose the next process to become the current process.
		 *
		 * With P_CP_RELEASED set we can compete for the designation,
		 * e.g. if any_resched_wanted() is set.
		 */
		cpuid = gd->gd_cpuid;
		np = gd->gd_uschedcp;
		if (np == NULL) {
			KKASSERT((curprocmask & (1 << cpuid)) == 0);
			curprocmask |= 1 << cpuid;
			if ((np = chooseproc(p)) == NULL) {
				gd->gd_uschedcp = p;
				gd->gd_upri = p->p_priority;
				break;
			}
			KKASSERT((np->p_flag & P_CP_RELEASED) == 0);
			gd->gd_upri = np->p_priority;
			gd->gd_uschedcp = np;
			lwkt_acquire(np->p_thread);
			lwkt_schedule(np->p_thread);
			/* loop retest will queue us below */
		} else if ((np->p_flag & P_CP_RELEASED) && !test_resched(np, p)) {
			/*
			 * When gd_uschedcp's P_CP_RELEASED flag is set it
			 * must have just called lwkt_switch() in the post
			 * acquisition code below.  We can safely dequeue it
			 * and steal the designation.
			 *
			 * Note that we reverse the arguments to test_resched()
			 * and use NOT.  This reverses the hysteresis so we do
			 * not chain a sequence of steadily worse priorities
			 * and end up with a very low priority (high p_priority
			 * value) as our current process.
			 */
			KKASSERT(curprocmask & (1 << cpuid));
			gd->gd_uschedcp = p;
			gd->gd_upri = p->p_priority;

			lwkt_deschedule(np->p_thread);	/* local to cpu */
			np->p_flag &= ~P_CP_RELEASED;
			setrunqueue(np);
			if (state == ACQ_OPTIMAL)
				state = ACQ_STOLEN;
		} else {
			/*
			 * We couldn't acquire the designation, put us on
			 * the userland run queue for selection and block.
			 * setrunqueue() will call need_user_resched() if
			 * necessary if the existing current process has a lower
			 * priority.
			 */
			clear_lwkt_resched();
			lwkt_deschedule_self(curthread);
			p->p_flag &= ~P_CP_RELEASED;
			setrunqueue(p);
			lwkt_switch();
			/*
			 * WE MAY HAVE BEEN MIGRATED TO ANOTHER CPU
			 */
			gd = mycpu;
			KKASSERT((p->p_flag & (P_ONRUNQ|P_CP_RELEASED)) == 0);
			state = ACQ_STALLED;
		}
	}
	/*
	 * We have acquired gd_uschedcp and our priority is correct.
	 *
	 * If P_CP_RELEASED is set we have to check lwkt_resched_wanted()
	 * and lwkt_switch() if it returns TRUE in order to run any pending
	 * threads before returning to user mode.
	 *
	 * If P_CP_RELEASED is clear we have *ALREADY* done a switch (and
	 * we were possibly dequeued and setrunqueue()'d, and then woken up
	 * again via chooseproc()), and since our priority was lowered we
	 * are guaranteed that no other kernel threads are pending and that
	 * we are in fact the gd_uschedcp.
	 */
	if (p->p_flag & P_CP_RELEASED) {
		if (lwkt_resched_wanted()) {
			clear_lwkt_resched();
			lwkt_switch();
			gd = mycpu;	/* We may have moved */
			if ((p->p_flag & P_CP_RELEASED) == 0) {
				++p->p_stats->p_ru.ru_nivcsw;
			}
		}
		p->p_flag &= ~P_CP_RELEASED;
	} else {
		++p->p_stats->p_ru.ru_nivcsw;
	}

	/*
	 * That's it.  Cleanup, we are done.  The caller can return to
	 * user mode.
	 */
	KKASSERT((p->p_flag & P_ONRUNQ) == 0 && gd->gd_uschedcp == p);
	switch(state) {
	case ACQ_OPTIMAL:
		++usched_optimal;
		break;
	case ACQ_STOLEN:
		++usched_stolen;
		break;
	case ACQ_STALLED:
		++usched_stalls;
		break;
	}
}
/*
 * Yield / synchronous reschedule.  This is a bit tricky because the trap
 * code might have set a lazy release on the switch function.  Setting
 * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
 * switch, and that we are given a greater chance of affinity with our
 * current cpu.
 *
 * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
 * run queue.  lwkt_switch() will also execute any assigned passive release
 * (which usually calls release_curproc()), allowing a same/higher priority
 * process to be designated as the current process.
 *
 * While it is possible for a lower priority process to be designated,
 * its call to lwkt_maybe_switch() in acquire_curproc() will likely
 * round-robin back to us and we will be able to re-acquire the current
 * process designation.
 */
void
uio_yield(void)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
	if (p) {
		p->p_flag |= P_PASSIVE_ACQ;
		lwkt_switch();
		p->p_flag &= ~P_PASSIVE_ACQ;
	} else {
		lwkt_switch();
	}
}
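
/*
 * Usage sketch (illustrative): a long-running kernel loop would call
 * uio_yield() periodically to let a same/higher priority process be
 * designated.  more_work and do_some_work() are hypothetical, and the
 * any_resched_wanted() pre-check is an assumption about the caller,
 * not something uio_yield() requires.
 */
#if 0
	while (more_work) {
		do_some_work();
		if (any_resched_wanted())
			uio_yield();
	}
#endif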
#ifdef SMP

/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().  UP systems do not
 * need the helper since there is only one cpu.  We can't use the idle
 * thread for this because we need to hold the MP lock.  Additionally,
 * doing things this way allows us to HLT idle cpus on MP systems.
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd = mycpu;
	int cpuid = gd->gd_cpuid;		/* doesn't change */
	u_int32_t cpumask = 1 << cpuid;		/* doesn't change */
	struct proc *np;

#ifdef ONLY_ONE_USER_CPU
	KKASSERT(cpuid == 0);
#endif

	get_mplock();			/* hold the MP lock */
	for (;;) {
		lwkt_deschedule_self(gd->gd_curthread);	/* interlock */
		rdyprocmask |= cpumask;
		crit_enter_quick(gd->gd_curthread);
		if ((curprocmask & cpumask) == 0 &&
		    (np = chooseproc(NULL)) != NULL) {
			curprocmask |= cpumask;
			gd->gd_upri = np->p_priority;
			gd->gd_uschedcp = np;
			lwkt_acquire(np->p_thread);
			lwkt_schedule(np->p_thread);
		}
		crit_exit_quick(gd->gd_curthread);
		lwkt_switch();
	}
}
/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;

	printf("start scheduler helpers on cpus:");

	for (i = 0; i < ncpus; ++i) {
		globaldata_t dgd = globaldata_find(i);
		cpumask_t mask = 1 << i;

		if ((mask & smp_active_mask) == 0)
			continue;

		printf(" %d", i);

		lwkt_create(sched_thread, NULL, NULL, &dgd->gd_schedthread,
			    TDF_STOPREQ, i, "usched %d", i);
#ifdef ONLY_ONE_USER_CPU
		if (i)
			curprocmask |= mask;	/* DISABLE USER PROCS */
#else
		if (i)
			curprocmask &= ~mask;	/* schedule user proc on cpu */
#endif
		rdyprocmask |= mask;
	}
	printf("\n");
}
SYSINIT(uschedtd, SI_SUB_FINISH_SMP, SI_ORDER_ANY, sched_thread_cpu_init, NULL)

#endif	/* SMP */