/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.26 2008/11/01 23:31:19 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
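
/*
 * Worked example: with MAXPRI == 128 and NQS == 32, PPQ == 4, so
 * priorities 0-3 share queue 0, priorities 4-7 share queue 1, and so on.
 * Masking with ~PPQMASK (~3) compares priorities at queue granularity,
 * which is why tests such as
 *
 *	(dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)
 *
 * only trigger a reschedule when two lwps fall in different queues.
 */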

/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPURAMP	- number of scheduler ticks for estcpu to switch queues
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 * ESTCPUINCR	- amount we have to increment p_estcpu per scheduling tick at
 *		  100% cpu
 */

#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define ESTCPUINCR	(ESTCPUPPQ / ESTCPURAMP)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
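
/*
 * ESTCPULIM() simply clamps estcpu at the top of the range, e.g.
 * ESTCPULIM(ESTCPUMAX + ESTCPUINCR) == ESTCPUMAX, so lwp_estcpu can be
 * bumped unconditionally every tick without ever indexing past the
 * last run queue.
 */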

#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_origcpu	lwp_usdata.bsd4.origcpu
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_rqtype	lwp_usdata.bsd4.rqtype

static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
                                sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *plp, struct lwp *lp);
static void bsd4_yield(struct lwp *lp);

static void need_user_resched_remote(void *dummy);

static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);

struct usched usched_bsd4 = {
        { NULL },
        "bsd4", "Original DragonFly Scheduler",
        NULL,                   /* default registration */
        NULL,                   /* default deregistration */
        bsd4_acquire_curproc,
        bsd4_release_curproc,
        bsd4_setrunqueue,
        bsd4_schedulerclock,
        bsd4_recalculate_estcpu,
        bsd4_resetpriority,
        bsd4_forking,
        bsd4_exiting,
        NULL,                   /* setcpumask not supported */
        bsd4_yield
};

struct usched_bsd4_pcpu {
        struct thread   helper_thread;
        short           rrcount;
        short           upri;
        struct lwp      *uschedcp;
};

typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;

/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
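
/*
 * Illustrative sketch (not part of the original code): locating the best
 * non-empty queue is a single bit-scan of the bitmask:
 *
 *	if (bsd4_queuebits) {
 *		pri = bsfl(bsd4_queuebits);	(lowest set bit == best queue)
 *		q = &bsd4_queues[pri];
 *		lp = TAILQ_FIRST(q);
 *	}
 *
 * chooseproc_locked() below does exactly this, scanning the realtime,
 * normal, and idle class bitmasks in that order.
 */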

static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */
static cpumask_t bsd4_rdyprocmask;      /* ready to accept a user process */
static int       bsd4_runqcount;
static volatile int bsd4_scancpu;
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];

SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0, "");

static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
        &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
        &usched_optimal, 0, "acquire_curproc() was optimal");

static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0, "");

static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
        &remote_resched_nonaffinity, 0, "Number of remote rescheds (no affinity)");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
        &remote_resched_affinity, 0, "Number of remote rescheds (with affinity)");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
        &choose_affinity, 0, "chooseproc() was smart");

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
        &usched_bsd4_rrinterval, 0, "");
static int usched_bsd4_decay = ESTCPUINCR / 2;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
        &usched_bsd4_decay, 0, "");
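
/*
 * Note: (ESTCPUFREQ + 9) / 10 rounds up, so usched_bsd4_rrinterval works
 * out to roughly one tenth of the scheduler clock frequency.  Assuming a
 * 50hz ESTCPUFREQ (the define itself is not shown here), the default
 * rrinterval is 5 ticks, i.e. the ~10 round-robins per second mentioned
 * in bsd4_schedulerclock().
 */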

/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
        int i;

        spin_init(&bsd4_spin);
        for (i = 0; i < NQS; i++) {
                TAILQ_INIT(&bsd4_queues[i]);
                TAILQ_INIT(&bsd4_rtqueues[i]);
                TAILQ_INIT(&bsd4_idqueues[i]);
        }
        atomic_clear_int(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)

/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel has already depressed our LWKT priority so we must not switch
 * until we have either assigned or disposed of the thread.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
        globaldata_t gd;
        bsd4_pcpu_t dd;
        struct lwp *olp;

        crit_enter();
        bsd4_recalculate_estcpu(lp);

        /*
         * If a reschedule was requested give another thread the
         * driver's seat.
         */
        if (user_resched_wanted()) {
                clear_user_resched();
                bsd4_release_curproc(lp);
        }

        /*
         * Loop until we are the current user thread
         */
        do {
                /*
                 * Reload after a switch or setrunqueue/switch possibly
                 * moved us to another cpu.
                 */
                clear_lwkt_resched();
                gd = mycpu;
                dd = &bsd4_pcpu[gd->gd_cpuid];

                /*
                 * Become the currently scheduled user thread for this cpu
                 * if we can do so trivially.
                 *
                 * We can steal another thread's current thread designation
                 * on this cpu since if we are running that other thread
                 * must not be, so we can safely deschedule it.
                 */
                if (dd->uschedcp == lp) {
                        dd->upri = lp->lwp_priority;
                } else if (dd->uschedcp == NULL) {
                        atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
                } else if (dd->upri > lp->lwp_priority) {
                        olp = dd->uschedcp;
                        dd->uschedcp = lp;
                        dd->upri = lp->lwp_priority;
                        lwkt_deschedule(olp->lwp_thread);
                        bsd4_setrunqueue(olp);
                } else {
                        /*
                         * We cannot become the current lwp, place the lwp
                         * on the run queue and deschedule ourselves.
                         */
                        lwkt_deschedule(lp->lwp_thread);
                        bsd4_setrunqueue(lp);
                        lwkt_switch();
                }

                /*
                 * Other threads at our current user priority have already
                 * put in their bids, but we must run any kernel threads
                 * at higher priorities, and we could lose our bid to
                 * another thread trying to return to user mode in the
                 * process.
                 *
                 * If we lose our bid we will be descheduled and put on
                 * the run queue.  When we are reactivated we will have
                 * another chance.
                 */
                if (lwkt_check_resched(lp->lwp_thread) > 1) {
                        lwkt_switch();
                }
        } while (dd->uschedcp != lp);

        crit_exit();
        KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
}

/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().  try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * usable.
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        if (dd->uschedcp == lp) {
                crit_enter();
                KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
                dd->uschedcp = NULL;    /* don't let lp be selected */
                dd->upri = PRIBASE_NULL;
                atomic_clear_int(&bsd4_curprocmask, gd->gd_cpumask);
                bsd4_select_curproc(gd);
                crit_exit();
        }
}

/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 */
static void
bsd4_select_curproc(globaldata_t gd)
{
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
        struct lwp *nlp;
        int cpuid = gd->gd_cpuid;

        crit_enter();
        spin_lock_wr(&bsd4_spin);
        if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
                atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
                dd->upri = nlp->lwp_priority;
                dd->uschedcp = nlp;
                spin_unlock_wr(&bsd4_spin);
                lwkt_acquire(nlp->lwp_thread);
                lwkt_schedule(nlp->lwp_thread);
        } else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
                atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
                spin_unlock_wr(&bsd4_spin);
                lwkt_schedule(&dd->helper_thread);
        } else {
                spin_unlock_wr(&bsd4_spin);
        }
        crit_exit();
}

/*
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 *
 * The thread may be the current thread as a special case.
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
        globaldata_t gd;
        bsd4_pcpu_t dd;
#ifdef SMP
        int cpuid;
        cpumask_t mask;
        cpumask_t tmpmask;
#endif

        /*
         * First validate the process state relative to the current cpu.
         * We don't need the spinlock for this, just a critical section.
         * We are in control of the process.
         */
        crit_enter();
        KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
        KASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0,
            ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
             lp->lwp_tid, lp->lwp_proc->p_flag, lp->lwp_flag));
        KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

        /*
         * Note: gd and dd are relative to the target thread's last cpu,
         * NOT our current cpu.
         */
        gd = lp->lwp_thread->td_gd;
        dd = &bsd4_pcpu[gd->gd_cpuid];

        /*
         * This process is not supposed to be scheduled anywhere or assigned
         * as the current process anywhere.  Assert the condition.
         */
        KKASSERT(dd->uschedcp != lp);

#ifndef SMP
        /*
         * If we are not SMP we do not have a scheduler helper to kick
         * and must directly activate the process if none are scheduled.
         *
         * This is really only an issue when bootstrapping init since
         * the caller in all other cases will be a user process, and
         * even if released (dd->uschedcp == NULL), that process will
         * kickstart the scheduler when it returns to user mode from
         * the kernel.
         */
        if (dd->uschedcp == NULL) {
                atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
                dd->uschedcp = lp;
                dd->upri = lp->lwp_priority;
                lwkt_schedule(lp->lwp_thread);
                crit_exit();
                return;
        }
#endif

#ifdef SMP
        /*
         * XXX fixme.  Could be part of a remrunqueue/setrunqueue
         * operation when the priority is recalculated, so TDF_MIGRATING
         * may already be set.
         */
        if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
                lwkt_giveaway(lp->lwp_thread);
#endif

        /*
         * We lose control of lp the moment we release the spinlock after
         * having placed lp on the queue.  i.e. another cpu could pick it
         * up and it could exit, or its priority could be further adjusted,
         * or something like that.
         */
        spin_lock_wr(&bsd4_spin);
        bsd4_setrunqueue_locked(lp);

#ifdef SMP
        /*
         * Kick the scheduler helper on one of the other cpu's
         * and request a reschedule if appropriate.
         */
        cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
        mask = ~bsd4_curprocmask & bsd4_rdyprocmask &
               lp->lwp_cpumask & smp_active_mask;
        spin_unlock_wr(&bsd4_spin);

        while (mask) {
                tmpmask = ~((1 << cpuid) - 1);
                if (mask & tmpmask)
                        cpuid = bsfl(mask & tmpmask);
                else
                        cpuid = bsfl(mask);
                gd = globaldata_find(cpuid);
                dd = &bsd4_pcpu[cpuid];

                if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                        if (gd == mycpu)
                                need_user_resched_remote(NULL);
                        else
                                lwkt_send_ipiq(gd, need_user_resched_remote,
                                               NULL);
                        break;
                }
                mask &= ~(1 << cpuid);
        }
#else
        /*
         * Request a reschedule if appropriate.
         */
        spin_unlock_wr(&bsd4_spin);
        if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                need_user_resched();
        }
#endif
        crit_exit();
}

/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * Because this is effectively a 'fast' interrupt, we cannot safely
 * use spinlocks unless gd_spinlock_rd is NULL and gd_spinlocks_wr is 0,
 * even if the spinlocks are 'non conflicting'.  This is due to the way
 * spinlock conflicts against cached read locks are handled.
 */
static void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        /*
         * Do we need to round-robin?  We round-robin 10 times a second.
         * This should only occur for cpu-bound batch processes.
         */
        if (++dd->rrcount >= usched_bsd4_rrinterval) {
                dd->rrcount = 0;
                need_user_resched();
        }

        /*
         * As the process accumulates cpu time p_estcpu is bumped and may
         * push the process into another scheduling queue.  It typically
         * takes 4 ticks to bump the queue.
         */
        lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
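
        /*
         * Worked example: ESTCPUINCR == ESTCPUPPQ / ESTCPURAMP, so it takes
         * ESTCPURAMP ticks at 100% cpu to accumulate one queue's worth
         * (ESTCPUPPQ) of estcpu -- the "4 ticks to bump the queue" noted
         * above implies ESTCPURAMP == 4.
         */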

        /*
         * Reducing p_origcpu over time causes more of our estcpu to be
         * returned to the parent when we exit.  This is a small tweak
         * for the batch detection heuristic.
         */
        if (lp->lwp_origcpu)
                --lp->lwp_origcpu;

        /*
         * We can only safely call bsd4_resetpriority(), which uses spinlocks,
         * if we aren't interrupting a thread that is using spinlocks.
         * Otherwise we can deadlock with another cpu waiting for our read
         * spinlocks to clear.
         */
        if (gd->gd_spinlock_rd == NULL && gd->gd_spinlocks_wr == 0)
                bsd4_resetpriority(lp);
}

/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
bsd4_recalculate_estcpu(struct lwp *lp)
{
        globaldata_t gd = mycpu;
        sysclock_t cpbase;
        int loadfac;
        int ndecay;
        int nticks;
        int nleft;

        /*
         * We have to subtract periodic to get the last schedclock
         * timeout time, otherwise we would get the upcoming timeout.
         * Keep in mind that a process can migrate between cpus and
         * while the scheduler clock should be very close, boundary
         * conditions could lead to a small negative delta.
         */
        cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

        if (lp->lwp_slptime > 1) {
                /*
                 * Too much time has passed, do a coarse correction.
                 */
                lp->lwp_estcpu = lp->lwp_estcpu >> 1;
                bsd4_resetpriority(lp);
                lp->lwp_cpbase = cpbase;
        } else if (lp->lwp_cpbase != cpbase) {
                /*
                 * Adjust estcpu if we are in a different tick.  Don't waste
                 * time if we are in the same tick.
                 *
                 * First calculate the number of ticks in the measurement
                 * interval.  The nticks calculation can wind up 0 due to
                 * a bug in the handling of lwp_slptime (as yet not found),
                 * so make sure we do not get a divide by 0 panic.
                 */
                nticks = (cpbase - lp->lwp_cpbase) / gd->gd_schedclock.periodic;
                if (nticks <= 0)
                        nticks = 1;
                updatepcpu(lp, lp->lwp_cpticks, nticks);

                if ((nleft = nticks - lp->lwp_cpticks) < 0)
                        nleft = 0;
                if (usched_debug == lp->lwp_proc->p_pid) {
                        kprintf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
                                lp->lwp_proc->p_pid, lp->lwp_tid, lp->lwp_estcpu,
                                lp->lwp_cpticks, nticks, nleft);
                }

                /*
                 * Calculate a decay value based on ticks remaining scaled
                 * down by the instantaneous load and p_nice.
                 */
                if ((loadfac = bsd4_runqcount) < 2)
                        loadfac = 2;
                ndecay = nleft * usched_bsd4_decay * 2 *
                         (PRIO_MAX * 2 - lp->lwp_proc->p_nice) /
                         (loadfac * PRIO_MAX * 2);
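
                /*
                 * Worked example of the decay formula: for a nice 0 process
                 * (PRIO_MAX == 20) under minimal load (loadfac == 2):
                 *
                 *	ndecay = nleft * usched_bsd4_decay * 2 * 40 / (2 * 40)
                 *	       = nleft * usched_bsd4_decay
                 *
                 * A larger loadfac or a higher nice value shrinks ndecay, so
                 * loaded systems and niced processes decay estcpu more slowly.
                 */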

                /*
                 * Adjust p_estcpu.  Handle a border case where batch jobs
                 * can get stalled long enough to decay to zero when they
                 * also do not run a lot.
                 */
                if (lp->lwp_estcpu > ndecay * 2)
                        lp->lwp_estcpu -= ndecay;
                else
                        lp->lwp_estcpu >>= 1;

                if (usched_debug == lp->lwp_proc->p_pid)
                        kprintf(" ndecay %d estcpu %d\n", ndecay, lp->lwp_estcpu);
                bsd4_resetpriority(lp);
                lp->lwp_cpbase = cpbase;
        }
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
        bsd4_pcpu_t dd;
        int newpriority;
        u_char newrqtype;
        int reschedcpu;

        /*
         * Calculate the new priority and queue type
         */
        crit_enter();
        spin_lock_wr(&bsd4_spin);

        newrqtype = lp->lwp_rtprio.type;

        switch(newrqtype) {
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                newpriority = PRIBASE_REALTIME +
                              (lp->lwp_rtprio.prio & PRIMASK);
                break;
        case RTP_PRIO_NORMAL:
                newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
                newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
                newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
                              NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
                newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
                break;
        case RTP_PRIO_IDLE:
                newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
                break;
        case RTP_PRIO_THREAD:
                newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
                break;
        default:
                panic("Bad RTP_PRIO %d", newrqtype);
                /* NOT REACHED */
        }
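
        /*
         * A sketch of the RTP_PRIO_NORMAL math above: the nice contribution
         * spans PRIO_RANGE * PPQ / NICEPPQ priority units and the estcpu
         * contribution spans ESTCPUMAX * PPQ / ESTCPUPPQ units; their sum is
         * then rescaled so the combined maximum covers exactly MAXPRI units.
         * A nice PRIO_MIN process with zero estcpu therefore maps to the
         * best normal queue and a nice PRIO_MAX cpu hog maps to (nearly)
         * the worst.
         */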

        /*
         * The newpriority incorporates the queue type so do a simple masked
         * check to determine if the process has moved to another queue.  If
         * it has, and it is currently on a run queue, then move it.
         */
        if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
                lp->lwp_priority = newpriority;
                if (lp->lwp_flag & LWP_ONRUNQ) {
                        bsd4_remrunqueue_locked(lp);
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
                        bsd4_setrunqueue_locked(lp);
                        reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
                } else {
                        lp->lwp_rqtype = newrqtype;
                        lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
                        reschedcpu = -1;
                }
        } else {
                lp->lwp_priority = newpriority;
                reschedcpu = -1;
        }
        spin_unlock_wr(&bsd4_spin);

        /*
         * Determine if we need to reschedule the target cpu.  This only
         * occurs if the LWP is already on a scheduler queue, which means
         * that idle cpu notification has already occurred.  At most we
         * need only issue a need_user_resched() on the appropriate cpu.
         *
         * The LWP may be owned by a CPU different from the current one,
         * in which case dd->uschedcp may be modified without an MP lock
         * or a spinlock held.  The worst that happens is that the code
         * below causes a spurious need_user_resched() on the target CPU
         * and dd->upri to be wrong for a short period of time, both of
         * which are harmless.
         */
        if (reschedcpu >= 0) {
                dd = &bsd4_pcpu[reschedcpu];
                if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
                        dd->upri = lp->lwp_priority;
                        if (reschedcpu == mycpu->gd_cpuid) {
                                need_user_resched();
                        } else {
                                lwkt_send_ipiq(lp->lwp_thread->td_gd,
                                               need_user_resched_remote, NULL);
                        }
                }
        }
        crit_exit();
}

static void
bsd4_yield(struct lwp *lp)
{
#if 0
        /* FUTURE (or something similar) */
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
                break;
        default:
                break;
        }
#endif
        need_user_resched();
}

/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batch than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * Interactive processes will decay the boosted estcpu quickly while batch
 * processes will tend to compound it.
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
        lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
        lp->lwp_origcpu = lp->lwp_estcpu;
        plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
}
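
/*
 * Example of the batch-detection effect: every fork adds ESTCPUPPQ (one
 * queue's worth of estcpu) to both parent and child, so a process that
 * forks children in quick succession pushes the whole family toward the
 * batch queues, while an interactive parent decays the boost away between
 * forks via bsd4_recalculate_estcpu().
 */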

/*
 * Called when the parent reaps a child.  Propagate cpu use by the child
 * back to the parent.
 */
static void
bsd4_exiting(struct lwp *plp, struct lwp *lp)
{
        int delta;

        if (plp->lwp_proc->p_pid != 1) {
                delta = lp->lwp_estcpu - lp->lwp_origcpu;
                if (delta > 0)
                        plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
        }
}
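
/*
 * Worked example: a child forked with estcpu E starts with lwp_origcpu == E
 * (set in bsd4_forking() above).  If it exits with estcpu E + X, only the
 * delta X that it accumulated on its own is folded back into the parent,
 * clamped by ESTCPULIM(); a child that decayed below its starting point
 * contributes nothing.
 */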

/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *          left intact through the entire routine.
 */
static struct lwp *
chooseproc_locked(struct lwp *chklp)
{
        struct lwp *lp;
        struct rq *q;
        u_int32_t *which, *which2;
        u_int32_t pri;
        u_int32_t rtqbits;
        u_int32_t tsqbits;
        u_int32_t idqbits;
        cpumask_t cpumask;

        rtqbits = bsd4_rtqueuebits;
        tsqbits = bsd4_queuebits;
        idqbits = bsd4_idqueuebits;
        cpumask = mycpu->gd_cpumask;

again:
        if (rtqbits) {
                pri = bsfl(rtqbits);
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                which2 = &rtqbits;
        } else if (tsqbits) {
                pri = bsfl(tsqbits);
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                which2 = &tsqbits;
        } else if (idqbits) {
                pri = bsfl(idqbits);
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                which2 = &idqbits;
        } else {
                return NULL;
        }
        lp = TAILQ_FIRST(q);
        KASSERT(lp, ("chooseproc: no lwp on busy queue"));

        /*
         * Skip lwps that cannot run on this cpu (per lwp_cpumask).  If
         * the queue is exhausted, clear its bit in the local copy and
         * rescan from the top.
         */
        while ((lp->lwp_cpumask & cpumask) == 0) {
                lp = TAILQ_NEXT(lp, lwp_procq);
                if (lp == NULL) {
                        *which2 &= ~(1 << pri);
                        goto again;
                }
        }

        /*
         * If the passed lwp <chklp> is reasonably close to the selected
         * lwp <lp>, return NULL (indicating that <chklp> should be kept).
         *
         * Note that we must error on the side of <chklp> to avoid bouncing
         * between threads in the acquire code.
         */
        if (chklp) {
                if (chklp->lwp_priority < lp->lwp_priority + PPQ)
                        return NULL;
        }
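
        /*
         * Example of the strictness requirement: with PPQ == 4, a chklp at
         * priority 12 is kept even against a queued lp at priority 10
         * (better, but by less than PPQ).  The queued lp must be better by
         * at least a full queue (e.g. priority 8) before it can displace
         * chklp, which keeps two nearly-equal threads from trading the
         * current process designation back and forth.
         */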

        /*
         * If the chosen lwp does not reside on this cpu spend a few
         * cycles looking for a better candidate at the same priority level.
         * This is a fallback check, setrunqueue() tries to wakeup the
         * correct cpu and is our front-line affinity.
         */
        if (lp->lwp_thread->td_gd != mycpu &&
            (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
        ) {
                if (chklp->lwp_thread->td_gd == mycpu) {
                        ++choose_affinity;
                        lp = chklp;
                }
        }

        TAILQ_REMOVE(q, lp, lwp_procq);
        --bsd4_runqcount;
        if (TAILQ_EMPTY(q))
                *which &= ~(1 << pri);
        KASSERT((lp->lwp_flag & LWP_ONRUNQ) != 0, ("not on runq6!"));
        lp->lwp_flag &= ~LWP_ONRUNQ;
        return lp;
}

/*
 * Called via an ipi message to reschedule on another cpu.  If no
 * user thread is active on the target cpu we wake the scheduler
 * helper thread up to help schedule one.
 */
static void
need_user_resched_remote(void *dummy)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

        if (dd->uschedcp == NULL && (bsd4_rdyprocmask & gd->gd_cpumask)) {
                atomic_clear_int(&bsd4_rdyprocmask, gd->gd_cpumask);
                lwkt_schedule(&dd->helper_thread);
        } else {
                need_user_resched();
        }
}

/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
        struct rq *q;
        u_int32_t *which;
        u_int8_t pri;

        KKASSERT(lp->lwp_flag & LWP_ONRUNQ);
        lp->lwp_flag &= ~LWP_ONRUNQ;
        --bsd4_runqcount;
        KKASSERT(bsd4_runqcount >= 0);

        pri = lp->lwp_rqindex;
        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                break;
        default:
                panic("remrunqueue: invalid rtprio type");
                /* NOT REACHED */
        }
        TAILQ_REMOVE(q, lp, lwp_procq);
        if (TAILQ_EMPTY(q)) {
                KASSERT((*which & (1 << pri)) != 0,
                        ("remrunqueue: remove from empty queue"));
                *which &= ~(1 << pri);
        }
}

/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex have previously been calculated
 * onto the appropriate run queue.  The caller determines whether the
 * addition requires kicking a cpu (see bsd4_setrunqueue()).
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
        struct rq *q;
        u_int32_t *which;
        int pri;

        KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
        lp->lwp_flag |= LWP_ONRUNQ;
        ++bsd4_runqcount;

        pri = lp->lwp_rqindex;

        switch(lp->lwp_rqtype) {
        case RTP_PRIO_NORMAL:
                q = &bsd4_queues[pri];
                which = &bsd4_queuebits;
                break;
        case RTP_PRIO_REALTIME:
        case RTP_PRIO_FIFO:
                q = &bsd4_rtqueues[pri];
                which = &bsd4_rtqueuebits;
                break;
        case RTP_PRIO_IDLE:
                q = &bsd4_idqueues[pri];
                which = &bsd4_idqueuebits;
                break;
        default:
                panic("setrunqueue: invalid rtprio type");
                /* NOT REACHED */
        }

        /*
         * Add to the correct queue and set the appropriate bit.  If no
         * lower priority (i.e. better) processes are in the queue then
         * we want a reschedule, calculate the best cpu for the job.
         *
         * Always run reschedules on the LWPs original cpu.
         */
        TAILQ_INSERT_TAIL(q, lp, lwp_procq);
        *which |= 1 << pri;
}

#ifdef SMP

/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().  UP systems do not
 * need the helper since there is only one cpu.  We can't use the idle
 * thread for this because we need to hold the MP lock.  Additionally,
 * doing things this way allows us to HLT idle cpus on MP systems.
 */
static void
sched_thread(void *dummy)
{
        globaldata_t gd = mycpu;
        bsd4_pcpu_t dd;
        struct lwp *nlp;
        cpumask_t cpumask;
        cpumask_t tmpmask;
        int cpuid;
        int tmpid;

        cpuid = gd->gd_cpuid;           /* doesn't change */
        cpumask = gd->gd_cpumask;       /* doesn't change */
        dd = &bsd4_pcpu[cpuid];

        /*
         * The scheduler thread does not need to hold the MP lock.  Since we
         * are woken up only when no user processes are scheduled on a cpu, we
         * can run at an ultra low priority.
         */
        lwkt_setpri_self(TDPRI_USER_SCHEDULER);

        for (;;) {
                /*
                 * We use the LWKT deschedule-interlock trick to avoid racing
                 * bsd4_rdyprocmask.  This means we cannot block through to the
                 * manual lwkt_switch() call we make below.
                 */
                crit_enter_gd(gd);
                lwkt_deschedule_self(gd->gd_curthread);
                spin_lock_wr(&bsd4_spin);
                atomic_set_int(&bsd4_rdyprocmask, cpumask);

                clear_user_resched();   /* This satisfies the reschedule request */
                dd->rrcount = 0;        /* Reset the round-robin counter */

                if ((bsd4_curprocmask & cpumask) == 0) {
                        /*
                         * No thread is currently scheduled.
                         */
                        KKASSERT(dd->uschedcp == NULL);
                        if ((nlp = chooseproc_locked(NULL)) != NULL) {
                                atomic_set_int(&bsd4_curprocmask, cpumask);
                                dd->upri = nlp->lwp_priority;
                                dd->uschedcp = nlp;
                                spin_unlock_wr(&bsd4_spin);
                                lwkt_acquire(nlp->lwp_thread);
                                lwkt_schedule(nlp->lwp_thread);
                        } else {
                                spin_unlock_wr(&bsd4_spin);
                        }
#if 0
                /*
                 * Disabled for now, this can create an infinite loop.
                 */
                } else if (bsd4_runqcount) {
                        /*
                         * Someone scheduled us but raced.  In order to not
                         * lose track of the fact that there may be a LWP
                         * ready to go, forward the request to another cpu
                         * if available.
                         *
                         * Rotate through cpus starting with cpuid + 1.  Since
                         * cpuid is already masked out by gd_other_cpus, just
                         * use ~cpumask.
                         */
                        tmpmask = bsd4_rdyprocmask & mycpu->gd_other_cpus &
                                  smp_active_mask;
                        if (tmpmask) {
                                if (tmpmask & ~(cpumask - 1))
                                        tmpid = bsfl(tmpmask & ~(cpumask - 1));
                                else
                                        tmpid = bsfl(tmpmask);
                                bsd4_scancpu = tmpid;
                                atomic_clear_int(&bsd4_rdyprocmask, 1 << tmpid);
                                spin_unlock_wr(&bsd4_spin);
                                lwkt_schedule(&bsd4_pcpu[tmpid].helper_thread);
                        } else {
                                spin_unlock_wr(&bsd4_spin);
                        }
#endif
                } else {
                        /*
                         * The runq is empty.
                         */
                        spin_unlock_wr(&bsd4_spin);
                }
                crit_exit_gd(gd);
                lwkt_switch();
        }
}

/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
        int i;

        kprintf("start scheduler helpers on cpus:");

        for (i = 0; i < ncpus; ++i) {
                bsd4_pcpu_t dd = &bsd4_pcpu[i];
                cpumask_t mask = 1 << i;

                if ((mask & smp_active_mask) == 0)
                        continue;

                kprintf(" %d", i);
                lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
                            TDF_STOPREQ, i, "usched %d", i);

                /*
                 * Allow user scheduling on the target cpu.  cpu #0 has already
                 * been enabled in rqinit().
                 */
                if (i)
                        atomic_clear_int(&bsd4_curprocmask, mask);
                atomic_set_int(&bsd4_rdyprocmask, mask);
                dd->upri = PRIBASE_NULL;
        }
        kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
        sched_thread_cpu_init, NULL)

#endif  /* SMP */