/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.16 2006/07/11 01:01:50 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/ipl.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
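
/*
 * For illustration (not additional kernel code): with MAXPRI at 128 and
 * NQS at 32, PPQ is 4, so a priority maps onto a run queue index as
 *
 *	rqindex = (priority & PRIMASK) / PPQ;
 *
 * e.g. a normal-class priority of PRIBASE_NORMAL + 53 masks down to 53
 * and lands in queue 13, while ~PPQMASK is used to compare priorities at
 * whole-queue granularity.  This is the calculation bsd4_resetpriority()
 * performs below.
 */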
/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPURAMP	- number of scheduler ticks for estcpu to switch queues
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 * ESTCPUINCR	- amount we have to increment p_estcpu per scheduling tick at
 *		  100% cpu
 */
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define ESTCPUINCR	(ESTCPUPPQ / ESTCPURAMP)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
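
/*
 * Worked example (derived from the definitions above, not additional
 * kernel code): a thread that runs continuously gains ESTCPUINCR units of
 * estcpu per scheduler tick, so accumulating one queue's worth takes
 * ESTCPUPPQ / ESTCPUINCR == ESTCPURAMP ticks, matching the description of
 * ESTCPURAMP above.  ESTCPULIM() caps the value at ESTCPUMAX, i.e. NQS
 * queues worth of estcpu, as in the bump done by the scheduler clock:
 *
 *	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
 */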
#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_origcpu	lwp_usdata.bsd4.origcpu
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_rqtype	lwp_usdata.bsd4.rqtype
static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *plp, struct lwp *lp);

static void need_user_resched_remote(void *dummy);
static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);
struct usched usched_bsd4 = {
	"bsd4", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	bsd4_acquire_curproc,
	bsd4_release_curproc,
	bsd4_recalculate_estcpu,
	NULL			/* setcpumask not supported */
};
struct usched_bsd4_pcpu {
	struct thread	helper_thread;
	short		rrcount;
	short		upri;
	struct lwp	*uschedcp;
};

typedef struct usched_bsd4_pcpu	*bsd4_pcpu_t;
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
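
/*
 * For illustration (a sketch, using the bsfl() helper that the code below
 * uses to find the lowest set bit): selecting the best non-empty
 * normal-class queue is a two-step operation,
 *
 *	pri = bsfl(bsd4_queuebits);		(lowest-numbered busy queue)
 *	lp  = TAILQ_FIRST(&bsd4_queues[pri]);	(FIFO head of that queue)
 *
 * which is what chooseproc_locked() does, checking the realtime queues
 * first, then the normal queues, then the idle queues.
 */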
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1;	/* currently running a user process */
static cpumask_t bsd4_rdyprocmask;	/* ready to accept a user process */
static int	 bsd4_runqcount;

static volatile int bsd4_scancpu;

static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0, "");

static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
	&usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
	&usched_optimal, 0, "acquire_curproc() was optimal");

static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0, "");

static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
	&remote_resched_nonaffinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
	&remote_resched_affinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
	&choose_affinity, 0, "chooseproc() was smart");

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
	&usched_bsd4_rrinterval, 0, "");
static int usched_bsd4_decay = ESTCPUINCR / 2;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
	&usched_bsd4_decay, 0, "");
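
/*
 * For illustration (derived from the default above, not additional kernel
 * code): ESTCPUFREQ is the rate at which the per-cpu scheduler clock runs
 * (bsd4_schedulerclock() below is called at ESTCPUFREQ on each cpu), so
 * (ESTCPUFREQ + 9) / 10 is one tenth of that rate rounded up, which is why
 * the round-robin check in bsd4_schedulerclock() fires roughly 10 times a
 * second.
 */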
/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
	int i;
	spin_init(&bsd4_spin);
	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&bsd4_queues[i]);
		TAILQ_INIT(&bsd4_rtqueues[i]);
		TAILQ_INIT(&bsd4_idqueues[i]);
	}
	atomic_clear_int(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * We are expected to handle userland reschedule requests here too.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * Basically we recalculate our estcpu to hopefully give us a more
 * favorable disposition, setrunqueue, then wait for the curlwp
 * designation to be handed to us (if the setrunqueue didn't do it).
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Possibly select another thread, or keep the current thread.
	 */
	if (user_resched_wanted())
		bsd4_select_curproc(gd);

	/*
	 * If uschedcp is still pointing to us, we're done
	 */
	if (dd->uschedcp == lp)
		return;

	/*
	 * If this cpu has no current thread, and the run queue is
	 * empty, we can safely select ourself.
	 */
	if (dd->uschedcp == NULL && bsd4_runqcount == 0) {
		atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		return;
	}

	/*
	 * Adjust estcpu and recalculate our priority, then put us back on
	 * the user process scheduler's runq.  Only increment the involuntary
	 * context switch count if the setrunqueue call did not immediately
	 * schedule us.
	 *
	 * Loop until we become the currently scheduled process.  Note that
	 * calling setrunqueue can cause us to be migrated to another cpu
	 * after we switch away.
	 */
	do {
		bsd4_recalculate_estcpu(lp);
		lwkt_deschedule_self(gd->gd_curthread);
		bsd4_setrunqueue(lp);
		if ((gd->gd_curthread->td_flags & TDF_RUNQ) == 0)
			++lp->lwp_stats->p_ru.ru_nivcsw;
		lwkt_switch();
		gd = mycpu;
		dd = &bsd4_pcpu[gd->gd_cpuid];
	} while (dd->uschedcp != lp);

	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
}
/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run in the kernel (at kernel priority)
 * for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().  try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * usable.
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	if (dd->uschedcp == lp) {
		/*
		 * Note: we leave our curprocmask bit set to prevent
		 * unnecessary scheduler helper wakeups.
		 * bsd4_select_curproc() will clean it up.
		 */
		KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
		dd->uschedcp = NULL;	/* don't let lp be selected */
		bsd4_select_curproc(gd);
	}
}
/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu.  This satisfies a user
 * scheduler reschedule request so clear that too.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 */
static void
bsd4_select_curproc(globaldata_t gd)
{
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	clear_user_resched();	/* This satisfies the reschedule request */
	dd->rrcount = 0;	/* Reset the round-robin counter */

	spin_lock_wr(&bsd4_spin);
	if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		spin_unlock_wr(&bsd4_spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else if (dd->uschedcp) {
		dd->upri = dd->uschedcp->lwp_priority;
		spin_unlock_wr(&bsd4_spin);
		KKASSERT(bsd4_curprocmask & (1 << cpuid));
	} else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
		atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
		dd->uschedcp = NULL;
		dd->upri = PRIBASE_NULL;
		spin_unlock_wr(&bsd4_spin);
		lwkt_schedule(&dd->helper_thread);
	} else {
		dd->uschedcp = NULL;
		dd->upri = PRIBASE_NULL;
		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
		spin_unlock_wr(&bsd4_spin);
	}
}
/*
 * BSD4_SETRUNQUEUE
 *
 * This routine is called to schedule a new user process after a fork.
 *
 * The caller may set P_PASSIVE_ACQ in p_flag to indicate that we should
 * attempt to leave the thread on the current cpu.
 *
 * If P_PASSIVE_ACQ is set setrunqueue() will not wakeup potential target
 * cpus in an attempt to keep the process on the current cpu at least for
 * a little while to take advantage of locality of reference (e.g. fork/exec
 * or short fork/exit, and uio_yield()).
 *
 * CPU AFFINITY: cpu affinity is handled by attempting to either schedule
 * or (user level) preempt on the same cpu that a process was previously
 * scheduled to.  If we cannot do this but we are at enough of a higher
 * priority than the processes running on other cpus, we will allow the
 * process to be stolen by another cpu.
 *
 * WARNING! This routine cannot block.  bsd4_acquire_curproc() does
 * a deschedule/switch interlock and we can be moved to another cpu
 * the moment we are switched out.  Our LWKT run state is the only
 * thing preventing the transfer.
 *
 * The associated thread must NOT currently be scheduled (but can be the
 * current process after it has been LWKT descheduled).  It must NOT be on
 * a bsd4 scheduler queue either.  The purpose of this routine is to put
 * it on a scheduler queue or make it the current user process and LWKT
 * schedule it.  It is possible that the thread is in the middle of a LWKT
 * switchout on another cpu; lwkt_acquire() deals with that case.
 *
 * The process must be runnable.
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	int cpuid;
	cpumask_t mask, tmpmask;

	/*
	 * First validate the process state relative to the current cpu.
	 * We don't need the spinlock for this, just a critical section.
	 * We are in control of the process.
	 */
	KASSERT(lp->lwp_proc->p_stat == SRUN, ("setrunqueue: proc not SRUN"));
	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x", lp->lwp_proc->p_pid,
	    lp->lwp_tid, lp->lwp_proc->p_flag));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
	/*
	 * Note: gd and dd are relative to the target thread's last cpu,
	 * NOT our current cpu.
	 */
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(dd->uschedcp != lp);

	/*
	 * Check local cpu affinity.  The associated thread is stable at
	 * the moment.  Note that we may be checking another cpu here so we
	 * have to be careful.  We can only assign uschedcp on OUR cpu.
	 *
	 * This allows us to avoid actually queueing the process.
	 * acquire_curproc() will handle any threads we mistakenly schedule.
	 */
	cpuid = gd->gd_cpuid;
	if (gd == mycpu && (bsd4_curprocmask & (1 << cpuid)) == 0) {
		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		lwkt_schedule(lp->lwp_thread);
		return;
	}
	/*
	 * gd and cpuid may still 'hint' at another cpu.  Even so we have
	 * to place this process on the userland scheduler's run queue for
	 * action by the target cpu.
	 */
	/*
	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
	 * operation when the priority is recalculated, so TDF_MIGRATING
	 * may already be set.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);

	/*
	 * We lose control of lp the moment we release the spinlock after
	 * having placed lp on the queue.  i.e. another cpu could pick it
	 * up and it could exit, or its priority could be further adjusted,
	 * or something like that.
	 */
	spin_lock_wr(&bsd4_spin);
	bsd4_setrunqueue_locked(lp);
	/*
	 * gd, dd, and cpuid are still our target cpu 'hint', not our current
	 * cpu.
	 *
	 * We always try to schedule a LWP to its original cpu first.  It
	 * is possible for the scheduler helper or setrunqueue to assign
	 * the LWP to a different cpu before the one we asked for wakes
	 * up.
	 *
	 * If the LWP has higher priority (lower lwp_priority value) on
	 * its target cpu, reschedule on that cpu.
	 */
	if ((lp->lwp_thread->td_flags & TDF_NORESCHED) == 0) {
		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
			dd->upri = lp->lwp_priority;
			spin_unlock_wr(&bsd4_spin);
			lwkt_send_ipiq(gd, need_user_resched_remote,
				       NULL);
			return;
		}
	}
	spin_unlock_wr(&bsd4_spin);
	/*
	 * Otherwise the LWP has a lower priority or we were asked not
	 * to reschedule.  Look for an idle cpu whose scheduler helper
	 * is ready to accept more work.
	 *
	 * Look for an idle cpu starting at our rotator (bsd4_scancpu).
	 *
	 * If no cpus are ready to accept work, just return.
	 */
	mask = ~bsd4_curprocmask & bsd4_rdyprocmask & mycpu->gd_other_cpus &
	       smp_active_mask;
	cpuid = bsd4_scancpu;
	if (++cpuid == ncpus)
		cpuid = 0;
	tmpmask = ~((1 << cpuid) - 1);
	cpuid = bsfl(mask & tmpmask);
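	/*
	 * Illustrative note (not additional code): tmpmask clears the bits
	 * below the rotator, so the bsfl() above prefers the first ready
	 * cpu at or after bsd4_scancpu.  For example, with ncpus == 4 and
	 * bsd4_scancpu == 1, cpuid starts at 2, tmpmask is ~0x3, and cpus
	 * 2 and 3 are considered before wrapping around to the low cpus.
	 */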
	atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
	bsd4_scancpu = cpuid;
	lwkt_schedule(&bsd4_pcpu[cpuid].helper_thread);
/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * Because this is effectively a 'fast' interrupt, we cannot safely
 * use spinlocks unless gd_spinlock_rd is NULL and gd_spinlocks_wr is 0,
 * even if the spinlocks are 'non conflicting'.  This is due to the way
 * spinlock conflicts against cached read locks are handled.
 */
static void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
	 */
	if (++dd->rrcount >= usched_bsd4_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}

	/*
	 * As the process accumulates cpu time p_estcpu is bumped and may
	 * push the process into another scheduling queue.  It typically
	 * takes 4 ticks to bump the queue.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);

	/*
	 * Reducing p_origcpu over time causes more of our estcpu to be
	 * returned to the parent when we exit.  This is a small tweak
	 * for the batch detection heuristic.
	 */

	/*
	 * We can only safely call bsd4_resetpriority(), which uses spinlocks,
	 * if we aren't interrupting a thread that is using spinlocks.
	 * Otherwise we can deadlock with another cpu waiting for our read
	 * spinlocks to clear.
	 */
	if (gd->gd_spinlock_rd == NULL && gd->gd_spinlocks_wr == 0)
		bsd4_resetpriority(lp);
/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
bsd4_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	int loadfac, ndecay, nticks, nleft;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The nticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		nticks = (cpbase - lp->lwp_cpbase) / gd->gd_schedclock.periodic;
		if (nticks <= 0)
			nticks = 1;
		updatepcpu(lp, lp->lwp_cpticks, nticks);

		if ((nleft = nticks - lp->lwp_cpticks) < 0)
			nleft = 0;
		if (usched_debug == lp->lwp_proc->p_pid) {
			printf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
				lp->lwp_proc->p_pid, lp->lwp_tid, lp->lwp_estcpu,
				lp->lwp_cpticks, nticks, nleft);
		}
		/*
		 * Calculate a decay value based on ticks remaining scaled
		 * down by the instantaneous load and p_nice.
		 */
		if ((loadfac = bsd4_runqcount) < 2)
			loadfac = 2;
		ndecay = nleft * usched_bsd4_decay * 2 *
			 (PRIO_MAX * 2 - lp->lwp_proc->p_nice) / (loadfac * PRIO_MAX * 2);
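
		/*
		 * Worked example (illustrative only): PRIO_MAX is 20, so for
		 * a nice-0 process the nice term is (40 - 0) and the divisor
		 * is loadfac * 40.  At the minimum load factor of 2 that
		 * reduces to
		 *
		 *	ndecay = nleft * usched_bsd4_decay * 2 * 40 / 80
		 *	       = nleft * usched_bsd4_decay
		 *
		 * i.e. each observed idle tick forgives usched_bsd4_decay
		 * units of estcpu; a higher load or a positive nice value
		 * shrinks the correction.
		 */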
		/*
		 * Adjust p_estcpu.  Handle a border case where batch jobs
		 * can get stalled long enough to decay to zero when they
		 * are not running.
		 */
		if (lp->lwp_estcpu > ndecay * 2)
			lp->lwp_estcpu -= ndecay;
		else
			lp->lwp_estcpu >>= 1;

		if (usched_debug == lp->lwp_proc->p_pid)
			printf(" ndecay %d estcpu %d\n", ndecay, lp->lwp_estcpu);
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
	}
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
	bsd4_pcpu_t dd;
	int newpriority, newrqtype, reschedcpu;

	/*
	 * Calculate the new priority and queue type
	 */
	spin_lock_wr(&bsd4_spin);

	newrqtype = lp->lwp_rtprio.type;
	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
		newpriority = PRIBASE_REALTIME +
			      (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
	}
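
	/*
	 * A note on the normal-class formula above (illustrative, derived
	 * from the defines near the top of this file): the raw value is
	 *
	 *	(p_nice - PRIO_MIN) * PPQ / NICEPPQ + estcpu * PPQ / ESTCPUPPQ
	 *
	 * whose maximum is PRIO_RANGE * PPQ / NICEPPQ +
	 * ESTCPUMAX * PPQ / ESTCPUPPQ, exactly the divisor used, so after
	 * scaling by MAXPRI the result always falls in [0, MAXPRI) and the
	 * PRIMASK mask does not lose information.  Since ESTCPUMAX is
	 * ESTCPUPPQ * NQS, the estcpu term can contribute up to NQS * PPQ
	 * (== MAXPRI) to the raw value before scaling.
	 */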
	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		lp->lwp_priority = newpriority;
		if (lp->lwp_proc->p_flag & P_ONRUNQ) {
			bsd4_remrunqueue_locked(lp);
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			bsd4_setrunqueue_locked(lp);
			reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
		} else {
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			reschedcpu = -1;
		}
	} else {
		lp->lwp_priority = newpriority;
		reschedcpu = -1;
	}
	spin_unlock_wr(&bsd4_spin);
	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 */
	if (reschedcpu >= 0) {
		dd = &bsd4_pcpu[reschedcpu];
		KKASSERT(dd->uschedcp != lp);
		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
			dd->upri = lp->lwp_priority;
			if (reschedcpu == mycpu->gd_cpuid) {
				need_user_resched();
			} else {
				lwkt_send_ipiq(lp->lwp_thread->td_gd,
					       need_user_resched_remote, NULL);
			}
		}
	}
}
/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batch than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * Interactive processes will decay the boosted estcpu quickly while batch
 * processes will tend to compound it.
 *
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
	lp->lwp_origcpu = lp->lwp_estcpu;
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
}
/*
 * Called when the parent reaps a child.  Propagate cpu use by the child
 * back to the parent.
 */
static void
bsd4_exiting(struct lwp *plp, struct lwp *lp)
{
	if (plp->lwp_proc->p_pid != 1) {
		int delta = lp->lwp_estcpu - lp->lwp_origcpu;
		plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
	}
}
/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *	    left intact through the entire routine.
 */
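
/*
 * Illustrative note on the hysteresis check below (not additional code):
 * because the test is
 *
 *	if (chklp->lwp_priority < lp->lwp_priority + PPQ)
 *		return(NULL);
 *
 * the current designated lwp <chklp> is kept unless the queued candidate
 * <lp> is better by at least a full priority queue (PPQ).  This is the
 * "strict" test mentioned above, and it keeps two runnable lwps from
 * ping-ponging the current-process designation.
 */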
static struct lwp *
chooseproc_locked(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri, rtqbits, tsqbits, idqbits;
	cpumask_t cpumask;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	while ((lp->lwp_cpumask & cpumask) == 0) {
		lp = TAILQ_NEXT(lp, lwp_procq);
		*which2 &= ~(1 << pri);
	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return(NULL);
	}
	/*
	 * If the chosen lwp does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (lp->lwp_thread->td_gd != mycpu &&
	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
	) {
		if (chklp->lwp_thread->td_gd == mycpu) {
			++choose_affinity;
			lp = chklp;
		}
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) != 0, ("not on runq6!"));
	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
	return lp;
}
/*
 * Called via an ipi message to reschedule on another cpu.
 */
need_user_resched_remote(void *dummy)
/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int32_t pri;

	KKASSERT(lp->lwp_proc->p_flag & P_ONRUNQ);
	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
	--bsd4_runqcount;
	KKASSERT(bsd4_runqcount >= 0);

	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
		    ("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}
/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.  Determine if the addition requires
 * a reschedule on a cpu and return the cpuid or -1.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int32_t pri;

	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
	lp->lwp_proc->p_flag |= P_ONRUNQ;
	++bsd4_runqcount;
	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
	}
	/*
	 * Add to the correct queue and set the appropriate bit.  If no
	 * lower priority (i.e. better) processes are in the queue then
	 * we want a reschedule, calculate the best cpu for the job.
	 *
	 * Always run reschedules on the LWP's original cpu.
	 */
	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	*which |= 1 << pri;
}
/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().  UP systems do not
 * need the helper since there is only one cpu.  We can't use the idle
 * thread for this because we need to hold the MP lock.  Additionally,
 * doing things this way allows us to HLT idle cpus on MP systems.
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd;
	struct lwp *nlp;
	cpumask_t cpumask, tmpmask;
	int cpuid, tmpid;

	cpuid = gd->gd_cpuid;		/* doesn't change */
	cpumask = 1 << cpuid;		/* doesn't change */
	dd = &bsd4_pcpu[cpuid];
	/*
	 * The scheduler thread does not need to hold the MP lock.  Since we
	 * are woken up only when no user processes are scheduled on a cpu, we
	 * can run at an ultra low priority.
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);
	/*
	 * We use the LWKT deschedule-interlock trick to avoid racing
	 * bsd4_rdyprocmask.  This means we cannot block through to the
	 * manual lwkt_switch() call we make below.
	 */
	lwkt_deschedule_self(gd->gd_curthread);
	spin_lock_wr(&bsd4_spin);
	atomic_set_int(&bsd4_rdyprocmask, cpumask);
	if ((bsd4_curprocmask & cpumask) == 0) {
		if ((nlp = chooseproc_locked(NULL)) != NULL) {
			atomic_set_int(&bsd4_curprocmask, cpumask);
			dd->upri = nlp->lwp_priority;
			dd->uschedcp = nlp;
			spin_unlock_wr(&bsd4_spin);
			lwkt_acquire(nlp->lwp_thread);
			lwkt_schedule(nlp->lwp_thread);
		} else {
			spin_unlock_wr(&bsd4_spin);
		}
		/*
		 * Someone scheduled us but raced.  In order to not lose
		 * track of the fact that there may be a LWP ready to go,
		 * forward the request to another cpu if available.
		 *
		 * Rotate through cpus starting with cpuid + 1.  Since cpuid
		 * is already masked out by gd_other_cpus, just use ~cpumask.
		 */
		tmpmask = ~bsd4_curprocmask & bsd4_rdyprocmask &
			  mycpu->gd_other_cpus;
		if (tmpmask) {
			if (tmpmask & ~(cpumask - 1))
				tmpid = bsfl(tmpmask & ~(cpumask - 1));
			else
				tmpid = bsfl(tmpmask);
			bsd4_scancpu = tmpid;
			atomic_clear_int(&bsd4_rdyprocmask, 1 << tmpid);
			spin_unlock_wr(&bsd4_spin);
			lwkt_schedule(&bsd4_pcpu[tmpid].helper_thread);
		} else {
			spin_unlock_wr(&bsd4_spin);
		}
/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;

	printf("start scheduler helpers on cpus:");

	for (i = 0; i < ncpus; ++i) {
		bsd4_pcpu_t dd = &bsd4_pcpu[i];
		cpumask_t mask = 1 << i;

		if ((mask & smp_active_mask) == 0)
			continue;

		lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
			    TDF_STOPREQ, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has already
		 * been enabled in rqinit().
		 */
		atomic_clear_int(&bsd4_curprocmask, mask);
		atomic_set_int(&bsd4_rdyprocmask, mask);
	}
}
SYSINIT(uschedtd, SI_SUB_FINISH_SMP, SI_ORDER_ANY, sched_thread_cpu_init, NULL)