/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAXPRI			128
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)
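
/*
 * Worked example (a sketch using the values above): an RTP_PRIO_NORMAL
 * lwp with rtprio 10 gets
 *
 *	lwp_priority = PRIBASE_NORMAL + 10 = 128 + 10 = 138
 *
 * while every RTP_PRIO_REALTIME lwp lands in [0, MAXPRI) and thus always
 * compares as more important.  Lower lwp_priority values are better; see
 * dummy_resetpriority() below for the actual assignment.
 */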

#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_estcpu	lwp_usdata.bsd4.estcpu

static void dummy_acquire_curproc(struct lwp *lp);
static void dummy_release_curproc(struct lwp *lp);
static void dummy_select_curproc(globaldata_t gd);
static void dummy_setrunqueue(struct lwp *lp);
static void dummy_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void dummy_recalculate_estcpu(struct lwp *lp);
static void dummy_resetpriority(struct lwp *lp);
static void dummy_forking(struct lwp *plp, struct lwp *lp);
static void dummy_exiting(struct lwp *plp, struct proc *child);
static void dummy_uload_update(struct lwp *lp);
static void dummy_yield(struct lwp *lp);

struct usched usched_dummy = {
	{ NULL },
	"dummy", "Dummy DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	dummy_acquire_curproc,
	dummy_release_curproc,
	dummy_setrunqueue,
	dummy_schedulerclock,
	dummy_recalculate_estcpu,
	dummy_resetpriority,
	dummy_forking,
	dummy_exiting,
	dummy_uload_update,
	NULL,			/* setcpumask not supported */
	dummy_yield
};

struct usched_dummy_pcpu {
	int	rrcount;
	struct thread helper_thread;
	struct lwp *uschedcp;
};

typedef struct usched_dummy_pcpu *dummy_pcpu_t;

static struct usched_dummy_pcpu dummy_pcpu[MAXCPU];
static cpumask_t dummy_curprocmask = -1;
static cpumask_t dummy_rdyprocmask;
static struct spinlock dummy_spin;
static TAILQ_HEAD(rq, lwp) dummy_runq;
static int dummy_runqcount;

static int usched_dummy_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_dummy_rrinterval, CTLFLAG_RW,
	&usched_dummy_rrinterval, 0, "");
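
/*
 * Worked example: assuming an ESTCPUFREQ of 50 (a 50hz scheduler clock;
 * the real value may differ), rrinterval = (50 + 9) / 10 = 5 ticks, so a
 * running user thread is asked to round-robin about every 5/50 = 100ms.
 * The interval can be tuned at runtime via the sysctl created above:
 *
 *	sysctl kern.usched_dummy_rrinterval=10
 */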

/*
 * Initialize the run queues at boot time, clear cpu 0 in curprocmask
 * to allow dummy scheduling on cpu 0.
 */
static void
dummyinit(void *dummy)
{
	TAILQ_INIT(&dummy_runq);
	spin_init(&dummy_spin);
	atomic_clear_cpumask(&dummy_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, dummyinit, NULL)

/*
 * DUMMY_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel will not depress our LWKT priority until after we return,
 * in case we have to shove over to another cpu.
 *
 * We must determine our thread's disposition before we switch away.  This
 * is very sensitive code.
 *
 * We are expected to handle userland reschedule requests here too.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * MPSAFE
 */
static void
dummy_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	thread_t td = lp->lwp_thread;

	/*
	 * Possibly select another thread
	 */
	if (user_resched_wanted())
		dummy_select_curproc(gd);

	/*
	 * If this cpu has no current thread, select ourself
	 */
	if (dd->uschedcp == lp ||
	    (dd->uschedcp == NULL && TAILQ_EMPTY(&dummy_runq))) {
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		return;
	}

	/*
	 * If this cpu's current user process thread is not our thread,
	 * deschedule ourselves and place us on the run queue, then
	 * switch away.
	 *
	 * We loop until we become the current process.  It's a good idea
	 * to run any passive release(s) before we mess with the scheduler
	 * so our thread is in the expected state.
	 */
	KKASSERT(dd->uschedcp != lp);
	if (td->td_release)
		td->td_release(lp->lwp_thread);
	do {
		crit_enter();
		lwkt_deschedule_self(td);
		dummy_setrunqueue(lp);
		if ((td->td_flags & TDF_RUNQ) == 0)
			++lp->lwp_ru.ru_nivcsw;
		lwkt_switch();		/* WE MAY MIGRATE TO ANOTHER CPU */
		crit_exit();
		gd = mycpu;
		dd = &dummy_pcpu[gd->gd_cpuid];
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	} while (dd->uschedcp != lp);
}
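
/*
 * Example timeline for the loop above (a sketch): thread A calls
 * dummy_acquire_curproc() on cpu0 while dd->uschedcp is thread B.  A
 * deschedules itself, places itself on the run queue via
 * dummy_setrunqueue(), and switches away.  Eventually a helper thread
 * or dummy_select_curproc() on some cpu dequeues A, sets that cpu's
 * uschedcp to A and lwkt_schedule()s it.  A resumes inside the loop,
 * re-fetches dd for the cpu it now finds itself on, and exits the loop
 * as that cpu's designated user thread.
 */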

/*
 * DUMMY_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run in the kernel (at kernel priority)
 * for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * MPSAFE
 */
static void
dummy_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	if (dd->uschedcp == lp) {
		dummy_select_curproc(gd);
	}
}

/*
 * DUMMY_SELECT_CURPROC
 *
 * Select a new current process for this cpu.  This satisfies a user
 * scheduler reschedule request, so clear that too.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from dummy_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * MPSAFE
 */
static void
dummy_select_curproc(globaldata_t gd)
{
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	struct lwp *lp;

	clear_user_resched();
	spin_lock(&dummy_spin);
	if ((lp = TAILQ_FIRST(&dummy_runq)) == NULL) {
		dd->uschedcp = NULL;
		atomic_clear_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock(&dummy_spin);
	} else {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		spin_unlock(&dummy_spin);
		lwkt_acquire(lp->lwp_thread);
		lwkt_schedule(lp->lwp_thread);
	}
}

/*
 * DUMMY_SETRUNQUEUE
 *
 * This routine is called to schedule a new user process after a fork.
 * The scheduler module itself might also call this routine to place
 * the current process on the userland scheduler's run queue prior
 * to calling dummy_select_curproc().
 *
 * The caller may set LWP_PASSIVE_ACQ in lwp_flags to indicate that we should
 * attempt to leave the thread on the current cpu.
 *
 * MPSAFE
 */
static void
dummy_setrunqueue(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];
	cpumask_t mask;
	int cpuid;

	if (dd->uschedcp == NULL) {
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask);
		lwkt_schedule(lp->lwp_thread);
	} else {
		/*
		 * Add to our global runq
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
		spin_lock(&dummy_spin);
		++dummy_runqcount;
		TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq);
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
		lwkt_giveaway(lp->lwp_thread);

		/* lp = TAILQ_FIRST(&dummy_runq); */

		/*
		 * Notify the next available cpu.  P.S. some
		 * cpu affinity could be done here.
		 *
		 * The rdyprocmask bit records the knowledge that there
		 * is a process on the runq that needs service.  If the
		 * helper thread cannot find a home for it, it will forward
		 * the request to another available cpu.
		 */
		mask = ~dummy_curprocmask & dummy_rdyprocmask &
		       gd->gd_other_cpus;
		if (mask) {
			cpuid = BSFCPUMASK(mask);
			atomic_clear_cpumask(&dummy_rdyprocmask, CPUMASK(cpuid));
			spin_unlock(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[cpuid].helper_thread);
		} else {
			spin_unlock(&dummy_spin);
		}
	}
}
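
/*
 * Worked mask example (a sketch, assuming the integral cpumask_t this
 * file was written against): on a 4-cpu system viewed from cpu0 with
 * dummy_curprocmask = 0011b (cpus 0-1 busy), dummy_rdyprocmask = 1100b
 * (helpers 2-3 ready) and gd_other_cpus = 1110b:
 *
 *	mask = ~0011b & 1100b & 1110b = 1100b
 *
 * BSFCPUMASK(mask) selects cpu 2, whose ready bit is cleared before its
 * helper thread is scheduled to pull the new lwp off the runq.
 */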

/*
 * This routine is called from a systimer IPI.  It must NEVER block.
 * If a lwp compatible with this scheduler is the currently running
 * thread this function is called with a non-NULL lp, otherwise it
 * will be called with a NULL lp.
 *
 * This routine is called at ESTCPUFREQ on each cpu independently.
 *
 * This routine typically queues a reschedule request, which will cause
 * the scheduler's BLAH_select_curproc() to be called as soon as possible.
 */
static void
dummy_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	dummy_pcpu_t dd = &dummy_pcpu[gd->gd_cpuid];

	if (++dd->rrcount >= usched_dummy_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}
}

/*
 * DUMMY_RECALCULATE_ESTCPU
 *
 * Called once a second for any process that is running or has slept
 * for less than 2 seconds.
 */
static void
dummy_recalculate_estcpu(struct lwp *lp)
{
}

static void
dummy_yield(struct lwp *lp)
{
	need_user_resched();
}

/*
 * DUMMY_RESETPRIORITY
 *
 * This routine is called after the kernel has potentially modified
 * the lwp_rtprio structure.  The target process may be running or sleeping
 * or scheduled but not yet running or owned by another cpu.  Basically,
 * it can be in virtually any state.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
dummy_resetpriority(struct lwp *lp)
{
	/* XXX spinlock usually needed */
	/*
	 * Set p_priority for general process comparisons
	 */
	switch(lp->lwp_rtprio.type) {
	case RTP_PRIO_REALTIME:
		lp->lwp_priority = PRIBASE_REALTIME + lp->lwp_rtprio.prio;
		break;
	case RTP_PRIO_NORMAL:
		lp->lwp_priority = PRIBASE_NORMAL + lp->lwp_rtprio.prio;
		break;
	case RTP_PRIO_IDLE:
		lp->lwp_priority = PRIBASE_IDLE + lp->lwp_rtprio.prio;
		break;
	case RTP_PRIO_THREAD:
		lp->lwp_priority = PRIBASE_THREAD + lp->lwp_rtprio.prio;
		break;
	}

	/*
	 * td_upri has normal sense (higher numbers are more desirable),
	 * so negate it.
	 */
	lp->lwp_thread->td_upri = -lp->lwp_priority;
	/* XXX spinlock usually needed */
}
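
/*
 * Example (a sketch): a normal process with rtprio.prio 0 gets
 * lwp_priority = PRIBASE_NORMAL + 0 = 128 and td_upri = -128, while an
 * RTP_PRIO_IDLE lwp with prio 0 gets lwp_priority = 256 and
 * td_upri = -256, which the LWKT layer correctly sees as less desirable.
 */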

/*
 * DUMMY_FORKING
 *
 * Called from fork1() when a new child process is being created.  Allows
 * the scheduler to predispose the child process before it gets scheduled.
 */
static void
dummy_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = plp->lwp_estcpu;
}

/*
 * Called when a lwp is being removed from this scheduler, typically
 * during lwp_exit().
 */
static void
dummy_exiting(struct lwp *plp, struct proc *child)
{
}

static void
dummy_uload_update(struct lwp *lp)
{
}

/*
 * SMP systems may need a scheduler helper thread.  This is how one can be
 * set up.
 *
 * We use a neat LWKT scheduling trick to interlock the helper thread.  It
 * is possible to deschedule an LWKT thread and then do some work before
 * switching away.  The thread can be rescheduled at any time, even before
 * we switch away.
 */
static void
dummy_sched_thread(void *dummy)
{
    globaldata_t gd;
    dummy_pcpu_t dd;
    struct lwp *lp;
    cpumask_t cpumask;
    cpumask_t tmpmask;
    int cpuid;
    int tmpid;

    gd = mycpu;
    cpuid = gd->gd_cpuid;
    dd = &dummy_pcpu[cpuid];
    cpumask = CPUMASK(cpuid);

    for (;;) {
	lwkt_deschedule_self(gd->gd_curthread);		/* interlock */
	atomic_set_cpumask(&dummy_rdyprocmask, cpumask);
	spin_lock(&dummy_spin);
	if (dd->uschedcp) {
		/*
		 * We raced another cpu trying to schedule a thread onto us.
		 * If the runq isn't empty hit another free cpu.
		 */
		tmpmask = ~dummy_curprocmask & dummy_rdyprocmask &
			  smp_active_mask;
		if (tmpmask && dummy_runqcount) {
			tmpid = BSFCPUMASK(tmpmask);
			KKASSERT(tmpid != cpuid);
			atomic_clear_cpumask(&dummy_rdyprocmask, CPUMASK(tmpid));
			spin_unlock(&dummy_spin);
			lwkt_schedule(&dummy_pcpu[tmpid].helper_thread);
		} else {
			spin_unlock(&dummy_spin);
		}
	} else if ((lp = TAILQ_FIRST(&dummy_runq)) != NULL) {
		--dummy_runqcount;
		TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
		dd->uschedcp = lp;
		atomic_set_cpumask(&dummy_curprocmask, cpumask);
		spin_unlock(&dummy_spin);
		lwkt_acquire(lp->lwp_thread);
		lwkt_schedule(lp->lwp_thread);
	} else {
		spin_unlock(&dummy_spin);
	}
	lwkt_switch();
    }
}
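
/*
 * Minimal sketch of the deschedule-before-sleep interlock used above;
 * example_helper() is hypothetical and not part of this scheduler.
 * Because lwkt_deschedule_self() runs before readiness is published, a
 * wakeup that races with the work below simply re-marks the thread
 * runnable and the final lwkt_switch() returns immediately instead of
 * sleeping through the event.
 */
#if 0
static void
example_helper(void *arg)
{
	for (;;) {
		lwkt_deschedule_self(curthread);	/* interlock */
		/* publish readiness and scan work queues here */
		lwkt_switch();	/* sleeps only if no wakeup raced us */
	}
}
#endif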

/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
dummy_sched_thread_cpu_init(void)
{
    int i;

    if (bootverbose)
	kprintf("start dummy scheduler helpers on cpus:");

    for (i = 0; i < ncpus; ++i) {
	dummy_pcpu_t dd = &dummy_pcpu[i];
	cpumask_t mask = CPUMASK(i);

	if ((mask & smp_active_mask) == 0)
	    continue;

	if (bootverbose)
	    kprintf(" %d", i);

	lwkt_create(dummy_sched_thread, NULL, NULL, &dd->helper_thread,
		    TDF_NOSTART, i, "dsched %d", i);

	/*
	 * Allow user scheduling on the target cpu.  cpu #0 has already
	 * been enabled in rqinit().
	 */
	if (i)
	    atomic_clear_cpumask(&dummy_curprocmask, mask);
	atomic_set_cpumask(&dummy_rdyprocmask, mask);
    }
    if (bootverbose)
	kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	dummy_sched_thread_cpu_init, NULL)
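
/*
 * Usage note (hedged): the usched framework learns about schedulers via
 * registration; a sketch, assuming the usched_ctl() interface and the
 * USCH_ADD constant from kern_usched.c:
 *
 *	usched_ctl(&usched_dummy, USCH_ADD);
 *
 * after which the "dummy" name in the usched structure above identifies
 * this scheduler to userland selection interfaces.
 */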