X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/d5d75bd0a34026ee4e67f21bdf3f3c62ecea6195..c9e9fb21c9ab82a8102647d67bcf956e0aba60af:/sys/kern/usched_dummy.c

diff --git a/sys/kern/usched_dummy.c b/sys/kern/usched_dummy.c
index 744c415a6e..fb22eb8dcb 100644
--- a/sys/kern/usched_dummy.c
+++ b/sys/kern/usched_dummy.c
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/kern/usched_dummy.c,v 1.8 2007/04/30 07:18:54 dillon Exp $
+ * $DragonFly: src/sys/kern/usched_dummy.c,v 1.9 2008/04/21 15:24:46 dillon Exp $
  */
 
 #include
@@ -50,6 +50,7 @@
 
 #include
 #include
+#include
 
 #define MAXPRI			128
 #define PRIBASE_REALTIME	0
@@ -71,6 +72,7 @@ static void dummy_recalculate_estcpu(struct lwp *lp);
 static void dummy_resetpriority(struct lwp *lp);
 static void dummy_forking(struct lwp *plp, struct lwp *lp);
 static void dummy_exiting(struct lwp *plp, struct lwp *lp);
+static void dummy_yield(struct lwp *lp);
 
 struct usched usched_dummy = {
 	{ NULL },
@@ -85,7 +87,8 @@ struct usched usched_dummy = {
 	dummy_resetpriority,
 	dummy_forking,
 	dummy_exiting,
-	NULL			/* setcpumask not supported */
+	NULL,			/* setcpumask not supported */
+	dummy_yield
 };
 
 struct usched_dummy_pcpu {
@@ -151,7 +154,8 @@ dummy_acquire_curproc(struct lwp *lp)
 	/*
 	 * If this cpu has no current thread, select ourself
 	 */
-	if (dd->uschedcp == NULL && TAILQ_EMPTY(&dummy_runq)) {
+	if (dd->uschedcp == lp ||
+	    (dd->uschedcp == NULL && TAILQ_EMPTY(&dummy_runq))) {
 		atomic_set_int(&dummy_curprocmask, gd->gd_cpumask);
 		dd->uschedcp = lp;
 		return;
@@ -361,6 +365,16 @@ dummy_recalculate_estcpu(struct lwp *lp)
 {
 }
 
+/*
+ * MPSAFE
+ */
+static
+void
+dummy_yield(struct lwp *lp)
+{
+	need_user_resched();
+}
+
 /*
  * DUMMY_RESETPRIORITY
  *
@@ -441,6 +455,8 @@ dummy_exiting(struct lwp *plp, struct lwp *lp)
  * is possible to deschedule an LWKT thread and then do some work before
  * switching away. The thread can be rescheduled at any time, even before
  * we switch away.
+ *
+ * MPSAFE
  */
 #ifdef SMP
 
@@ -460,11 +476,6 @@ dummy_sched_thread(void *dummy)
 	dd = &dummy_pcpu[cpuid];
 	cpumask = 1 << cpuid;
 
-	/*
-	 * Our Scheduler helper thread does not need to hold the MP lock
-	 */
-	rel_mplock();
-
 	for (;;) {
 		lwkt_deschedule_self(gd->gd_curthread);	/* interlock */
 		atomic_set_int(&dummy_rdyprocmask, cpumask);
@@ -526,7 +537,7 @@ dummy_sched_thread_cpu_init(void)
 			kprintf(" %d", i);
 
 		lwkt_create(dummy_sched_thread, NULL, NULL, &dd->helper_thread,
-			    TDF_STOPREQ, i, "dsched %d", i);
+			    TDF_STOPREQ | TDF_MPSAFE, i, "dsched %d", i);
 
 		/*
 		 * Allow user scheduling on the target cpu. cpu #0 has already