X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/4564ce438823ab4cea706dc3eebe34535d9e92cb..3b998fa96afe52828957ea4f65d15320eb0fe240:/sys/kern/lwkt_thread.c

diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c
index a1ce259196..9f6ba58ec9 100644
--- a/sys/kern/lwkt_thread.c
+++ b/sys/kern/lwkt_thread.c
@@ -1,13 +1,13 @@
 /*
- * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
- * 
+ * Copyright (c) 2003-2010 The DragonFly Project.  All rights reserved.
+ *
  * This code is derived from software contributed to The DragonFly Project
  * by Matthew Dillon
- * 
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
- * 
+ *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  * 3. Neither the name of The DragonFly Project nor the names of its
  *    contributors may be used to endorse or promote products derived
  *    from this software without specific, prior written permission.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
@@ -30,8 +30,6 @@
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- * 
- * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.120 2008/10/26 04:29:19 sephe Exp $
  */
 
 /*
@@ -40,7 +38,6 @@
  * to use a critical section to avoid problems.  Foreign thread
  * scheduling is queued via (async) IPIs.
  */
-#include "opt_ddb.h"
 
 #include
 #include
@@ -58,6 +55,9 @@
 #include
 #include
 
+#include
+
+#include
 
 #include
 #include
@@ -71,13 +71,20 @@
 #include
 #include
 
-#ifdef DDB
-#include
+#if !defined(KTR_CTXSW)
+#define KTR_CTXSW KTR_ALL
 #endif
 
+KTR_INFO_MASTER(ctxsw);
+KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p",
+    sizeof(int) + sizeof(struct thread *));
+KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p",
+    sizeof(int) + sizeof(struct thread *));
+KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s",
+    sizeof (struct thread *) + sizeof(char *));
+KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = ", sizeof (struct thread *));
 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
 
-static int untimely_switch = 0;
 #ifdef INVARIANTS
 static int panic_on_cscount = 0;
 #endif
@@ -85,50 +92,51 @@ static __int64_t switch_count = 0;
 static __int64_t preempt_hit = 0;
 static __int64_t preempt_miss = 0;
 static __int64_t preempt_weird = 0;
-static __int64_t token_contention_count = 0;
-static __int64_t mplock_contention_count = 0;
+static __int64_t token_contention_count __debugvar = 0;
 static int lwkt_use_spin_port;
-#ifdef SMP
-static int chain_mplock = 0;
-#endif
 static struct objcache *thread_cache;
 
-volatile cpumask_t mp_lock_contention_mask;
+#ifdef SMP
+static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
+#endif
 
 extern void cpu_heavy_restore(void);
 extern void cpu_lwkt_restore(void);
 extern void cpu_kthread_restore(void);
 extern void cpu_idle_restore(void);
 
-int
+#ifdef __x86_64__
+
+static int
 jg_tos_ok(struct thread *td)
 {
+    void *tos;
+    int tos_ok;
+
     if (td == NULL) {
        return 1;
     }
     KKASSERT(td->td_sp != NULL);
-    unsigned long tos = ((unsigned long *)td->td_sp)[0];
-    int tos_ok = 0;
-    if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore)
-       || (tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) {
+    tos = ((void **)td->td_sp)[0];
+    tos_ok = 0;
+    if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore) ||
+       (tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) {
        tos_ok = 1;
     }
     return tos_ok;
 }
 
+#endif
+
 /*
  * We can make all thread ports use the spin backend instead of the thread
  * backend.  This should only be set to debug the spin backend.
  */
 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
-SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
 #ifdef INVARIANTS
 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
 #endif
-#ifdef SMP
-SYSCTL_INT(_lwkt, OID_AUTO, chain_mplock, CTLFLAG_RW, &chain_mplock, 0, "");
-#endif
 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
@@ -136,23 +144,8 @@ SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
 #ifdef INVARIANTS
 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
     &token_contention_count, 0, "spinning due to token contention");
-SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
-    &mplock_contention_count, 0, "spinning due to MPLOCK contention");
-#endif
-
-/*
- * Kernel Trace
- */
-#if !defined(KTR_GIANT_CONTENTION)
-#define KTR_GIANT_CONTENTION KTR_ALL
 #endif
 
-KTR_INFO_MASTER(giant);
-KTR_INFO(KTR_GIANT_CONTENTION, giant, beg, 0, "thread=%p", sizeof(void *));
-KTR_INFO(KTR_GIANT_CONTENTION, giant, end, 1, "thread=%p", sizeof(void *));
-
-#define loggiant(name) KTR_LOG(giant_ ## name, curthread)
-
 /*
  * These helper procedures handle the runq, they can only be called from
  * within a critical section.
@@ -180,7 +173,7 @@ static __inline
 void
 _lwkt_enqueue(thread_t td)
 {
-    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_TSLEEPQ|TDF_BLOCKQ)) == 0) {
+    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;
 
@@ -363,6 +356,7 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
     td->td_flags = flags;
     td->td_gd = gd;
     td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
+    td->td_toks_stop = &td->td_toks_base;
 #ifdef SMP
     if ((flags & TDF_MPSAFE) == 0)
        td->td_mpcount = 1;
@@ -391,6 +385,8 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
     TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
     crit_exit_gd(mygd);
 #endif
+
+    dsched_new_thread(td);
 }
 
 void
@@ -401,6 +397,7 @@ lwkt_set_comm(thread_t td, const char *ctl, ...)
     __va_start(va, ctl);
     kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
     __va_end(va);
+    KTR_LOG(ctxsw_newtd, td, &td->td_comm[0]);
 }
 
 void
@@ -439,6 +436,7 @@ lwkt_free_thread(thread_t td)
        td->td_kstack = NULL;
        td->td_kstack_size = 0;
     }
+    KTR_LOG(ctxsw_deadtd, td);
 }
 
 
@@ -501,9 +499,7 @@ lwkt_switch(void)
            td->td_flags |= TDF_PANICWARN;
            kprintf("Warning: thread switch from interrupt or IPI, "
                    "thread %p (%s)\n", td, td->td_comm);
-#ifdef DDB
-           db_print_backtrace();
-#endif
+           print_backtrace(-1);
        }
        lwkt_switch();
        gd->gd_intr_nesting_level = savegdnest;
@@ -525,7 +521,7 @@ lwkt_switch(void)
        td->td_release(td);
 
     crit_enter_gd(gd);
-    if (td->td_toks)
+    if (TD_TOKS_HELD(td))
        lwkt_relalltokens(td);
 
     /*
@@ -640,7 +636,7 @@ again:
         * cause the core MP lock to be released.
         */
        if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
-           (ntd->td_toks && lwkt_getalltokens(ntd) == 0)
+           (TD_TOKS_HELD(ntd) && lwkt_getalltokens(ntd) == 0)
        ) {
            u_int32_t rqmask = gd->gd_runqmask;
 
@@ -650,10 +646,6 @@ again:
            TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
                    /* spinning due to MP lock being held */
-#ifdef INVARIANTS
-                   ++mplock_contention_count;
-#endif
-
                    /* mplock still not held, 'mpheld' still valid */
                    continue;
                }
@@ -662,7 +654,7 @@ again:
                 * failure, but the variable is only needed for
                 * the loop.
                 */
-               if (ntd->td_toks && !lwkt_getalltokens(ntd)) {
+               if (TD_TOKS_HELD(ntd) && !lwkt_getalltokens(ntd)) {
                    /* spinning due to token contention */
 #ifdef INVARIANTS
                    ++token_contention_count;
@@ -685,24 +677,35 @@ again:
                 * reschedule when the MP lock might become available.
                 */
                if (nq < TDPRI_KERN_LPSCHED) {
+                   break;      /* for now refuse to run */
+#if 0
                    if (chain_mplock == 0)
                        break;
-                   atomic_set_int(&mp_lock_contention_mask,
-                                  gd->gd_cpumask);
                    /* continue loop, allow user threads to be scheduled */
+#endif
                }
            }
+
+           /*
+            * Case where a (kernel) thread needed the MP lock and could
+            * not get one, and we may or may not have found another
+            * thread which does not need the MP lock to run while
+            * we wait (ntd).
+            */
           if (ntd == NULL) {
-               cpu_mplock_contested();
                ntd = &gd->gd_idlethread;
                ntd->td_flags |= TDF_IDLE_NOHLT;
+               set_mplock_contention_mask(gd);
+               cpu_mplock_contested();
                goto using_idle_thread;
            } else {
+               clr_mplock_contention_mask(gd);
                ++gd->gd_cnt.v_swtch;
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
        } else {
+           clr_mplock_contention_mask(gd);
            ++gd->gd_cnt.v_swtch;
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
@@ -738,12 +741,10 @@ using_idle_thread:
         */
       if (ntd->td_mpcount) {
           mpheld = MP_LOCK_HELD();
-          if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
+          if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
              panic("Idle thread %p was holding the BGL!", ntd);
-          } else if (mpheld == 0) {
-             cpu_mplock_contested();
+          if (mpheld == 0)
              goto again;
-          }
       }
 #endif
     }
@@ -766,7 +767,13 @@ using_idle_thread:
 #endif
     if (td != ntd) {
        ++switch_count;
-       KKASSERT(jg_tos_ok(ntd));
+#ifdef __x86_64__
+       {
+           int tos_ok __debugvar = jg_tos_ok(ntd);
+           KKASSERT(tos_ok);
+       }
+#endif
+       KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
        td->td_switch(ntd);
     }
     /* NOTE: current cpu may have changed after switch */
@@ -866,7 +873,7 @@ lwkt_preempt(thread_t ntd, int critpri)
        need_lwkt_resched();
        return;
     }
-    if (ntd->td_toks) {
+    if (TD_TOKS_HELD(ntd)) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
@@ -908,6 +915,7 @@ lwkt_preempt(thread_t ntd, int critpri)
     ++preempt_hit;
     ntd->td_preempted = td;
     td->td_flags |= TDF_PREEMPT_LOCK;
+    KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
     td->td_switch(ntd);
     KKASSERT(ntd->td_preempted &&
             (td->td_flags & TDF_PREEMPT_DONE));
@@ -924,72 +932,30 @@
 }
 
 /*
- * Yield our thread while higher priority threads are pending.  This is
- * typically called when we leave a critical section but it can be safely
- * called while we are in a critical section.
+ * Conditionally call splz() if gd_reqflags indicates work is pending.
  *
- * This function will not generally yield to equal priority threads but it
- * can occur as a side effect.  Note that lwkt_switch() is called from
- * inside the critical section to prevent its own crit_exit() from reentering
- * lwkt_yield_quick().
- *
- * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
- * came along but was blocked and made pending.
+ * td_nest_count prevents deep nesting via splz() or doreti() which
+ * might otherwise blow out the kernel stack.  Note that except for
+ * this special case, we MUST call splz() here to handle any
+ * pending ints, particularly after we switch, or we might accidently
+ * halt the cpu with interrupts pending.
  *
  * (self contained on a per cpu basis)
  */
 void
-lwkt_yield_quick(void)
+splz_check(void)
 {
     globaldata_t gd = mycpu;
     thread_t td = gd->gd_curthread;
 
-    /*
-     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
-     * it with a non-zero cpl then we might not wind up calling splz after
-     * a task switch when the critical section is exited even though the
-     * new task could accept the interrupt.
-     *
-     * XXX from crit_exit() only called after last crit section is released.
-     * If called directly will run splz() even if in a critical section.
-     *
-     * td_nest_count prevent deep nesting via splz() or doreti().  Note that
-     * except for this special case, we MUST call splz() here to handle any
-     * pending ints, particularly after we switch, or we might accidently
-     * halt the cpu with interrupts pending.
-     */
     if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();
-
-    /*
-     * YYY enabling will cause wakeup() to task-switch, which really
-     * confused the old 4.x code.  This is a good way to simulate
-     * preemption and MP without actually doing preemption or MP, because a
-     * lot of code assumes that wakeup() does not block.
-     */
-    if (untimely_switch && td->td_nest_count == 0 &&
-       gd->gd_intr_nesting_level == 0
-    ) {
-       crit_enter_quick(td);
-       /*
-        * YYY temporary hacks until we disassociate the userland scheduler
-        * from the LWKT scheduler.
-        */
-       if (td->td_flags & TDF_RUNQ) {
-           lwkt_switch();              /* will not reenter yield function */
-       } else {
-           lwkt_schedule_self(td);     /* make sure we are scheduled */
-           lwkt_switch();              /* will not reenter yield function */
-           lwkt_deschedule_self(td);   /* make sure we are descheduled */
-       }
-       crit_exit_noyield(td);
-    }
 }
 
 /*
- * This implements a normal yield which, unlike _quick, will yield to equal
- * priority threads as well.  Note that gd_reqflags tests will be handled by
- * the crit_exit() call in lwkt_switch().
+ * This implements a normal yield which will yield to equal priority
+ * threads as well as higher priority threads.  Note that gd_reqflags
+ * tests will be handled by the crit_exit() call in lwkt_switch().
  *
  * (self contained on a per cpu basis)
  */
@@ -1000,6 +966,78 @@ lwkt_yield(void)
     lwkt_switch();
 }
 
+/*
+ * This function is used along with the lwkt_passive_recover() inline
+ * by the trap code to negotiate a passive release of the current
+ * process/lwp designation with the user scheduler.
+ */
+void
+lwkt_passive_release(struct thread *td)
+{
+    struct lwp *lp = td->td_lwp;
+
+    td->td_release = NULL;
+    lwkt_setpri_self(TDPRI_KERN_USER);
+    lp->lwp_proc->p_usched->release_curproc(lp);
+}
+
+/*
+ * Make a kernel thread act as if it were in user mode with regards
+ * to scheduling, to avoid becoming cpu-bound in the kernel.  Kernel
+ * loops which may be potentially cpu-bound can call lwkt_user_yield().
+ *
+ * The lwkt_user_yield() function is designed to have very low overhead
+ * if no yield is determined to be needed.
+ */
+void
+lwkt_user_yield(void)
+{
+    thread_t td = curthread;
+    struct lwp *lp = td->td_lwp;
+
+#ifdef SMP
+    /*
+     * XXX SEVERE TEMPORARY HACK.  A cpu-bound operation running in the
+     * kernel can prevent other cpus from servicing interrupt threads
+     * which still require the MP lock (which is a lot of them).  This
+     * has a chaining effect since if the interrupt is blocked, so is
+     * the event, so normal scheduling will not pick up on the problem.
+     */
+    if (mp_lock_contention_mask && td->td_mpcount) {
+       yield_mplock(td);
+    }
+#endif
+
+    /*
+     * Another kernel thread wants the cpu
+     */
+    if (lwkt_resched_wanted())
+       lwkt_switch();
+
+    /*
+     * If the user scheduler has asynchronously determined that the current
+     * process (when running in user mode) needs to lose the cpu then make
+     * sure we are released.
+     */
+    if (user_resched_wanted()) {
+       if (td->td_release)
+           td->td_release(td);
+    }
+
+    /*
+     * If we are released reduce our priority
+     */
+    if (td->td_release == NULL) {
+       if (lwkt_check_resched(td) > 0)
+           lwkt_switch();
+       if (lp) {
+           lp->lwp_proc->p_usched->acquire_curproc(lp);
+           td->td_release = lwkt_passive_release;
+           lwkt_setpri_self(TDPRI_USER_NORM);
+       }
+    }
+}
+
 /*
  * Return 0 if no runnable threads are pending at the same or higher
  * priority as the passed thread.
@@ -1078,7 +1116,7 @@ _lwkt_schedule(thread_t td, int reschedok)
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
        } else {
-           lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
+           lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
        }
 #else
        _lwkt_enqueue(td);
@@ -1102,6 +1140,28 @@ lwkt_schedule_noresched(thread_t td)
 
 #ifdef SMP
 
+/*
+ * When scheduled remotely if frame != NULL the IPIQ is being
+ * run via doreti or an interrupt then preemption can be allowed.
+ *
+ * To allow preemption we have to drop the critical section so only
+ * one is present in _lwkt_schedule_post.
+ */
+static void
+lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
+{
+    thread_t td = curthread;
+    thread_t ntd = arg;
+
+    if (frame && ntd->td_preemptable) {
+       crit_exit_noyield(td);
+       _lwkt_schedule(ntd, 1);
+       crit_enter_quick(td);
+    } else {
+       _lwkt_schedule(ntd, 1);
+    }
+}
+
 /*
  * Thread migration using a 'Pull' method.  The thread may or may not be
  * the current thread.  It MUST be descheduled and in a stable state.
@@ -1110,18 +1170,23 @@
  * At any point after lwkt_giveaway() is called, the target cpu may
  * 'pull' the thread by calling lwkt_acquire().
  *
+ * We have to make sure the thread is not sitting on a per-cpu tsleep
+ * queue or it will blow up when it moves to another cpu.
+ *
  * MPSAFE - must be called under very specific conditions.
  */
 void
 lwkt_giveaway(thread_t td)
 {
-    globaldata_t gd = mycpu;
+    globaldata_t gd = mycpu;
 
-    crit_enter_gd(gd);
-    KKASSERT(td->td_gd == gd);
-    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
-    td->td_flags |= TDF_MIGRATING;
-    crit_exit_gd(gd);
+    crit_enter_gd(gd);
+    if (td->td_flags & TDF_TSLEEPQ)
+       tsleep_remove(td);
+    KKASSERT(td->td_gd == gd);
+    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
+    td->td_flags |= TDF_MIGRATING;
+    crit_exit_gd(gd);
 }
 
 void
@@ -1213,6 +1278,23 @@ lwkt_setpri(thread_t td, int pri)
     crit_exit();
 }
 
+/*
+ * Set the initial priority for a thread prior to it being scheduled for
+ * the first time.  The thread MUST NOT be scheduled before or during
+ * this call.  The thread may be assigned to a cpu other then the current
+ * cpu.
+ *
+ * Typically used after a thread has been created with TDF_STOPPREQ,
+ * and before the thread is initially scheduled.
+ */
+void
+lwkt_setpri_initial(thread_t td, int pri)
+{
+    KKASSERT(pri >= 0);
+    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
+    td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
+}
+
 void
 lwkt_setpri_self(int pri)
 {
@@ -1237,6 +1319,10 @@ lwkt_setpri_self(int pri)
  * moving our thread to the tdallq of the target cpu, IPI messaging the
  * target cpu, and switching out.  TDF_MIGRATING prevents scheduling
  * races while the thread is being migrated.
+ *
+ * We must be sure to remove ourselves from the current cpu's tsleepq
+ * before potentially moving to another queue.  The thread can be on
+ * a tsleepq due to a left-over tsleep_interlock().
  */
 #ifdef SMP
 static void lwkt_setcpu_remote(void *arg);
@@ -1250,6 +1336,8 @@ lwkt_setcpu_self(globaldata_t rgd)
 
     if (td->td_gd != rgd) {
        crit_enter_quick(td);
+       if (td->td_flags & TDF_TSLEEPQ)
+           tsleep_remove(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
@@ -1383,6 +1471,15 @@ lwkt_exit(void)
        gd->gd_freetd = NULL;
        objcache_put(thread_cache, std);
     }
+
+    /*
+     * Remove thread resources from kernel lists and deschedule us for
+     * the last time.
+     */
+    if (td->td_flags & TDF_TSLEEPQ)
+       tsleep_remove(td);
+    biosched_done(td);
+    dsched_exit_thread(td);
     lwkt_deschedule_self(td);
     lwkt_remove_tdallq(td);
     if (td->td_flags & TDF_ALLOCATED_THREAD)
@@ -1433,72 +1530,4 @@ lwkt_smp_stopped(void)
     crit_exit_gd(gd);
 }
 
-/*
- * get_mplock() calls this routine if it is unable to obtain the MP lock.
- * get_mplock() has already incremented td_mpcount.  We must block and
- * not return until giant is held.
- *
- * All we have to do is lwkt_switch() away.  The LWKT scheduler will not
- * reschedule the thread until it can obtain the giant lock for it.
- */
-void
-lwkt_mp_lock_contested(void)
-{
-    loggiant(beg);
-    lwkt_switch();
-    loggiant(end);
-}
-
-/*
- * The rel_mplock() code will call this function after releasing the
- * last reference on the MP lock if mp_lock_contention_mask is non-zero.
- *
- * We then chain an IPI to a single other cpu potentially needing the
- * lock.  This is a bit heuristical and we can wind up with IPIs flying
- * all over the place.
- */
-static void lwkt_mp_lock_uncontested_remote(void *arg __unused);
-
-void
-lwkt_mp_lock_uncontested(void)
-{
-    globaldata_t gd;
-    globaldata_t dgd;
-    cpumask_t mask;
-    cpumask_t tmpmask;
-    int cpuid;
-
-    if (chain_mplock) {
-       gd = mycpu;
-       atomic_clear_int(&mp_lock_contention_mask, gd->gd_cpumask);
-       mask = mp_lock_contention_mask;
-       tmpmask = ~((1 << gd->gd_cpuid) - 1);
-
-       if (mask) {
-           if (mask & tmpmask)
-               cpuid = bsfl(mask & tmpmask);
-           else
-               cpuid = bsfl(mask);
-           atomic_clear_int(&mp_lock_contention_mask, 1 << cpuid);
-           dgd = globaldata_find(cpuid);
-           lwkt_send_ipiq(dgd, lwkt_mp_lock_uncontested_remote, NULL);
-       }
-    }
-}
-
-/*
- * The idea is for this IPI to interrupt a potentially lower priority
- * thread, such as a user thread, to allow the scheduler to reschedule
- * a higher priority kernel thread that needs the MP lock.
- *
- * For now we set the LWKT reschedule flag which generates an AST in
- * doreti, though theoretically it is also possible to possibly preempt
- * here if the underlying thread was operating in user mode.  Nah.
- */
-static void
-lwkt_mp_lock_uncontested_remote(void *arg __unused)
-{
-    need_lwkt_resched();
-}
-
 #endif
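
Editorial addendum (not part of the patch above): the diff drops the old KTR "giant" contention probes and introduces a "ctxsw" probe master whose events are logged from lwkt_switch(), lwkt_preempt(), lwkt_set_comm() and lwkt_free_thread().  The sketch below only illustrates that KTR_INFO_MASTER / KTR_INFO / KTR_LOG idiom in isolation; the "example" master, the "switchto" event and example_log_switch() are hypothetical names, and the snippet assumes a DragonFly kernel build environment where <sys/ktr.h> and the thread/globaldata types used by the patch are available.

/*
 * Sketch only, hypothetical names: same probe idiom as the ctxsw
 * events added by this patch.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/ktr.h>

#if !defined(KTR_EXAMPLE)
#define KTR_EXAMPLE  KTR_ALL   /* compile-time event mask, same idiom as KTR_CTXSW */
#endif

KTR_INFO_MASTER(example);      /* declare the "example" event master */
KTR_INFO(KTR_EXAMPLE, example, switchto, 0, "#cpu[%d].td = %p",
    sizeof(int) + sizeof(struct thread *));

/*
 * Record which cpu is switching and which thread it switches to,
 * mirroring the KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd) call the patch
 * adds to lwkt_switch().
 */
static void
example_log_switch(globaldata_t gd, thread_t ntd)
{
    KTR_LOG(example_switchto, gd->gd_cpuid, ntd);
}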