int mpheld;
#endif
int didaccumulate;
- const char *lmsg; /* diagnostic - 'systat -pv 1' */
- const void *laddr;
/*
* Switching from within a 'fast' (non thread switched) interrupt or IPI
if (ntd->td_fairq_accum >= 0 &&
#ifdef SMP
(ntd->td_mpcount + ntd->td_xpcount == 0 ||
- mpheld || cpu_try_mplock()) &&
+ mpheld || cpu_try_mplock_msg(&ntd->td_wmesg)) &&
#endif
- (!TD_TOKS_HELD(ntd) || lwkt_getalltokens(ntd, &lmsg, &laddr))
+ (!TD_TOKS_HELD(ntd) ||
+ lwkt_getalltokens(ntd))
) {
#ifdef SMP
clr_cpu_contention_mask(gd);
goto havethread;
}
- lmsg = NULL;
- laddr = NULL;
-
#ifdef SMP
if (ntd->td_fairq_accum >= 0)
set_cpu_contention_mask(gd);
	/* Reload mpheld (it becomes stale after mplock/token ops) */
mpheld = MP_LOCK_HELD(gd);
- if (ntd->td_mpcount + ntd->td_xpcount && mpheld == 0) {
- lmsg = "mplock";
- laddr = ntd->td_mplock_stallpc;
- }
#endif
/*
*/
if (didaccumulate)
break; /* try again from the top, almost */
- if (lmsg)
- strlcpy(cpu_time.cp_msg, lmsg, sizeof(cpu_time.cp_msg));
- cpu_time.cp_stallpc = (uintptr_t)laddr;
goto haveidle;
}
user_pri_sched) && ntd->td_fairq_accum >= 0 &&
#ifdef SMP
(ntd->td_mpcount + ntd->td_xpcount == 0 ||
- mpheld || cpu_try_mplock()) &&
+ mpheld || cpu_try_mplock_msg(&ntd->td_wmesg)) &&
#endif
- (!TD_TOKS_HELD(ntd) || lwkt_getalltokens(ntd, &lmsg, &laddr))
+ (!TD_TOKS_HELD(ntd) || lwkt_getalltokens(ntd))
) {
#ifdef SMP
clr_cpu_contention_mask(gd);
* resources (tokens and/or mplock).
*/
#ifdef SMP
- ntd->td_wmesg = lmsg;
if (ntd->td_fairq_accum >= 0)
set_cpu_contention_mask(gd);
/*
	 * Reload mpheld (it becomes stale after mplock/token ops).
*/
mpheld = MP_LOCK_HELD(gd);
- if (ntd->td_mpcount + ntd->td_xpcount && mpheld == 0) {
- lmsg = "mplock";
- laddr = ntd->td_mplock_stallpc;
- }
if (ntd->td_pri >= TDPRI_KERN_LPSCHED && ntd->td_fairq_accum >= 0)
nquserok = 0;
#endif
* Called from a critical section.
*/
int
-lwkt_getalltokens(thread_t td, const char **msgp, const void **addrp)
+lwkt_getalltokens(thread_t td)
{
lwkt_tokref_t scan;
lwkt_tokref_t ref;
* Otherwise we failed to acquire all the tokens.
* Undo and return.
*/
- *msgp = tok->t_desc;
- *addrp = scan->tr_stallpc;
+ td->td_wmesg = tok->t_desc;
atomic_add_long(&tok->t_collisions, 1);
lwkt_relalltokens(td);
return(FALSE);
#define get_mplock() get_mplock_debug(__FILE__, __LINE__)
#define try_mplock() try_mplock_debug(__FILE__, __LINE__)
#define cpu_try_mplock() cpu_try_mplock_debug(__FILE__, __LINE__)
+#define cpu_try_mplock_msg(lmsg) cpu_try_mplock_msg_debug(lmsg, __FILE__, __LINE__)
void _get_mplock_predisposed(const char *file, int line);
void _get_mplock_contested(const char *file, int line);
return(1);
}
+/*
+ * Non-blocking attempt to acquire the MP lock (debug variant).
+ *
+ * On failure *lmsg is set to "mplock" so the caller (the LWKT
+ * scheduler, via the cpu_try_mplock_msg() macro) can record what
+ * the thread stalled on; on success *lmsg is left untouched.
+ *
+ * Returns 1 if the lock was acquired, 0 otherwise.
+ */
+static __inline
+int
+cpu_try_mplock_msg_debug(const char **lmsg, const char *file, int line)
+{
+	if (cpu_try_mplock_debug(file, line)) {
+		return(1);
+	} else {
+		*lmsg = "mplock";
+		return(0);
+	}
+}
+
/*
* A cpu wanted the MP lock but could not get it. This function is also
* called directly from the LWKT scheduler.
extern int lwkt_trytoken(lwkt_token_t);
extern void lwkt_reltoken(lwkt_token_t);
extern void lwkt_reltoken_hard(lwkt_token_t);
-extern int lwkt_getalltokens(thread_t, const char **, const void **);
+extern int lwkt_getalltokens(thread_t);
extern void lwkt_relalltokens(thread_t);
extern void lwkt_drain_token_requests(void);
extern void lwkt_token_init(lwkt_token_t, int, const char *);