int spinning = lwkt_spin_loops; /* loops before HLTing */
int reqflags;
int cseq;
+ int oseq;
/*
* Switching from within a 'fast' (non thread switched) interrupt or IPI
* could cause a deadlock.
*/
cseq = atomic_fetchadd_int(&lwkt_cseq_windex, 1);
- while (lwkt_cseq_rindex != cseq) {
- DELAY(1);
- cpu_lfence();
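+ /*
+  * Wait for lwkt_cseq_rindex to catch up to our ticket.  When the
+  * cpu supports MONITOR/MWAIT, cpu_mmw_pause_int() lets it nap until
+  * the word changes; otherwise fall back to a short DELAY(1) spin.
+  */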
+ while ((oseq = lwkt_cseq_rindex) != cseq) {
+ cpu_ccfence();
+ if (cpu_mi_feature & CPU_MI_MONITOR) {
+ cpu_mmw_pause_int(&lwkt_cseq_rindex, oseq);
+ } else {
+ DELAY(1);
+ cpu_lfence();
+ }
}
cseq = lwkt_spin_delay; /* don't trust the system operator */
cpu_ccfence();
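Both hunks lean on cpu_mmw_pause_int(), which waits on a memory word using MONITOR/MWAIT when the hardware advertises it (CPU_MI_MONITOR). A rough sketch of the idea only, assuming x86 inline asm in a kernel context where MONITOR/MWAIT is permitted; this is not DragonFly's actual implementation, the name mwait_on_int is made up here, and feature detection plus the PAUSE/DELAY fallback are left to the callers as in the hunks above:

static __inline void
mwait_on_int(volatile const int *ptr, int oldval)
{
	/* Arm the monitor on the cache line containing *ptr. */
	__asm __volatile("monitor" : : "a" (ptr), "c" (0), "d" (0) : "memory");

	/*
	 * Re-check after arming: if the word already changed we must not
	 * mwait, or we could sleep until the next unrelated interrupt.
	 */
	if (*ptr == oldval)
		__asm __volatile("mwait" : : "a" (0), "c" (0));
}

MWAIT also returns on interrupts, so callers still re-test the watched condition afterward rather than trusting a single wakeup; the while loop above does exactly that with lwkt_cseq_rindex.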
void
cpu_idle(void)
{
- struct thread *td = curthread;
+ globaldata_t gd = mycpu;
+ struct thread *td = gd->gd_curthread;
+ int reqflags;
crit_exit();
KKASSERT(td->td_critcount == 0);
/*
* If we are going to halt call splz unconditionally after
* CLIing to catch any interrupt races. Note that we are
* at SPL0 and interrupts are enabled.
*/
- if (cpu_idle_hlt &&
- (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
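+ /*
+  * With MONITOR/MWAIT available and no wakeup work pending, mwait on
+  * gd_reqflags: any write to the request flags (or an interrupt)
+  * wakes the idle cpu without the cli/hlt sequence below.
+  */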
+ reqflags = gd->gd_reqflags;
+ if (cpu_idle_hlt == 1 &&
+ (cpu_mi_feature & CPU_MI_MONITOR) &&
+ (reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
+ cpu_mmw_pause_int(&gd->gd_reqflags, reqflags);
+ } else if (cpu_idle_hlt) {
__asm __volatile("cli");
splz();
- if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
+ if ((gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
if (cpu_idle_hlt == 1)
cpu_idle_default_hook();
else