KKASSERT(pmap != &kernel_pmap);
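+ /*
+ * vm_token is held across the entire pmap teardown and is
+ * released below, after the page table object has been dropped.
+ */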
+ lwkt_gettoken(&vm_token);
#if defined(DIAGNOSTIC)
if (object->ref_count != 1)
panic("pmap_release: pteobj reference count != 1");
info.pmap = pmap;
info.object = object;
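+ /*
+ * The pmap must not be active on any cpu by the time it is
+ * released; pmap_setlwpvm() clears our pm_active bit when an
+ * lwp switches away from the old vmspace.
+ */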
+ KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
+ ("pmap %p still active! %016jx",
+ pmap,
+ (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
+
spin_lock(&pmap_spin);
TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
spin_unlock(&pmap_spin);
}
} while (info.error);
vm_object_drop(object);
+ lwkt_reltoken(&vm_token);
}
static int
pmap_pagedaemon_waken = 0;
if (warningdone < 5) {
- kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
+ kprintf("pmap_collect: collecting pv entries -- "
+ "suggest increasing PMAP_SHPGPERPROC\n");
warningdone++;
}
crit_enter();
oldvm = p->p_vmspace;
if (oldvm != newvm) {
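+ /*
+ * Reference the new vmspace before installing it; the old
+ * vmspace is released only after the lwp has been switched
+ * off of it below.
+ */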
+ if (adjrefs)
+ vmspace_ref(newvm);
p->p_vmspace = newvm;
KKASSERT(p->p_nthreads == 1);
lp = RB_ROOT(&p->p_lwp_tree);
pmap_setlwpvm(lp, newvm);
- if (adjrefs) {
- vmspace_ref(newvm);
+ if (adjrefs)
vmspace_rel(oldvm);
- }
}
crit_exit();
}
struct pmap *pmap;
oldvm = lp->lwp_vmspace;
- if (oldvm == newvm)
- return;
- lp->lwp_vmspace = newvm;
- if (curthread->td_lwp != lp)
- return;
- /*
- * NOTE: We don't have to worry about the CPULOCK here because
- * the virtual kernel doesn't call this function when VMM
- * is enabled (and depends on the host kernel when it isn't).
- */
- crit_enter();
- pmap = vmspace_pmap(newvm);
- ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
+ if (oldvm != newvm) {
+ crit_enter();
+ lp->lwp_vmspace = newvm;
+ if (curthread->td_lwp == lp) {
+ pmap = vmspace_pmap(newvm);
+ ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
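+ /*
+ * If another cpu holds the pmap interlock (CPULOCK_EXCL),
+ * wait for it to be released before running on the new
+ * pmap.
+ */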
+ if (pmap->pm_active_lock & CPULOCK_EXCL)
+ pmap_interlock_wait(newvm);
#if defined(SWTCH_OPTIM_STATS)
- tlb_flush_count++;
+ tlb_flush_count++;
#endif
- pmap = vmspace_pmap(oldvm);
- ATOMIC_CPUMASK_NANDBIT(pmap->pm_active, mycpu->gd_cpuid);
- crit_exit();
+ pmap = vmspace_pmap(oldvm);
+ ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
+ mycpu->gd_cpuid);
+ }
+ crit_exit();
+ }
}
/*
{
pmap_t pmap = vmspace_pmap(vm);
- while (pmap->pm_active_lock & CPULOCK_EXCL)
- pthread_yield();
+ if (pmap->pm_active_lock & CPULOCK_EXCL) {
+ crit_enter();
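+ /*
+ * cpu_ccfence() forces pm_active_lock to be re-read on each
+ * iteration; yield to the host so the exclusive holder can
+ * make progress.
+ */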
+ while (pmap->pm_active_lock & CPULOCK_EXCL) {
+ cpu_ccfence();
+ pthread_yield();
+ }
+ crit_exit();
+ }
}
vm_offset_t
ret
/*
- * cpu_heavy_restore() (current thread in %rax on entry)
+ * cpu_heavy_restore() (current thread in %rax on entry, %rbx is old thread)
*
* Restore the thread after an LWKT switch. This entry is normally
* called via the LWKT switch restore function, which was pulled
movq TD_LWP(%rax),%rcx
movq LWP_VMSPACE(%rcx), %rcx /* RCX = vmspace */
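+ /*
+ * Set our cpu bit in the new vmspace's pm_active mask.
+ * PCPU(cpumask) is this cpu's own bit; other_cpus excludes
+ * the current cpu and would leave our bit clear.
+ */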
- movq PCPU(other_cpus)+0, %rsi
+ movq PCPU(cpumask)+0, %rsi
MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+0(%rcx)
- movq PCPU(other_cpus)+8, %rsi
+ movq PCPU(cpumask)+8, %rsi
MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+8(%rcx)
- movq PCPU(other_cpus)+16, %rsi
+ movq PCPU(cpumask)+16, %rsi
MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+16(%rcx)
- movq PCPU(other_cpus)+24, %rsi
+ movq PCPU(cpumask)+24, %rsi
MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+24(%rcx)
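+ /*
+ * Load pm_active_lock so the CPULOCK_EXCL interlock can be
+ * checked, mirroring the pmap_interlock_wait() test on the
+ * C side.
+ */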
movl VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
/*
* cpu_idle_restore() (current thread in %rax on entry) (one-time execution)
+ * (old thread is %rbx on entry)
*
* Don't bother setting up any regs other than %rbp so backtraces
* don't die. This restore function is used to bootstrap into the
/*
* cpu_kthread_restore() (current thread is %rax on entry) (one-time execution)
+ * (old thread is %rbx on entry)
*
* Don't bother setting up any regs other than %rbp so backtraces
* don't die. This restore function is used to bootstrap into an