int cmask = CMASK;
u_int cpu_mi_feature;
+cpumask_t usched_global_cpumask;	/* cpus allowed to run user threads */
extern struct user *proc0paddr;
extern int fallback_elf_brand;
int boothowto = 0; /* initialized so that it can be patched */
SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0,
"Reboot flags, from console subsystem");
+SYSCTL_ULONG(_kern, OID_AUTO, usched_global_cpumask, CTLFLAG_RW,
+ &usched_global_cpumask, 0, "global user scheduler cpumask");
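Since the sysctl is CTLFLAG_RW, the mask can be inspected and narrowed from
userland at runtime. Below is a minimal sketch (a hypothetical tool, not part
of this patch) using sysctlbyname(3); it assumes cpumask_t fits in an
unsigned long, which holds for the integer cpumask_t this patch manipulates.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned long mask;
	size_t len = sizeof(mask);

	/* Read the current global user scheduler mask. */
	if (sysctlbyname("kern.usched_global_cpumask", &mask, &len,
	    NULL, 0) < 0) {
		perror("sysctlbyname");
		exit(1);
	}
	printf("current mask: %#lx\n", mask);

	/* Clear cpu 1's bit: user threads stop being scheduled there. */
	mask &= ~(1UL << 1);
	if (sysctlbyname("kern.usched_global_cpumask", NULL, NULL,
	    &mask, sizeof(mask)) < 0) {
		perror("sysctlbyname");
		exit(1);
	}
	return (0);
}

The scheduler hunks below consult the mask on every cpu selection, so a write
takes effect on the next scheduling decision; the fallback hunk further down
keeps an all-zero mask from wedging the system.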
/*
* This ensures that there is at least one entry so that the sysinit_set
lwkt_gdinit(gd);
vm_map_entry_reserve_cpu_init(gd);
sleep_gdinit(gd);
+ /* make this cpu eligible for user thread scheduling */
+ usched_global_cpumask |= CPUMASK(cpuid);
}
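The OR above runs once per cpu during early boot, so after every cpu has
passed through mi_gdinit() the mask covers all started cpus. A self-contained
sketch of that arithmetic, assuming the classic integer definition
CPUMASK(cpu) == (1UL << (cpu)) (the macro's definition is not shown in this
diff):

#include <assert.h>

typedef unsigned long cpumask_t;
#define CPUMASK(cpu)	(1UL << (cpu))	/* assumed definition */

int
main(void)
{
	cpumask_t usched_global_cpumask = 0;
	int cpuid;

	/* Each cpu adds itself, as in the mi_gdinit() hunk above. */
	for (cpuid = 0; cpuid < 4; ++cpuid)
		usched_global_cpumask |= CPUMASK(cpuid);

	assert(usched_global_cpumask == 0xf);	/* cpus 0-3 eligible */
	return (0);
}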
++bsd4_scancpu;
cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
- smp_active_mask;
+ smp_active_mask & usched_global_cpumask;
while (mask) {
tmpmask = ~(CPUMASK(cpuid) - 1);
* Then cpus which might have a currently running lp
*/
mask = bsd4_curprocmask & bsd4_rdyprocmask &
- lp->lwp_cpumask & smp_active_mask;
+ lp->lwp_cpumask & smp_active_mask & usched_global_cpumask;
while (mask) {
tmpmask = ~(CPUMASK(cpuid) - 1);
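Both scan loops intersect the candidate sets (ready cpus, the lwp's own
affinity mask, online cpus, and now usched_global_cpumask) and then walk the
result starting at the round-robin cpuid, wrapping to the lowest bit. A
standalone sketch of that wrap-around scan; __builtin_ctzl stands in for the
kernel's find-first-set helper (an assumption, not part of this diff):

#include <stdio.h>

typedef unsigned long cpumask_t;
#define CPUMASK(cpu)	(1UL << (cpu))	/* assumed definition */

/*
 * Return the lowest candidate cpu at or above cpuid, else wrap to the
 * lowest candidate overall, else -1 when the mask is empty.
 */
static int
scan_from(int cpuid, cpumask_t mask)
{
	cpumask_t tmpmask = ~(CPUMASK(cpuid) - 1);	/* bits >= cpuid */

	if (mask & tmpmask)
		return (__builtin_ctzl(mask & tmpmask));
	if (mask)
		return (__builtin_ctzl(mask));		/* wrapped around */
	return (-1);
}

int
main(void)
{
	cpumask_t mask = CPUMASK(1) | CPUMASK(3);

	printf("%d %d %d\n", scan_from(2, mask), scan_from(3, mask),
	    scan_from(0, mask));	/* prints 3 3 1 */
	return (0);
}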
* and round-robin. Other cpus will pickup as they release their
* current lwps or become ready.
*
+ * Avoid a degenerate system lockup case if usched_global_cpumask
+ * is set to 0 or otherwise does not cover lwp_cpumask.
+ *
* We only kick the target helper thread in this case, we do not
* set the user resched flag because
*/
cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
+ if ((CPUMASK(cpuid) & usched_global_cpumask) == 0)
+ cpuid = 0;
gd = globaldata_find(cpuid);
dd = &bsd4_pcpu[cpuid];
found:
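In isolation, the fallback added above guarantees forward progress: if the
round-robin pick is not covered by usched_global_cpumask (including the
pathological all-zero mask), cpu 0 is chosen unconditionally. A sketch under
the same integer-mask assumption as above:

#include <stdio.h>

typedef unsigned long cpumask_t;
#define CPUMASK(cpu)	(1UL << (cpu))	/* assumed definition */

/* Round-robin pick with the degenerate-case override to cpu 0. */
static int
choose_cpu(unsigned int scancpu, int ncpus, cpumask_t global_mask)
{
	int cpuid = (scancpu & 0xFFFF) % ncpus;

	if ((CPUMASK(cpuid) & global_mask) == 0)
		cpuid = 0;
	return (cpuid);
}

int
main(void)
{
	/* Empty mask (admin error) still yields a runnable cpu. */
	printf("%d\n", choose_cpu(12345, 4, 0));	/* prints 0 */
	/* Covered pick passes through unchanged. */
	printf("%d\n", choose_cpu(2, 4, CPUMASK(2)));	/* prints 2 */
	return (0);
}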
extern u_int cpu_feature; /* CPUID_* features */
extern u_int cpu_feature2; /* CPUID2_* features */
extern u_int cpu_mi_feature; /* CPU_MI_XXX machine-nonspecific features */
+extern cpumask_t usched_global_cpumask;
extern int nfs_diskless_valid; /* NFS diskless params were obtained */
extern vm_paddr_t Maxmem; /* Highest physical memory address in system */