From f1d1c3fa08a3331e9dff64598cff0e77351d0887 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Sat, 21 Jun 2003 07:54:57 +0000 Subject: [PATCH] thread stage 8: add crit_enter(), per-thread cpl handling, fix deferred interrupt handling for critical sections, add some basic passive token code, and blocking/signaling code. Add structural definitions for additional LWKT mechanisms. Remove asleep/await. Add generation number based xsleep/xwakeup. Note that when exiting the last crit_exit() we run splz() to catch up on blocked interrupts. There is also some #if 0'd code that will cause a thread switch to occur 'at odd times'... primarily wakeup()-> lwkt_schedule()->critical_section->switch. This will be usefulf or testing purposes down the line. The passive token code is mostly disabled at the moment. It's primary use will be under SMP and its primary advantage is very low overhead on UP and, if used properly, should also have good characteristics under SMP. --- sys/conf/files | 4 +- sys/i386/apic/apic_ipl.s | 3 +- sys/i386/apic/apic_vector.s | 8 +- sys/i386/i386/genassym.c | 11 +- sys/i386/i386/globals.s | 12 +- sys/i386/i386/machdep.c | 6 +- sys/i386/i386/mp_machdep.c | 5 +- sys/i386/i386/pmap.c | 6 +- sys/i386/i386/swtch.s | 13 +- sys/i386/i386/trap.c | 6 +- sys/i386/icu/icu_ipl.s | 5 +- sys/i386/icu/icu_vector.s | 11 +- sys/i386/include/asnames.h | 3 +- sys/i386/include/globaldata.h | 5 +- sys/i386/include/thread.h | 10 + sys/i386/isa/apic_ipl.s | 3 +- sys/i386/isa/apic_vector.s | 8 +- sys/i386/isa/icu_ipl.s | 5 +- sys/i386/isa/icu_vector.s | 11 +- sys/i386/isa/ipl_funcs.c | 15 +- sys/kern/init_main.c | 3 +- sys/kern/kern_malloc.c | 11 +- sys/kern/kern_synch.c | 301 ++++++++++--------- sys/kern/lwkt_thread.c | 399 +++++++++++++++++++++++-- sys/platform/pc32/apic/apic_ipl.s | 3 +- sys/platform/pc32/apic/apic_vector.s | 8 +- sys/platform/pc32/i386/genassym.c | 11 +- sys/platform/pc32/i386/globals.s | 12 +- sys/platform/pc32/i386/machdep.c | 6 +- 
sys/platform/pc32/i386/mp_machdep.c | 5 +- sys/platform/pc32/i386/pmap.c | 6 +- sys/platform/pc32/i386/swtch.s | 13 +- sys/platform/pc32/i386/trap.c | 6 +- sys/platform/pc32/icu/icu_ipl.s | 5 +- sys/platform/pc32/icu/icu_vector.s | 11 +- sys/platform/pc32/include/asnames.h | 3 +- sys/platform/pc32/include/globaldata.h | 5 +- sys/platform/pc32/include/thread.h | 10 + sys/platform/pc32/isa/apic_ipl.s | 3 +- sys/platform/pc32/isa/apic_vector.s | 8 +- sys/platform/pc32/isa/icu_ipl.s | 5 +- sys/platform/pc32/isa/icu_vector.s | 11 +- sys/platform/pc32/isa/ipl_funcs.c | 15 +- sys/platform/vkernel/i386/genassym.c | 11 +- sys/sys/malloc.h | 3 +- sys/sys/proc.h | 13 +- sys/sys/systm.h | 8 +- sys/sys/thread.h | 163 ++++++++-- sys/sys/thread2.h | 90 ++++++ sys/sys/xwait.h | 28 ++ sys/vm/vm_kern.c | 9 +- sys/vm/vm_page.c | 85 +----- sys/vm/vm_page.h | 6 +- sys/vm/vm_pageout.h | 4 +- 54 files changed, 1034 insertions(+), 396 deletions(-) create mode 100644 sys/i386/include/thread.h create mode 100644 sys/platform/pc32/include/thread.h create mode 100644 sys/sys/thread2.h create mode 100644 sys/sys/xwait.h diff --git a/sys/conf/files b/sys/conf/files index f8bcb407db..54b2b4300e 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -1,5 +1,5 @@ # $FreeBSD: src/sys/conf/files,v 1.340.2.137 2003/06/04 17:10:30 sam Exp $ -# $DragonFly: src/sys/conf/files,v 1.3 2003/06/20 02:09:47 dillon Exp $ +# $DragonFly: src/sys/conf/files,v 1.4 2003/06/21 07:54:53 dillon Exp $ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and @@ -626,7 +626,7 @@ kern/kern_shutdown.c standard kern/kern_sig.c standard kern/kern_subr.c standard kern/kern_switch.c standard -kern/kern_lwkt.c standard +kern/lwkt_thread.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard diff --git a/sys/i386/apic/apic_ipl.s b/sys/i386/apic/apic_ipl.s index cee66d00c8..b5d2c56f00 100644 --- 
a/sys/i386/apic/apic_ipl.s +++ b/sys/i386/apic/apic_ipl.s @@ -23,7 +23,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $ - * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.2 2003/06/17 04:28:36 dillon Exp $ + * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ @@ -105,6 +105,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx /* set bit = unmasked level */ andl _ipending,%ecx /* set bit = unmasked pending INT */ diff --git a/sys/i386/apic/apic_vector.s b/sys/i386/apic/apic_vector.s index 551e57d68b..bd2dc8ee28 100644 --- a/sys/i386/apic/apic_vector.s +++ b/sys/i386/apic/apic_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $ - * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.3 2003/06/18 06:33:33 dillon Exp $ + * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ @@ -232,6 +232,9 @@ IDTVEC(vec_name) ; \ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\ testl $IRQ_BIT(irq_num), _cpl ; \ jne 2f ; /* this INT masked */ \ + movl _curthread,%eax ; \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; /* in critical sec */ \ ; \ incb _intr_nesting_level ; \ ; \ @@ -270,6 +273,7 @@ __CONCAT(Xresume,irq_num): ; \ EOI_IRQ(irq_num) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ lock ; \ btsl $(irq_num), iactive ; /* still active */ \ jnc 0b ; /* retry */ \ @@ -280,6 +284,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ MP_RELLOCK ; \ POP_FRAME ; \ iret ; \ @@ -288,6 +293,7 @@ __CONCAT(Xresume,irq_num): ; \ 
APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ testl $IRQ_BIT(irq_num), _cpl ; \ jne 4f ; /* this INT masked */ \ call forward_irq ; /* forward irq to lock holder */ \ diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c index 52a1713637..4e51d8f150 100644 --- a/sys/i386/i386/genassym.c +++ b/sys/i386/i386/genassym.c @@ -35,7 +35,7 @@ * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $ - * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.8 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.9 2003/06/21 07:54:55 dillon Exp $ */ #include "opt_user_ldt.h" @@ -84,6 +84,12 @@ ASSYM(P_THREAD, offsetof(struct proc, p_thread)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_SP, offsetof(struct thread, td_sp)); +ASSYM(TD_PRI, offsetof(struct thread, td_pri)); +ASSYM(TD_MACH, offsetof(struct thread, td_mach)); + +ASSYM(MTD_CPL, offsetof(struct mi_thread, mtd_cpl)); + +ASSYM(TDPRI_CRIT, TDPRI_CRIT); #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); @@ -182,13 +188,14 @@ ASSYM(GD_IDLETHREAD, offsetof(struct globaldata, gd_idlethread)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); +ASSYM(GD_REQPRI, offsetof(struct globaldata, gd_reqpri)); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); #endif #ifdef SMP -ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid)); +ASSYM(GD_CPU, offsetof(struct globaldata, gd_cpuid)); ASSYM(GD_CPU_LOCKID, offsetof(struct globaldata, gd_cpu_lockid)); ASSYM(GD_OTHER_CPUS, offsetof(struct globaldata, gd_other_cpus)); ASSYM(GD_SS_EFLAGS, offsetof(struct globaldata, 
gd_ss_eflags)); diff --git a/sys/i386/i386/globals.s b/sys/i386/i386/globals.s index 9cf2d81127..de3bca745e 100644 --- a/sys/i386/i386/globals.s +++ b/sys/i386/i386/globals.s @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/globals.s,v 1.13.2.1 2000/05/16 06:58:06 dillon Exp $ - * $DragonFly: src/sys/i386/i386/Attic/globals.s,v 1.6 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/globals.s,v 1.7 2003/06/21 07:54:55 dillon Exp $ */ #include "opt_user_ldt.h" @@ -66,11 +66,12 @@ _CPU_prvspace: * Define layout of the global data. On SMP this lives in * the per-cpu address space, otherwise it's in the data segment. */ - .globl gd_curthread, gd_npxthread, gd_astpending + .globl gd_curthread, gd_npxthread, gd_astpending, gd_reqpri .globl gd_common_tss, gd_switchtime, gd_switchticks, gd_idlethread .set gd_curthread,globaldata + GD_CURTHREAD .set gd_idlethread,globaldata + GD_IDLETHREAD .set gd_astpending,globaldata + GD_ASTPENDING + .set gd_reqpri,globaldata + GD_REQPRI .set gd_npxthread,globaldata + GD_NPXTHREAD .set gd_common_tss,globaldata + GD_COMMON_TSS .set gd_switchtime,globaldata + GD_SWITCHTIME @@ -86,11 +87,12 @@ _CPU_prvspace: #endif #ifndef SMP - .globl _curthread, _npxthread, _astpending + .globl _curthread, _npxthread, _astpending, _reqpri .globl _common_tss, _switchtime, _switchticks, _idlethread .set _curthread,globaldata + GD_CURTHREAD .set _idlethread,globaldata + GD_IDLETHREAD .set _astpending,globaldata + GD_ASTPENDING + .set _reqpri,globaldata + GD_REQPRI .set _npxthread,globaldata + GD_NPXTHREAD .set _common_tss,globaldata + GD_COMMON_TSS .set _switchtime,globaldata + GD_SWITCHTIME @@ -111,12 +113,12 @@ _CPU_prvspace: * The BSP version of these get setup in locore.s and pmap.c, while * the AP versions are setup in mp_machdep.c. 
*/ - .globl gd_cpuid, gd_cpu_lockid, gd_other_cpus + .globl gd_cpu, gd_cpu_lockid, gd_other_cpus .globl gd_ss_eflags, gd_inside_intr .globl gd_prv_CMAP1, gd_prv_CMAP2, gd_prv_CMAP3, gd_prv_PMAP1 .globl gd_prv_CADDR1, gd_prv_CADDR2, gd_prv_CADDR3, gd_prv_PADDR1 - .set gd_cpuid,globaldata + GD_CPUID + .set gd_cpu,globaldata + GD_CPUID .set gd_cpu_lockid,globaldata + GD_CPU_LOCKID .set gd_other_cpus,globaldata + GD_OTHER_CPUS .set gd_ss_eflags,globaldata + GD_SS_EFLAGS diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index 2cb1594738..7b5444ddfc 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.8 2003/06/21 07:54:55 dillon Exp $ */ #include "apm.h" @@ -985,8 +985,8 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, void cpu_idle(void) { + spl0(); for (;;) { - __asm __volatile("cli"); lwkt_switch(); if (cpu_idle_hlt) { /* @@ -997,7 +997,6 @@ cpu_idle(void) } else { __asm __volatile("sti"); } - spl0(); /* unmask interrupts */ /* YYY BGL */ } } @@ -1875,6 +1874,7 @@ init386(first) thread0.td_pcb = (struct pcb *) ((char *)proc0paddr + UPAGES*PAGE_SIZE - sizeof(struct pcb)); thread0.td_kstack = (char *)proc0paddr; + thread0.td_flags = TDF_RUNNING; atdevbase = ISA_HOLE_START + KERNBASE; diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c index f68fff1f9a..7af06078b4 100644 --- a/sys/i386/i386/mp_machdep.c +++ b/sys/i386/i386/mp_machdep.c @@ -23,7 +23,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $ - * $DragonFly: src/sys/i386/i386/Attic/mp_machdep.c,v 1.5 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/mp_machdep.c,v 1.6 2003/06/21 07:54:56 dillon Exp $ */ #include "opt_cpu.h" @@ -2143,10 +2143,9 @@ start_all_aps(u_int boot_addr) SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ /* prime data page for it to use */ - gd->gd_cpuid = x; - gd->gd_cpu_lockid = x << 24; mi_gdinit(gd, x); cpu_gdinit(gd, x); + gd->gd_cpu_lockid = x << 24; gd->gd_prv_CMAP1 = &SMPpt[pg + 1]; gd->gd_prv_CMAP2 = &SMPpt[pg + 2]; gd->gd_prv_CMAP3 = &SMPpt[pg + 3]; diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index 10cbc29376..582f5b95cf 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -40,7 +40,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.8 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -871,6 +871,8 @@ void pmap_dispose_thread(struct thread *td) { /* HIPRI YYY */ + KASSERT((td->td_flags & (TDF_RUNQ|TDF_RUNNING)) == 0, + ("pmap_dispose_thread: still on queue: %08x", td->td_flags)); if (mycpu->gd_tdfreecount < CACHE_NTHREADS) { ++mycpu->gd_tdfreecount; TAILQ_INSERT_HEAD(&mycpu->gd_tdfreeq, td, td_threadq); @@ -968,6 +970,8 @@ pmap_dispose_proc(struct proc *p) { struct thread *td; + KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p)); + if ((td = p->p_thread) != NULL) { p->p_thread = NULL; td->td_proc = NULL; diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s index 8d13d079d2..adf52fb3e4 100644 --- a/sys/i386/i386/swtch.s +++ b/sys/i386/i386/swtch.s @@ -1,6 +1,7 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. 
+ * LWKT threads Copyright (c) 2003 Matthew Dillon * * This code is derived from software contributed to Berkeley by * William Jolitz. @@ -34,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.8 2003/06/21 07:54:56 dillon Exp $ */ #include "npx.h" @@ -78,6 +79,8 @@ _tlb_flush_count: .long 0 */ ENTRY(cpu_heavy_switch) movl _curthread,%ecx + movl _cpl,%edx /* YYY temporary */ + movl %edx,TD_MACH+MTD_CPL(%ecx) /* YYY temporary */ movl TD_PROC(%ecx),%ecx cli @@ -220,7 +223,9 @@ ENTRY(cpu_exit_switch) */ ENTRY(cpu_heavy_restore) /* interrupts are disabled */ - movl TD_PCB(%eax),%edx + movl TD_MACH+MTD_CPL(%eax),%edx + movl %edx,_cpl /* YYY temporary */ + movl TD_PCB(%eax),%edx /* YYY temporary */ movl TD_PROC(%eax),%ecx #ifdef DIAGNOSTIC cmpb $SRUN,P_STAT(%ecx) @@ -523,6 +528,8 @@ ENTRY(cpu_lwkt_switch) pushl %edi pushfl movl _curthread,%ecx + movl _cpl,%edx /* YYY temporary */ + movl %edx,TD_MACH+MTD_CPL(%ecx) /* YYY temporary */ pushl $cpu_lwkt_restore cli movl %esp,TD_SP(%ecx) @@ -542,5 +549,7 @@ ENTRY(cpu_lwkt_restore) popl %esi popl %ebx popl %ebp + movl TD_MACH+MTD_CPL(%eax),%ecx /* YYY temporary */ + movl %ecx,_cpl /* YYY temporary */ ret diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c index 4539cef047..d2eb911b33 100644 --- a/sys/i386/i386/trap.c +++ b/sys/i386/i386/trap.c @@ -36,7 +36,7 @@ * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $ - * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.3 2003/06/18 18:29:55 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -941,6 +941,10 @@ trap_fatal(frame, eva) } else { printf("Idle\n"); } + printf("current thread = pri %d ", curthread->td_pri); + if (curthread->td_pri >= TDPRI_CRIT) + 
printf("(CRIT)"); + printf("\n"); printf("interrupt mask = "); if ((cpl & net_imask) == net_imask) printf("net "); diff --git a/sys/i386/icu/icu_ipl.s b/sys/i386/icu/icu_ipl.s index a74e8b7ed4..67fdb3c4b3 100644 --- a/sys/i386/icu/icu_ipl.s +++ b/sys/i386/icu/icu_ipl.s @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $ - * $DragonFly: src/sys/i386/icu/Attic/icu_ipl.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/i386/icu/Attic/icu_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ .data @@ -60,6 +60,8 @@ _imen: .long HWI_MASK * -- soft splXX masks with group mechanism (cpl) * -- h/w masks for currently active or unused interrupts (imen) * -- ipending = active interrupts currently masked by cpl + * -- splz handles pending interrupts regardless of the critical + * nesting state, it is only called synchronously. */ ENTRY(splz) @@ -80,6 +82,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx andl _ipending,%ecx diff --git a/sys/i386/icu/icu_vector.s b/sys/i386/icu/icu_vector.s index d685bb381f..1f1c6fd11f 100644 --- a/sys/i386/icu/icu_vector.s +++ b/sys/i386/icu/icu_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $ - * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -66,6 +66,9 @@ IDTVEC(vec_name) ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4,%eax ; \ incl (%eax) ; \ + movl _curthread, %eax ; /* are we in a critical section? */ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 1f ; \ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? 
*/ \ notl %eax ; \ andl _ipending,%eax ; \ @@ -127,7 +130,10 @@ IDTVEC(vec_name) ; \ movb %al,_imen + IRQ_BYTE(irq_num) ; \ outb %al,$icu+ICU_IMR_OFFSET ; \ enable_icus ; \ - movl _cpl,%eax ; \ + movl _curthread, %eax ; /* are we in a critical section? */ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; \ + movl _cpl,%eax ; /* is this interrupt masked by the cpl? */ \ testb $IRQ_BIT(irq_num),%reg ; \ jne 2f ; \ incb _intr_nesting_level ; \ @@ -158,6 +164,7 @@ __CONCAT(Xresume,irq_num): ; \ 2: ; \ /* XXX skip mcounting here to avoid double count */ \ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + movl $TDPRI_CRIT,_reqpri ; \ popl %fs ; \ popl %es ; \ popl %ds ; \ diff --git a/sys/i386/include/asnames.h b/sys/i386/include/asnames.h index 9b8c87e7cb..a8c9fde167 100644 --- a/sys/i386/include/asnames.h +++ b/sys/i386/include/asnames.h @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/asnames.h,v 1.44.2.8 2003/01/22 20:14:53 jhb Exp $ - * $DragonFly: src/sys/i386/include/Attic/asnames.h,v 1.6 2003/06/20 02:09:54 dillon Exp $ + * $DragonFly: src/sys/i386/include/Attic/asnames.h,v 1.7 2003/06/21 07:54:56 dillon Exp $ */ #ifndef _MACHINE_ASNAMES_H_ @@ -341,6 +341,7 @@ #define _common_tss FS(common_tss) #define _common_tssd FS(common_tssd) #define _cpuid FS(cpuid) +#define _reqpri FS(reqpri) #define _cpu_lockid FS(cpu_lockid) #define _curthread FS(curthread) #define _idlethread FS(idlethread) diff --git a/sys/i386/include/globaldata.h b/sys/i386/include/globaldata.h index 575a16a12f..7f42e383dc 100644 --- a/sys/i386/include/globaldata.h +++ b/sys/i386/include/globaldata.h @@ -24,7 +24,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/include/globaldata.h,v 1.11.2.1 2000/05/16 06:58:10 dillon Exp $ - * $DragonFly: src/sys/i386/include/Attic/globaldata.h,v 1.8 2003/06/20 02:09:54 dillon Exp $ + * $DragonFly: src/sys/i386/include/Attic/globaldata.h,v 1.9 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -50,13 +50,14 @@ struct globaldata { struct i386tss gd_common_tss; int gd_switchticks; int gd_tdfreecount; /* new thread cache */ + int gd_reqpri; /* highest pri blocked thread */ TAILQ_HEAD(,thread) gd_tdfreeq; /* new thread cache */ TAILQ_HEAD(,thread) gd_tdrunq; /* runnable threads */ struct segment_descriptor gd_common_tssd; struct segment_descriptor *gd_tss_gdt; int gd_currentldt; /* USER_LDT */ + u_int gd_cpu; #ifdef SMP - u_int gd_cpuid; u_int gd_cpu_lockid; u_int gd_other_cpus; int gd_inside_intr; diff --git a/sys/i386/include/thread.h b/sys/i386/include/thread.h new file mode 100644 index 0000000000..7a192ba0ec --- /dev/null +++ b/sys/i386/include/thread.h @@ -0,0 +1,10 @@ +/* + * i386/include/thread.h + * + * $DragonFly: src/sys/i386/include/Attic/thread.h,v 1.1 2003/06/21 07:54:56 dillon Exp $ + */ + +struct mi_thread { + unsigned int mtd_cpl; +}; + diff --git a/sys/i386/isa/apic_ipl.s b/sys/i386/isa/apic_ipl.s index 1b6e44f4a1..21cb271848 100644 --- a/sys/i386/isa/apic_ipl.s +++ b/sys/i386/isa/apic_ipl.s @@ -23,7 +23,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $ - * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.2 2003/06/17 04:28:36 dillon Exp $ + * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ @@ -105,6 +105,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. 
*/ + movl $0,_reqpri movl %eax,%ecx notl %ecx /* set bit = unmasked level */ andl _ipending,%ecx /* set bit = unmasked pending INT */ diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s index 668129a697..c9a71745da 100644 --- a/sys/i386/isa/apic_vector.s +++ b/sys/i386/isa/apic_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $ - * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.3 2003/06/18 06:33:33 dillon Exp $ + * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ @@ -232,6 +232,9 @@ IDTVEC(vec_name) ; \ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\ testl $IRQ_BIT(irq_num), _cpl ; \ jne 2f ; /* this INT masked */ \ + movl _curthread,%eax ; \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; /* in critical sec */ \ ; \ incb _intr_nesting_level ; \ ; \ @@ -270,6 +273,7 @@ __CONCAT(Xresume,irq_num): ; \ EOI_IRQ(irq_num) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ lock ; \ btsl $(irq_num), iactive ; /* still active */ \ jnc 0b ; /* retry */ \ @@ -280,6 +284,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ MP_RELLOCK ; \ POP_FRAME ; \ iret ; \ @@ -288,6 +293,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ testl $IRQ_BIT(irq_num), _cpl ; \ jne 4f ; /* this INT masked */ \ call forward_irq ; /* forward irq to lock holder */ \ diff --git a/sys/i386/isa/icu_ipl.s b/sys/i386/isa/icu_ipl.s index c1539999fe..e5cc4f760d 100644 --- a/sys/i386/isa/icu_ipl.s +++ b/sys/i386/isa/icu_ipl.s @@ -35,7 +35,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $ - * $DragonFly: src/sys/i386/isa/Attic/icu_ipl.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/i386/isa/Attic/icu_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ .data @@ -60,6 +60,8 @@ _imen: .long HWI_MASK * -- soft splXX masks with group mechanism (cpl) * -- h/w masks for currently active or unused interrupts (imen) * -- ipending = active interrupts currently masked by cpl + * -- splz handles pending interrupts regardless of the critical + * nesting state, it is only called synchronously. */ ENTRY(splz) @@ -80,6 +82,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx andl _ipending,%ecx diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s index 4d541ebf55..023bfe9678 100644 --- a/sys/i386/isa/icu_vector.s +++ b/sys/i386/isa/icu_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $ - * $DragonFly: src/sys/i386/isa/Attic/icu_vector.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/i386/isa/Attic/icu_vector.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -66,6 +66,9 @@ IDTVEC(vec_name) ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4,%eax ; \ incl (%eax) ; \ + movl _curthread, %eax ; /* are we in a critical section? */ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 1f ; \ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ notl %eax ; \ andl _ipending,%eax ; \ @@ -127,7 +130,10 @@ IDTVEC(vec_name) ; \ movb %al,_imen + IRQ_BYTE(irq_num) ; \ outb %al,$icu+ICU_IMR_OFFSET ; \ enable_icus ; \ - movl _cpl,%eax ; \ + movl _curthread, %eax ; /* are we in a critical section? 
*/ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; \ + movl _cpl,%eax ; /* is this interrupt masked by the cpl? */ \ testb $IRQ_BIT(irq_num),%reg ; \ jne 2f ; \ incb _intr_nesting_level ; \ @@ -158,6 +164,7 @@ __CONCAT(Xresume,irq_num): ; \ 2: ; \ /* XXX skip mcounting here to avoid double count */ \ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + movl $TDPRI_CRIT,_reqpri ; \ popl %fs ; \ popl %es ; \ popl %ds ; \ diff --git a/sys/i386/isa/ipl_funcs.c b/sys/i386/isa/ipl_funcs.c index 52c6cb2814..2d3bbe1062 100644 --- a/sys/i386/isa/ipl_funcs.c +++ b/sys/i386/isa/ipl_funcs.c @@ -24,15 +24,17 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/ipl_funcs.c,v 1.32.2.5 2002/12/17 18:04:02 sam Exp $ - * $DragonFly: src/sys/i386/isa/Attic/ipl_funcs.c,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/i386/isa/Attic/ipl_funcs.c,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ #include #include #include #include +#include #include #include +#include #include /* @@ -46,6 +48,7 @@ void name(void) \ { \ atomic_set_int(var, bits); \ + mycpu->gd_reqpri = TDPRI_CRIT; \ } DO_SETBITS(setdelayed, &ipending, loadandclear(&idelayed)) @@ -166,7 +169,7 @@ void spl0(void) { cpl = 0; - if (ipending) + if (ipending && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -174,7 +177,7 @@ void splx(unsigned ipl) { cpl = ipl; - if (ipending & ~ipl) + if ((ipending & ~ipl) && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -221,7 +224,7 @@ spl0(void) { KASSERT(inside_intr == 0, ("spl0: called from interrupt")); cpl = 0; - if (ipending) + if (ipending && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -235,8 +238,10 @@ void splx(unsigned ipl) { cpl = ipl; - if (inside_intr == 0 && (ipending & ~cpl) != 0) + if (inside_intr == 0 && (ipending & ~cpl) != 0 && + curthread->td_pri < TDPRI_CRIT) { splz(); + } } diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index c6bf49d003..01e6731ef7 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -40,7 +40,7 @@ * * @(#)init_main.c 8.9 
(Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/init_main.c,v 1.134.2.8 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/kern/init_main.c,v 1.7 2003/06/20 02:09:56 dillon Exp $ + * $DragonFly: src/sys/kern/init_main.c,v 1.8 2003/06/21 07:54:57 dillon Exp $ */ #include "opt_init_path.h" @@ -585,6 +585,7 @@ SYSINIT(kickinit,SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL) void mi_gdinit(struct globaldata *gd, int cpu) { + gd->gd_cpu = cpu; lwkt_gdinit(gd); } diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c index d76afbc329..e59ae9862a 100644 --- a/sys/kern/kern_malloc.c +++ b/sys/kern/kern_malloc.c @@ -32,7 +32,7 @@ * * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 * $FreeBSD: src/sys/kern/kern_malloc.c,v 1.64.2.5 2002/03/16 02:19:51 archie Exp $ - * $DragonFly: src/sys/kern/Attic/kern_malloc.c,v 1.2 2003/06/17 04:28:41 dillon Exp $ + * $DragonFly: src/sys/kern/Attic/kern_malloc.c,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ #include "opt_vm.h" @@ -129,10 +129,6 @@ struct freelist { * * If M_NOWAIT is set, this routine will not block and return NULL if * the allocation fails. - * - * If M_ASLEEP is set (M_NOWAIT must also be set), this routine - * will have the side effect of calling asleep() if it returns NULL, - * allowing the parent to await() at some future time. 
*/ void * malloc(size, type, flags) @@ -172,11 +168,6 @@ malloc(size, type, flags) kbp = &bucket[indx]; while (ksp->ks_memuse >= ksp->ks_limit) { - if (flags & M_ASLEEP) { - if (ksp->ks_limblocks < 65535) - ksp->ks_limblocks++; - asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0); - } if (flags & M_NOWAIT) { splx(s); return ((void *) NULL); diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 9049ec04c8..5a015b86b7 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -37,7 +37,7 @@ * * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $ - * $DragonFly: src/sys/kern/kern_synch.c,v 1.4 2003/06/20 02:09:56 dillon Exp $ + * $DragonFly: src/sys/kern/kern_synch.c,v 1.5 2003/06/21 07:54:57 dillon Exp $ */ #include "opt_ktrace.h" @@ -54,6 +54,7 @@ #include #include #endif +#include #include #include @@ -400,6 +401,13 @@ sleepinit(void) TAILQ_INIT(&slpque[i]); } +void +xwait_init(struct xwait *w) +{ + bzero(w, sizeof(*w)); + TAILQ_INIT(&w->waitq); +} + /* * General sleep call. Suspends the current process until a wakeup is * performed on the specified identifier. The process will then be made @@ -419,6 +427,7 @@ tsleep(ident, priority, wmesg, timo) { struct proc *p = curproc; int s, sig, catch = priority & PCATCH; + int id = LOOKUP(ident); struct callout_handle thandle; #ifdef KTRACE @@ -426,6 +435,7 @@ tsleep(ident, priority, wmesg, timo) ktrcsw(p->p_tracep, 1, 0); #endif s = splhigh(); + if (cold || panicstr) { /* * After a panic, or during autoconfiguration, @@ -439,18 +449,12 @@ tsleep(ident, priority, wmesg, timo) } KASSERT(p != NULL, ("tsleep1")); KASSERT(ident != NULL && p->p_stat == SRUN, ("tsleep")); - /* - * Process may be sitting on a slpque if asleep() was called, remove - * it before re-adding. 
- */ - if (p->p_wchan != NULL) - unsleep(p); p->p_wchan = ident; p->p_wmesg = wmesg; p->p_slptime = 0; p->p_priority = priority & PRIMASK; - TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq); + TAILQ_INSERT_TAIL(&slpque[id], p, p_procq); if (timo) thandle = timeout(endtsleep, (void *)p, timo); /* @@ -511,169 +515,127 @@ resume: } /* - * asleep() - async sleep call. Place process on wait queue and return - * immediately without blocking. The process stays runnable until await() - * is called. If ident is NULL, remove process from wait queue if it is still - * on one. - * - * Only the most recent sleep condition is effective when making successive - * calls to asleep() or when calling tsleep(). - * - * The timeout, if any, is not initiated until await() is called. The sleep - * priority, signal, and timeout is specified in the asleep() call but may be - * overriden in the await() call. - * - * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>> - */ - -int -asleep(void *ident, int priority, const char *wmesg, int timo) -{ - struct proc *p = curproc; - int s; - - /* - * splhigh() while manipulating sleep structures and slpque. - * - * Remove preexisting wait condition (if any) and place process - * on appropriate slpque, but do not put process to sleep. - */ - - s = splhigh(); - - if (p->p_wchan != NULL) - unsleep(p); - - if (ident) { - p->p_wchan = ident; - p->p_wmesg = wmesg; - p->p_slptime = 0; - p->p_asleep.as_priority = priority; - p->p_asleep.as_timo = timo; - TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq); - } - - splx(s); - - return(0); -} - -/* - * await() - wait for async condition to occur. The process blocks until - * wakeup() is called on the most recent asleep() address. If wakeup is called - * priority to await(), await() winds up being a NOP. - * - * If await() is called more then once (without an intervening asleep() call), - * await() is still effectively a NOP but it calls mi_switch() to give other - * processes some cpu before returning. 
The process is left runnable. + * General sleep call. Suspends the current process until a wakeup is + * performed on the specified xwait structure. The process will then be made + * runnable with the specified priority. Sleeps at most timo/hz seconds + * (0 means no timeout). If pri includes PCATCH flag, signals are checked + * before and after sleeping, else signals are not checked. Returns 0 if + * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a + * signal needs to be delivered, ERESTART is returned if the current system + * call should be restarted if possible, and EINTR is returned if the system + * call should be interrupted by the signal (return EINTR). * - * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>> + * If the passed generation number is different from the generation number + * in the xwait, return immediately. */ - int -await(int priority, int timo) +xsleep(struct xwait *w, int priority, const char *wmesg, int timo, int *gen) { struct proc *p = curproc; - int s; + int s, sig, catch = priority & PCATCH; + struct callout_handle thandle; +#ifdef KTRACE + if (p && KTRPOINT(p, KTR_CSW)) + ktrcsw(p->p_tracep, 1, 0); +#endif s = splhigh(); - if (p->p_wchan != NULL) { - struct callout_handle thandle; - int sig; - int catch; - - /* - * The call to await() can override defaults specified in - * the original asleep(). - */ - if (priority < 0) - priority = p->p_asleep.as_priority; - if (timo < 0) - timo = p->p_asleep.as_timo; - + if (cold || panicstr) { /* - * Install timeout + * After a panic, or during autoconfiguration, + * just give interrupts a chance, then just return; + * don't run any other procs or panic below, + * in case this is the idle process and already asleep. 
*/ + splx(safepri); + splx(s); + return (0); + } + KASSERT(p != NULL, ("tsleep1")); + KASSERT(w != NULL && p->p_stat == SRUN, ("tsleep")); - if (timo) - thandle = timeout(endtsleep, (void *)p, timo); - - sig = 0; - catch = priority & PCATCH; - - if (catch) { - p->p_flag |= P_SINTR; - if ((sig = CURSIG(p))) { - if (p->p_wchan) - unsleep(p); - p->p_stat = SRUN; - goto resume; - } - if (p->p_wchan == NULL) { - catch = 0; - goto resume; - } - } - p->p_stat = SSLEEP; - p->p_stats->p_ru.ru_nvcsw++; - mi_switch(); -resume: - curpriority = p->p_usrpri; - + /* + * If the generation number does not match we return immediately. + */ + if (*gen != w->gen) { + *gen = w->gen; splx(s); - p->p_flag &= ~P_SINTR; - if (p->p_flag & P_TIMEOUT) { - p->p_flag &= ~P_TIMEOUT; - if (sig == 0) { #ifdef KTRACE - if (KTRPOINT(p, KTR_CSW)) - ktrcsw(p->p_tracep, 0, 0); + if (p && KTRPOINT(p, KTR_CSW)) + ktrcsw(p->p_tracep, 0, 0); #endif - return (EWOULDBLOCK); - } - } else if (timo) - untimeout(endtsleep, (void *)p, thandle); - if (catch && (sig != 0 || (sig = CURSIG(p)))) { + return(0); + } + + p->p_wchan = w; + p->p_wmesg = wmesg; + p->p_slptime = 0; + p->p_priority = priority & PRIMASK; + p->p_flag |= P_XSLEEP; + TAILQ_INSERT_TAIL(&w->waitq, p, p_procq); + if (timo) + thandle = timeout(endtsleep, (void *)p, timo); + /* + * We put ourselves on the sleep queue and start our timeout + * before calling CURSIG, as we could stop there, and a wakeup + * or a SIGCONT (or both) could occur while we were stopped. + * A SIGCONT would cause us to be marked as SSLEEP + * without resuming us, thus we must be ready for sleep + * when CURSIG is called. If the wakeup happens while we're + * stopped, p->p_wchan will be 0 upon return from CURSIG. 
+ */ + if (catch) { + p->p_flag |= P_SINTR; + if ((sig = CURSIG(p))) { + if (p->p_wchan) + unsleep(p); + p->p_stat = SRUN; + goto resume; + } + if (p->p_wchan == NULL) { + catch = 0; + goto resume; + } + } else + sig = 0; + p->p_stat = SSLEEP; + p->p_stats->p_ru.ru_nvcsw++; + mi_switch(); +resume: + curpriority = p->p_usrpri; + *gen = w->gen; /* update generation number */ + splx(s); + p->p_flag &= ~P_SINTR; + if (p->p_flag & P_TIMEOUT) { + p->p_flag &= ~P_TIMEOUT; + if (sig == 0) { #ifdef KTRACE if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif - if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) - return (EINTR); - return (ERESTART); + return (EWOULDBLOCK); } + } else if (timo) + untimeout(endtsleep, (void *)p, thandle); + if (catch && (sig != 0 || (sig = CURSIG(p)))) { #ifdef KTRACE if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif - } else { - /* - * If as_priority is 0, await() has been called without an - * intervening asleep(). We are still effectively a NOP, - * but we call mi_switch() for safety. - */ - - if (p->p_asleep.as_priority == 0) { - p->p_stats->p_ru.ru_nvcsw++; - mi_switch(); - } - splx(s); + if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) + return (EINTR); + return (ERESTART); } - - /* - * clear p_asleep.as_priority as an indication that await() has been - * called. If await() is called again without an intervening asleep(), - * await() is still effectively a NOP but the above mi_switch() code - * is triggered as a safety. - */ - p->p_asleep.as_priority = 0; - +#ifdef KTRACE + if (KTRPOINT(p, KTR_CSW)) + ktrcsw(p->p_tracep, 0, 0); +#endif return (0); } /* - * Implement timeout for tsleep or asleep()/await() + * Implement timeout for tsleep or xsleep * * If process hasn't been awakened (wchan non-zero), * set timeout flag and undo the sleep. 
If proc @@ -709,8 +671,49 @@ unsleep(p) s = splhigh(); if (p->p_wchan) { - TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq); - p->p_wchan = 0; + if (p->p_flag & P_XSLEEP) { + struct xwait *w = p->p_wchan; + TAILQ_REMOVE(&w->waitq, p, p_procq); + p->p_flag &= ~P_XSLEEP; + } else { + TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq); + } + p->p_wchan = NULL; + } + splx(s); +} + +/* + * Make all processes sleeping on the explicit lock structure runnable. + */ +void +xwakeup(struct xwait *w) +{ + struct proc *p; + int s; + + s = splhigh(); + ++w->gen; + while ((p = TAILQ_FIRST(&w->waitq)) != NULL) { + TAILQ_REMOVE(&w->waitq, p, p_procq); + KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP), + ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP)); + p->p_wchan = NULL; + p->p_flag &= ~P_XSLEEP; + if (p->p_stat == SSLEEP) { + /* OPTIMIZED EXPANSION OF setrunnable(p); */ + if (p->p_slptime > 1) + updatepri(p); + p->p_slptime = 0; + p->p_stat = SRUN; + if (p->p_flag & P_INMEM) { + setrunqueue(p); + maybe_resched(p); + } else { + p->p_flag |= P_SWAPINREQ; + wakeup((caddr_t)&proc0); + } + } } splx(s); } @@ -726,15 +729,16 @@ wakeup(ident) register struct proc *p; struct proc *np; int s; + int id = LOOKUP(ident); s = splhigh(); - qp = &slpque[LOOKUP(ident)]; + qp = &slpque[id]; restart: for (p = TAILQ_FIRST(qp); p != NULL; p = np) { np = TAILQ_NEXT(p, p_procq); if (p->p_wchan == ident) { TAILQ_REMOVE(qp, p, p_procq); - p->p_wchan = 0; + p->p_wchan = NULL; if (p->p_stat == SSLEEP) { /* OPTIMIZED EXPANSION OF setrunnable(p); */ if (p->p_slptime > 1) @@ -769,9 +773,10 @@ wakeup_one(ident) register struct proc *p; struct proc *np; int s; + int id = LOOKUP(ident); s = splhigh(); - qp = &slpque[LOOKUP(ident)]; + qp = &slpque[id]; restart: for (p = TAILQ_FIRST(qp); p != NULL; p = np) { diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index ccb7664631..1c47fb6f2c 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ 
-23,8 +23,11 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD: src/sys/kern/kern_switch.c,v 1.3.2.1 2000/05/16 06:58:12 dillon Exp $ - * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.1 2003/06/20 02:09:56 dillon Exp $ + * Each cpu in a system has its own self-contained light weight kernel + * thread scheduler, which means that generally speaking we only need + * to use a critical section to prevent hicups. + * + * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.2 2003/06/21 07:54:57 dillon Exp $ */ #include @@ -33,6 +36,28 @@ #include #include #include +#include +#include + +static __inline +void +_lwkt_dequeue(thread_t td) +{ + if (td->td_flags & TDF_RUNQ) { + td->td_flags &= ~TDF_RUNQ; + TAILQ_REMOVE(&mycpu->gd_tdrunq, td, td_threadq); + } +} + +static __inline +void +_lwkt_enqueue(thread_t td) +{ + if ((td->td_flags & TDF_RUNQ) == 0) { + td->td_flags |= TDF_RUNQ; + TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, td, td_threadq); + } +} /* * LWKTs operate on a per-cpu basis @@ -47,64 +72,388 @@ lwkt_gdinit(struct globaldata *gd) /* * Switch to the next runnable lwkt. If no LWKTs are runnable then - * switch to the idlethread. + * switch to the idlethread. Switching must occur within a critical + * section to avoid races with the scheduling queue. + * + * We always have full control over our cpu's run queue. Other cpus + * that wish to manipulate our queue must use the cpu_*msg() calls to + * talk to our cpu, so a critical section is all that is needed and + * the result is very, very fast thread switching. + * + * We always 'own' our own thread and the threads on our run queue,l + * due to TDF_RUNNING or TDF_RUNQ being set. We can safely clear + * TDF_RUNNING while in a critical section. + * + * The td_switch() function must be called while in the critical section. + * This function saves as much state as is appropriate for the type of + * thread. 
+ * + * (self contained on a per cpu basis) */ void lwkt_switch(void) { + thread_t td = curthread; thread_t ntd; + crit_enter(); if ((ntd = TAILQ_FIRST(&mycpu->gd_tdrunq)) != NULL) { TAILQ_REMOVE(&mycpu->gd_tdrunq, ntd, td_threadq); TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, ntd, td_threadq); - if (curthread != ntd) - curthread->td_switch(ntd); } else { - if (curthread != &mycpu->gd_idlethread) - curthread->td_switch(&mycpu->gd_idlethread); + ntd = &mycpu->gd_idlethread; + } + if (td != ntd) { + td->td_flags &= ~TDF_RUNNING; + ntd->td_flags |= TDF_RUNNING; + td->td_switch(ntd); } + crit_exit(); } +/* + * Yield our thread while higher priority threads are pending. This is + * typically called when we leave a critical section but it can be safely + * called while we are in a critical section. + * + * This function will not generally yield to equal priority threads but it + * can occur as a side effect. Note that lwkt_switch() is called from + * inside the critical section to pervent its own crit_exit() from reentering + * lwkt_yield_quick(). + * + * (self contained on a per cpu basis) + */ +void +lwkt_yield_quick(void) +{ + thread_t td = curthread; + while ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) { +#if 0 + cpu_schedule_reqs(); /* resets gd_reqpri */ +#endif + splz(); + } + + /* + * YYY enabling will cause wakeup() to task-switch, which really + * confused the old 4.x code. This is a good way to simulate + * preemption without actually doing preemption, because a lot + * of code (including schedule, deschedule) uses critical sections + * which devolve to here if an interrupt occured. + */ #if 0 + if (intr_nesting_level == 0) { + crit_enter(); + /* + * YYY temporary hacks until we disassociate the userland scheduler + * from the LWKT scheduler. 
+ */ + if (td->td_flags & TDF_RUNQ) { + lwkt_switch(); /* will not reenter yield function */ + } else { + lwkt_schedule_self(); /* make sure we are scheduled */ + lwkt_switch(); /* will not reenter yield function */ + lwkt_deschedule_self(); /* make sure we are descheduled */ + } + crit_exit_noyield(); + } +#endif +} + /* - * Switch to the next runnable lwkt preemptively ? + * This implements a normal yield which, unlike _quick, will yield to equal + * priority threads as well. Note that gd_reqpri tests will be handled by + * the crit_exit() call in lwkt_switch(). + * + * (self contained on a per cpu basis) */ void -lwkt_preempt(void) +lwkt_yield(void) { + lwkt_schedule_self(); + lwkt_switch(); +} + +/* + * Schedule a thread to run. As the current thread we can always safely + * schedule ourselves, and a shortcut procedure is provided for that + * function. + * + * (non-blocking, self contained on a per cpu basis) + */ +void +lwkt_schedule_self(void) +{ + thread_t td = curthread; + + crit_enter(); + KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!")); + KASSERT(td->td_flags & TDF_RUNNING, ("lwkt_schedule_self(): TDF_RUNNING not set!")); + _lwkt_enqueue(td); + crit_exit(); } -#endif /* - * Schedule an LWKT. You can legally schedule yourself. + * Generic schedule. Possibly schedule threads belonging to other cpus and + * deal with threads that might be blocked on a wait queue. + * + * This function will queue requests asynchronously when possible, but may + * block if no request structures are available. Upon return the caller + * should note that the scheduling request may not yet have been processed + * by the target cpu. + * + * YYY this is one of the best places to implement any load balancing code. + * Load balancing can be accomplished by requesting other sorts of actions + * for the thread in question. 
*/ void lwkt_schedule(thread_t td) { - if ((td->td_flags & TDF_RUNQ) == 0) { + crit_enter(); + if (td == curthread) { + _lwkt_enqueue(td); + } else { + lwkt_wait_t w; + + /* + * If the thread is on a wait list we have to send our scheduling + * request to the owner of the wait structure. Otherwise we send + * the scheduling request to the cpu owning the thread. Races + * are ok, the target will forward the message as necessary (the + * message may chase the thread around before it finally gets + * acted upon). + * + * (remember, wait structures use stable storage) + */ + if ((w = td->td_wait) != NULL) { + if (lwkt_havetoken(&w->wa_token)) { + TAILQ_REMOVE(&w->wa_waitq, td, td_threadq); + --w->wa_count; + td->td_wait = NULL; + if (td->td_cpu == mycpu->gd_cpu) { + _lwkt_enqueue(td); + } else { + panic("lwkt_schedule: cpu mismatch1"); #if 0 - if (td->td_flags & TDF_WAITQ) { - TAILQ_REMOVE(td->td_waitq, td, td_threadq); - td->td_flags &= ~TDF_WAITQ; - } + lwkt_cpu_msg_union_t msg = lwkt_getcpumsg(); + initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w); + cpu_sendnormsg(&msg.mu_Msg); #endif - td->td_flags |= TDF_RUNQ; - TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, td, td_threadq); + } + } else { + panic("lwkt_schedule: cpu mismatch2"); +#if 0 + lwkt_cpu_msg_union_t msg = lwkt_getcpumsg(); + initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w); + cpu_sendnormsg(&msg.mu_Msg); +#endif + } + } else { + /* + * If the wait structure is NULL and we own the thread, there + * is no race (since we are in a critical section). If we + * do not own the thread there might be a race but the + * target cpu will deal with it. + */ + if (td->td_cpu == mycpu->gd_cpu) { + _lwkt_enqueue(td); + } else { + panic("lwkt_schedule: cpu mismatch3"); +#if 0 + lwkt_cpu_msg_union_t msg = lwkt_getcpumsg(); + initScheduleReqMsg_Thread(&msg.mu_SchedReq, td); + cpu_sendnormsg(&msg.mu_Msg); +#endif + } + } } + crit_exit(); } /* - * Deschedule an LWKT. 
You can legally deschedule yourself, but if you - * are preempted the thread will automatically be rescheduled. Preemption - * must be disabled (e.g. splhi()) to avoid unexpected rescheduling of - * the thread. + * Deschedule a thread. + * + * (non-blocking, self contained on a per cpu basis) + */ +void +lwkt_deschedule_self(void) +{ + thread_t td = curthread; + + crit_enter(); + KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!")); + KASSERT(td->td_flags & TDF_RUNNING, ("lwkt_schedule_self(): TDF_RUNNING not set!")); + _lwkt_dequeue(td); + crit_exit(); +} + +/* + * Generic deschedule. Descheduling threads other then your own should be + * done only in carefully controlled circumstances. Descheduling is + * asynchronous. + * + * This function may block if the cpu has run out of messages. */ void lwkt_deschedule(thread_t td) { - if (td->td_flags & TDF_RUNQ) { - TAILQ_REMOVE(&mycpu->gd_tdrunq, td, td_threadq); - td->td_flags &= ~TDF_RUNQ; + crit_enter(); + if (td == curthread) { + _lwkt_dequeue(td); + } else { + if (td->td_cpu == mycpu->gd_cpu) { + _lwkt_dequeue(td); + } else { + panic("lwkt_deschedule: cpu mismatch"); +#if 0 + lwkt_cpu_msg_union_t msg = lwkt_getcpumsg(); + initDescheduleReqMsg_Thread(&msg.mu_DeschedReq, td); + cpu_sendnormsg(&msg.mu_Msg); +#endif + } + } + crit_exit(); +} + +/* + * This function deschedules the current thread and blocks on the specified + * wait queue. We obtain ownership of the wait queue in order to block + * on it. A generation number is used to interlock the wait queue in case + * it gets signalled while we are blocked waiting on the token. + * + * Note: alternatively we could dequeue our thread and then message the + * target cpu owning the wait queue. YYY implement as sysctl. + * + * Note: wait queue signals normally ping-pong the cpu as an optimization. 
+ */ +void +lwkt_block(lwkt_wait_t w) +{ + thread_t td = curthread; + int gen; + + gen = td->td_gen; + lwkt_gettoken(&w->wa_token); + if (w->wa_gen == gen) { + _lwkt_dequeue(td); + TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq); + ++w->wa_count; + td->td_wait = w; + lwkt_switch(); } + lwkt_reltoken(&w->wa_token); +} + +/* + * Signal a wait queue. We gain ownership of the wait queue in order to + * signal it. Once a thread is removed from the wait queue we have to + * deal with the cpu owning the thread. + * + * Note: alternatively we could message the target cpu owning the wait + * queue. YYY implement as sysctl. + */ +void +lwkt_signal(lwkt_wait_t w) +{ + thread_t td; + int count; + + lwkt_gettoken(&w->wa_token); + ++w->wa_gen; + count = w->wa_count; + while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) { + --count; + --w->wa_count; + TAILQ_REMOVE(&w->wa_waitq, td, td_threadq); + td->td_wait = NULL; + if (td->td_cpu == mycpu->gd_cpu) { + _lwkt_enqueue(td); + } else { +#if 0 + lwkt_cpu_msg_union_t msg = lwkt_getcpumsg(); + initScheduleReqMsg_Thread(&msg.mu_SchedReq, td); + cpu_sendnormsg(&msg.mu_Msg); +#endif + panic("lwkt_signal: cpu mismatch"); + } + lwkt_regettoken(&w->wa_token); + } + lwkt_reltoken(&w->wa_token); +} + +/* + * Aquire ownership of a token + * + * Aquire ownership of a token. The token may have spl and/or critical + * section side effects, depending on its purpose. These side effects + * guarentee that you will maintain ownership of the token as long as you + * do not block. If you block you may lose access to the token (but you + * must still release it even if you lose your access to it). + * + * Note that the spl and critical section characteristics of a token + * may not be changed once the token has been initialized. + */ +void +lwkt_gettoken(lwkt_token_t tok) +{ + /* + * Prevent preemption so the token can't be taken away from us once + * we gain ownership of it. Use a synchronous request which might + * block. 
The request will be forwarded as necessary playing catchup + * to the token. + */ + crit_enter(); +#if 0 + while (tok->t_cpu != mycpu->gd_cpu) { + lwkt_cpu_msg_union msg; + initTokenReqMsg(&msg.mu_TokenReq); + cpu_domsg(&msg); + } +#endif + /* + * leave us in a critical section on return. This will be undone + * by lwkt_reltoken() + */ +} + +/* + * Release your ownership of a token. Releases must occur in reverse + * order to aquisitions, eventually so priorities can be unwound properly + * like SPLs. At the moment the actual implemention doesn't care. + * + * We can safely hand a token that we own to another cpu without notifying + * it, but once we do we can't get it back without requesting it (unless + * the other cpu hands it back to us before we check). + * + * We might have lost the token, so check that. + */ +void +lwkt_reltoken(lwkt_token_t tok) +{ + if (tok->t_cpu == mycpu->gd_cpu) { + tok->t_cpu = tok->t_reqcpu; + } + crit_exit(); +} + +/* + * Reaquire a token that might have been lost. Returns 1 if we blocked + * while reaquiring the token (meaning that you might have lost other + * tokens you held when you made this call), return 0 if we did not block. + */ +int +lwkt_regettoken(lwkt_token_t tok) +{ +#if 0 + if (tok->t_cpu != mycpu->gd_cpu) { + while (tok->t_cpu != mycpu->gd_cpu) { + lwkt_cpu_msg_union msg; + initTokenReqMsg(&msg.mu_TokenReq); + cpu_domsg(&msg); + } + return(1); + } +#endif + return(0); } diff --git a/sys/platform/pc32/apic/apic_ipl.s b/sys/platform/pc32/apic/apic_ipl.s index 5bb1aa1317..adb2063e72 100644 --- a/sys/platform/pc32/apic/apic_ipl.s +++ b/sys/platform/pc32/apic/apic_ipl.s @@ -23,7 +23,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $ - * $DragonFly: src/sys/platform/pc32/apic/apic_ipl.s,v 1.2 2003/06/17 04:28:36 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/apic/apic_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ @@ -105,6 +105,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx /* set bit = unmasked level */ andl _ipending,%ecx /* set bit = unmasked pending INT */ diff --git a/sys/platform/pc32/apic/apic_vector.s b/sys/platform/pc32/apic/apic_vector.s index fe7702a4d1..6e81b41ab6 100644 --- a/sys/platform/pc32/apic/apic_vector.s +++ b/sys/platform/pc32/apic/apic_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $ - * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.3 2003/06/18 06:33:33 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ @@ -232,6 +232,9 @@ IDTVEC(vec_name) ; \ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\ testl $IRQ_BIT(irq_num), _cpl ; \ jne 2f ; /* this INT masked */ \ + movl _curthread,%eax ; \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; /* in critical sec */ \ ; \ incb _intr_nesting_level ; \ ; \ @@ -270,6 +273,7 @@ __CONCAT(Xresume,irq_num): ; \ EOI_IRQ(irq_num) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ lock ; \ btsl $(irq_num), iactive ; /* still active */ \ jnc 0b ; /* retry */ \ @@ -280,6 +284,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ MP_RELLOCK ; \ POP_FRAME ; \ iret ; \ @@ -288,6 +293,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_noisrlock, irq_num, 
APIC_ITRACE_NOISRLOCK) ;\ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ testl $IRQ_BIT(irq_num), _cpl ; \ jne 4f ; /* this INT masked */ \ call forward_irq ; /* forward irq to lock holder */ \ diff --git a/sys/platform/pc32/i386/genassym.c b/sys/platform/pc32/i386/genassym.c index 080f6f9a52..398765a4ae 100644 --- a/sys/platform/pc32/i386/genassym.c +++ b/sys/platform/pc32/i386/genassym.c @@ -35,7 +35,7 @@ * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $ - * $DragonFly: src/sys/platform/pc32/i386/genassym.c,v 1.8 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/genassym.c,v 1.9 2003/06/21 07:54:55 dillon Exp $ */ #include "opt_user_ldt.h" @@ -84,6 +84,12 @@ ASSYM(P_THREAD, offsetof(struct proc, p_thread)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_SP, offsetof(struct thread, td_sp)); +ASSYM(TD_PRI, offsetof(struct thread, td_pri)); +ASSYM(TD_MACH, offsetof(struct thread, td_mach)); + +ASSYM(MTD_CPL, offsetof(struct mi_thread, mtd_cpl)); + +ASSYM(TDPRI_CRIT, TDPRI_CRIT); #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); @@ -182,13 +188,14 @@ ASSYM(GD_IDLETHREAD, offsetof(struct globaldata, gd_idlethread)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); +ASSYM(GD_REQPRI, offsetof(struct globaldata, gd_reqpri)); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); #endif #ifdef SMP -ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid)); +ASSYM(GD_CPU, offsetof(struct globaldata, gd_cpuid)); ASSYM(GD_CPU_LOCKID, offsetof(struct globaldata, gd_cpu_lockid)); ASSYM(GD_OTHER_CPUS, offsetof(struct globaldata, gd_other_cpus)); ASSYM(GD_SS_EFLAGS, offsetof(struct globaldata, gd_ss_eflags)); 
diff --git a/sys/platform/pc32/i386/globals.s b/sys/platform/pc32/i386/globals.s index 64b6290994..abea1cdac4 100644 --- a/sys/platform/pc32/i386/globals.s +++ b/sys/platform/pc32/i386/globals.s @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/globals.s,v 1.13.2.1 2000/05/16 06:58:06 dillon Exp $ - * $DragonFly: src/sys/platform/pc32/i386/globals.s,v 1.6 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/globals.s,v 1.7 2003/06/21 07:54:55 dillon Exp $ */ #include "opt_user_ldt.h" @@ -66,11 +66,12 @@ _CPU_prvspace: * Define layout of the global data. On SMP this lives in * the per-cpu address space, otherwise it's in the data segment. */ - .globl gd_curthread, gd_npxthread, gd_astpending + .globl gd_curthread, gd_npxthread, gd_astpending, gd_reqpri .globl gd_common_tss, gd_switchtime, gd_switchticks, gd_idlethread .set gd_curthread,globaldata + GD_CURTHREAD .set gd_idlethread,globaldata + GD_IDLETHREAD .set gd_astpending,globaldata + GD_ASTPENDING + .set gd_reqpri,globaldata + GD_REQPRI .set gd_npxthread,globaldata + GD_NPXTHREAD .set gd_common_tss,globaldata + GD_COMMON_TSS .set gd_switchtime,globaldata + GD_SWITCHTIME @@ -86,11 +87,12 @@ _CPU_prvspace: #endif #ifndef SMP - .globl _curthread, _npxthread, _astpending + .globl _curthread, _npxthread, _astpending, _reqpri .globl _common_tss, _switchtime, _switchticks, _idlethread .set _curthread,globaldata + GD_CURTHREAD .set _idlethread,globaldata + GD_IDLETHREAD .set _astpending,globaldata + GD_ASTPENDING + .set _reqpri,globaldata + GD_REQPRI .set _npxthread,globaldata + GD_NPXTHREAD .set _common_tss,globaldata + GD_COMMON_TSS .set _switchtime,globaldata + GD_SWITCHTIME @@ -111,12 +113,12 @@ _CPU_prvspace: * The BSP version of these get setup in locore.s and pmap.c, while * the AP versions are setup in mp_machdep.c. 
*/ - .globl gd_cpuid, gd_cpu_lockid, gd_other_cpus + .globl gd_cpu, gd_cpu_lockid, gd_other_cpus .globl gd_ss_eflags, gd_inside_intr .globl gd_prv_CMAP1, gd_prv_CMAP2, gd_prv_CMAP3, gd_prv_PMAP1 .globl gd_prv_CADDR1, gd_prv_CADDR2, gd_prv_CADDR3, gd_prv_PADDR1 - .set gd_cpuid,globaldata + GD_CPUID + .set gd_cpu,globaldata + GD_CPUID .set gd_cpu_lockid,globaldata + GD_CPU_LOCKID .set gd_other_cpus,globaldata + GD_OTHER_CPUS .set gd_ss_eflags,globaldata + GD_SS_EFLAGS diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index fd1141c9f8..bef6545285 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.8 2003/06/21 07:54:55 dillon Exp $ */ #include "apm.h" @@ -985,8 +985,8 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, void cpu_idle(void) { + spl0(); for (;;) { - __asm __volatile("cli"); lwkt_switch(); if (cpu_idle_hlt) { /* @@ -997,7 +997,6 @@ cpu_idle(void) } else { __asm __volatile("sti"); } - spl0(); /* unmask interrupts */ /* YYY BGL */ } } @@ -1875,6 +1874,7 @@ init386(first) thread0.td_pcb = (struct pcb *) ((char *)proc0paddr + UPAGES*PAGE_SIZE - sizeof(struct pcb)); thread0.td_kstack = (char *)proc0paddr; + thread0.td_flags = TDF_RUNNING; atdevbase = ISA_HOLE_START + KERNBASE; diff --git a/sys/platform/pc32/i386/mp_machdep.c b/sys/platform/pc32/i386/mp_machdep.c index c9ee0b58df..d58be41847 100644 --- a/sys/platform/pc32/i386/mp_machdep.c +++ b/sys/platform/pc32/i386/mp_machdep.c @@ -23,7 +23,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $ - * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.5 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.6 2003/06/21 07:54:56 dillon Exp $ */ #include "opt_cpu.h" @@ -2143,10 +2143,9 @@ start_all_aps(u_int boot_addr) SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ /* prime data page for it to use */ - gd->gd_cpuid = x; - gd->gd_cpu_lockid = x << 24; mi_gdinit(gd, x); cpu_gdinit(gd, x); + gd->gd_cpu_lockid = x << 24; gd->gd_prv_CMAP1 = &SMPpt[pg + 1]; gd->gd_prv_CMAP2 = &SMPpt[pg + 2]; gd->gd_prv_CMAP3 = &SMPpt[pg + 3]; diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c index d6a1646a0c..0e85214cab 100644 --- a/sys/platform/pc32/i386/pmap.c +++ b/sys/platform/pc32/i386/pmap.c @@ -40,7 +40,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.8 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -871,6 +871,8 @@ void pmap_dispose_thread(struct thread *td) { /* HIPRI YYY */ + KASSERT((td->td_flags & (TDF_RUNQ|TDF_RUNNING)) == 0, + ("pmap_dispose_thread: still on queue: %08x", td->td_flags)); if (mycpu->gd_tdfreecount < CACHE_NTHREADS) { ++mycpu->gd_tdfreecount; TAILQ_INSERT_HEAD(&mycpu->gd_tdfreeq, td, td_threadq); @@ -968,6 +970,8 @@ pmap_dispose_proc(struct proc *p) { struct thread *td; + KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p)); + if ((td = p->p_thread) != NULL) { p->p_thread = NULL; td->td_proc = NULL; diff --git a/sys/platform/pc32/i386/swtch.s b/sys/platform/pc32/i386/swtch.s index 65ea669781..755d02c5ff 100644 --- a/sys/platform/pc32/i386/swtch.s +++ b/sys/platform/pc32/i386/swtch.s @@ -1,6 +1,7 @@ /*- * Copyright (c) 1990 The Regents of the University of California. 
* All rights reserved. + * LWKT threads Copyright (c) 2003 Matthew Dillon * * This code is derived from software contributed to Berkeley by * William Jolitz. @@ -34,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.7 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.8 2003/06/21 07:54:56 dillon Exp $ */ #include "npx.h" @@ -78,6 +79,8 @@ _tlb_flush_count: .long 0 */ ENTRY(cpu_heavy_switch) movl _curthread,%ecx + movl _cpl,%edx /* YYY temporary */ + movl %edx,TD_MACH+MTD_CPL(%ecx) /* YYY temporary */ movl TD_PROC(%ecx),%ecx cli @@ -220,7 +223,9 @@ ENTRY(cpu_exit_switch) */ ENTRY(cpu_heavy_restore) /* interrupts are disabled */ - movl TD_PCB(%eax),%edx + movl TD_MACH+MTD_CPL(%eax),%edx + movl %edx,_cpl /* YYY temporary */ + movl TD_PCB(%eax),%edx /* YYY temporary */ movl TD_PROC(%eax),%ecx #ifdef DIAGNOSTIC cmpb $SRUN,P_STAT(%ecx) @@ -523,6 +528,8 @@ ENTRY(cpu_lwkt_switch) pushl %edi pushfl movl _curthread,%ecx + movl _cpl,%edx /* YYY temporary */ + movl %edx,TD_MACH+MTD_CPL(%ecx) /* YYY temporary */ pushl $cpu_lwkt_restore cli movl %esp,TD_SP(%ecx) @@ -542,5 +549,7 @@ ENTRY(cpu_lwkt_restore) popl %esi popl %ebx popl %ebp + movl TD_MACH+MTD_CPL(%eax),%ecx /* YYY temporary */ + movl %ecx,_cpl /* YYY temporary */ ret diff --git a/sys/platform/pc32/i386/trap.c b/sys/platform/pc32/i386/trap.c index 095edcbfc5..8c5f16a6c6 100644 --- a/sys/platform/pc32/i386/trap.c +++ b/sys/platform/pc32/i386/trap.c @@ -36,7 +36,7 @@ * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $ - * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.3 2003/06/18 18:29:55 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -941,6 +941,10 @@ trap_fatal(frame, eva) } else { printf("Idle\n"); } + printf("current thread = pri 
%d ", curthread->td_pri); + if (curthread->td_pri >= TDPRI_CRIT) + printf("(CRIT)"); + printf("\n"); printf("interrupt mask = "); if ((cpl & net_imask) == net_imask) printf("net "); diff --git a/sys/platform/pc32/icu/icu_ipl.s b/sys/platform/pc32/icu/icu_ipl.s index 9af40ee549..3d4824e456 100644 --- a/sys/platform/pc32/icu/icu_ipl.s +++ b/sys/platform/pc32/icu/icu_ipl.s @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $ - * $DragonFly: src/sys/platform/pc32/icu/icu_ipl.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/icu/icu_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ .data @@ -60,6 +60,8 @@ _imen: .long HWI_MASK * -- soft splXX masks with group mechanism (cpl) * -- h/w masks for currently active or unused interrupts (imen) * -- ipending = active interrupts currently masked by cpl + * -- splz handles pending interrupts regardless of the critical + * nesting state, it is only called synchronously. */ ENTRY(splz) @@ -80,6 +82,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx andl _ipending,%ecx diff --git a/sys/platform/pc32/icu/icu_vector.s b/sys/platform/pc32/icu/icu_vector.s index 30cbf28e41..960b6d4fda 100644 --- a/sys/platform/pc32/icu/icu_vector.s +++ b/sys/platform/pc32/icu/icu_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $ - * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -66,6 +66,9 @@ IDTVEC(vec_name) ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4,%eax ; \ incl (%eax) ; \ + movl _curthread, %eax ; /* are we in a critical section? 
*/ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 1f ; \ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ notl %eax ; \ andl _ipending,%eax ; \ @@ -127,7 +130,10 @@ IDTVEC(vec_name) ; \ movb %al,_imen + IRQ_BYTE(irq_num) ; \ outb %al,$icu+ICU_IMR_OFFSET ; \ enable_icus ; \ - movl _cpl,%eax ; \ + movl _curthread, %eax ; /* are we in a critical section? */ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; \ + movl _cpl,%eax ; /* is this interrupt masked by the cpl? */ \ testb $IRQ_BIT(irq_num),%reg ; \ jne 2f ; \ incb _intr_nesting_level ; \ @@ -158,6 +164,7 @@ __CONCAT(Xresume,irq_num): ; \ 2: ; \ /* XXX skip mcounting here to avoid double count */ \ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + movl $TDPRI_CRIT,_reqpri ; \ popl %fs ; \ popl %es ; \ popl %ds ; \ diff --git a/sys/platform/pc32/include/asnames.h b/sys/platform/pc32/include/asnames.h index 66dcf60c2b..5afb9a1641 100644 --- a/sys/platform/pc32/include/asnames.h +++ b/sys/platform/pc32/include/asnames.h @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/asnames.h,v 1.44.2.8 2003/01/22 20:14:53 jhb Exp $ - * $DragonFly: src/sys/platform/pc32/include/Attic/asnames.h,v 1.6 2003/06/20 02:09:54 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/include/Attic/asnames.h,v 1.7 2003/06/21 07:54:56 dillon Exp $ */ #ifndef _MACHINE_ASNAMES_H_ @@ -341,6 +341,7 @@ #define _common_tss FS(common_tss) #define _common_tssd FS(common_tssd) #define _cpuid FS(cpuid) +#define _reqpri FS(reqpri) #define _cpu_lockid FS(cpu_lockid) #define _curthread FS(curthread) #define _idlethread FS(idlethread) diff --git a/sys/platform/pc32/include/globaldata.h b/sys/platform/pc32/include/globaldata.h index ce9b39ab78..d4a7e632ba 100644 --- a/sys/platform/pc32/include/globaldata.h +++ b/sys/platform/pc32/include/globaldata.h @@ -24,7 +24,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/include/globaldata.h,v 1.11.2.1 2000/05/16 06:58:10 dillon Exp $ - * $DragonFly: src/sys/platform/pc32/include/globaldata.h,v 1.8 2003/06/20 02:09:54 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/include/globaldata.h,v 1.9 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -50,13 +50,14 @@ struct globaldata { struct i386tss gd_common_tss; int gd_switchticks; int gd_tdfreecount; /* new thread cache */ + int gd_reqpri; /* highest pri blocked thread */ TAILQ_HEAD(,thread) gd_tdfreeq; /* new thread cache */ TAILQ_HEAD(,thread) gd_tdrunq; /* runnable threads */ struct segment_descriptor gd_common_tssd; struct segment_descriptor *gd_tss_gdt; int gd_currentldt; /* USER_LDT */ + u_int gd_cpu; #ifdef SMP - u_int gd_cpuid; u_int gd_cpu_lockid; u_int gd_other_cpus; int gd_inside_intr; diff --git a/sys/platform/pc32/include/thread.h b/sys/platform/pc32/include/thread.h new file mode 100644 index 0000000000..0faf7701a2 --- /dev/null +++ b/sys/platform/pc32/include/thread.h @@ -0,0 +1,10 @@ +/* + * i386/include/thread.h + * + * $DragonFly: src/sys/platform/pc32/include/thread.h,v 1.1 2003/06/21 07:54:56 dillon Exp $ + */ + +struct mi_thread { + unsigned int mtd_cpl; +}; + diff --git a/sys/platform/pc32/isa/apic_ipl.s b/sys/platform/pc32/isa/apic_ipl.s index 5f5bdf4de5..b49e3a9f18 100644 --- a/sys/platform/pc32/isa/apic_ipl.s +++ b/sys/platform/pc32/isa/apic_ipl.s @@ -23,7 +23,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $ - * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_ipl.s,v 1.2 2003/06/17 04:28:36 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ @@ -105,6 +105,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. 
*/ + movl $0,_reqpri movl %eax,%ecx notl %ecx /* set bit = unmasked level */ andl _ipending,%ecx /* set bit = unmasked pending INT */ diff --git a/sys/platform/pc32/isa/apic_vector.s b/sys/platform/pc32/isa/apic_vector.s index aa49ccb9cd..a6a6164e03 100644 --- a/sys/platform/pc32/isa/apic_vector.s +++ b/sys/platform/pc32/isa/apic_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $ - * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.3 2003/06/18 06:33:33 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $ */ @@ -232,6 +232,9 @@ IDTVEC(vec_name) ; \ APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\ testl $IRQ_BIT(irq_num), _cpl ; \ jne 2f ; /* this INT masked */ \ + movl _curthread,%eax ; \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; /* in critical sec */ \ ; \ incb _intr_nesting_level ; \ ; \ @@ -270,6 +273,7 @@ __CONCAT(Xresume,irq_num): ; \ EOI_IRQ(irq_num) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ lock ; \ btsl $(irq_num), iactive ; /* still active */ \ jnc 0b ; /* retry */ \ @@ -280,6 +284,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ MP_RELLOCK ; \ POP_FRAME ; \ iret ; \ @@ -288,6 +293,7 @@ __CONCAT(Xresume,irq_num): ; \ APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\ lock ; \ orl $IRQ_BIT(irq_num), _ipending ; \ + movl $TDPRI_CRIT,_reqpri ; \ testl $IRQ_BIT(irq_num), _cpl ; \ jne 4f ; /* this INT masked */ \ call forward_irq ; /* forward irq to lock holder */ \ diff --git a/sys/platform/pc32/isa/icu_ipl.s b/sys/platform/pc32/isa/icu_ipl.s index 041a9d9675..ce975aa47a 100644 --- a/sys/platform/pc32/isa/icu_ipl.s +++ b/sys/platform/pc32/isa/icu_ipl.s @@ -35,7 +35,7 @@ * 
SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $ - * $DragonFly: src/sys/platform/pc32/isa/Attic/icu_ipl.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/isa/Attic/icu_ipl.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ .data @@ -60,6 +60,8 @@ _imen: .long HWI_MASK * -- soft splXX masks with group mechanism (cpl) * -- h/w masks for currently active or unused interrupts (imen) * -- ipending = active interrupts currently masked by cpl + * -- splz handles pending interrupts regardless of the critical + * nesting state, it is only called synchronously. */ ENTRY(splz) @@ -80,6 +82,7 @@ splz_next: * We don't need any locking here. (ipending & ~cpl) cannot grow * while we're looking at it - any interrupt will shrink it to 0. */ + movl $0,_reqpri movl %eax,%ecx notl %ecx andl _ipending,%ecx diff --git a/sys/platform/pc32/isa/icu_vector.s b/sys/platform/pc32/isa/icu_vector.s index d34fe4ea9c..c7dd0a2f97 100644 --- a/sys/platform/pc32/isa/icu_vector.s +++ b/sys/platform/pc32/isa/icu_vector.s @@ -1,7 +1,7 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $ - * $DragonFly: src/sys/platform/pc32/isa/Attic/icu_vector.s,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/isa/Attic/icu_vector.s,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ /* @@ -66,6 +66,9 @@ IDTVEC(vec_name) ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4,%eax ; \ incl (%eax) ; \ + movl _curthread, %eax ; /* are we in a critical section? */ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 1f ; \ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ notl %eax ; \ andl _ipending,%eax ; \ @@ -127,7 +130,10 @@ IDTVEC(vec_name) ; \ movb %al,_imen + IRQ_BYTE(irq_num) ; \ outb %al,$icu+ICU_IMR_OFFSET ; \ enable_icus ; \ - movl _cpl,%eax ; \ + movl _curthread, %eax ; /* are we in a critical section? 
*/ \ + cmpl $TDPRI_CRIT,TD_PRI(%eax) ; \ + jge 2f ; \ + movl _cpl,%eax ; /* is this interrupt masked by the cpl? */ \ testb $IRQ_BIT(irq_num),%reg ; \ jne 2f ; \ incb _intr_nesting_level ; \ @@ -158,6 +164,7 @@ __CONCAT(Xresume,irq_num): ; \ 2: ; \ /* XXX skip mcounting here to avoid double count */ \ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + movl $TDPRI_CRIT,_reqpri ; \ popl %fs ; \ popl %es ; \ popl %ds ; \ diff --git a/sys/platform/pc32/isa/ipl_funcs.c b/sys/platform/pc32/isa/ipl_funcs.c index 51be1e504a..d3b7f5a369 100644 --- a/sys/platform/pc32/isa/ipl_funcs.c +++ b/sys/platform/pc32/isa/ipl_funcs.c @@ -24,15 +24,17 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/ipl_funcs.c,v 1.32.2.5 2002/12/17 18:04:02 sam Exp $ - * $DragonFly: src/sys/platform/pc32/isa/ipl_funcs.c,v 1.2 2003/06/17 04:28:37 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/isa/ipl_funcs.c,v 1.3 2003/06/21 07:54:56 dillon Exp $ */ #include #include #include #include +#include #include #include +#include #include /* @@ -46,6 +48,7 @@ void name(void) \ { \ atomic_set_int(var, bits); \ + mycpu->gd_reqpri = TDPRI_CRIT; \ } DO_SETBITS(setdelayed, &ipending, loadandclear(&idelayed)) @@ -166,7 +169,7 @@ void spl0(void) { cpl = 0; - if (ipending) + if (ipending && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -174,7 +177,7 @@ void splx(unsigned ipl) { cpl = ipl; - if (ipending & ~ipl) + if ((ipending & ~ipl) && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -221,7 +224,7 @@ spl0(void) { KASSERT(inside_intr == 0, ("spl0: called from interrupt")); cpl = 0; - if (ipending) + if (ipending && curthread->td_pri < TDPRI_CRIT) splz(); } @@ -235,8 +238,10 @@ void splx(unsigned ipl) { cpl = ipl; - if (inside_intr == 0 && (ipending & ~cpl) != 0) + if (inside_intr == 0 && (ipending & ~cpl) != 0 && + curthread->td_pri < TDPRI_CRIT) { splz(); + } } diff --git a/sys/platform/vkernel/i386/genassym.c b/sys/platform/vkernel/i386/genassym.c index a8e4eba0c2..1ded946488 100644 --- 
a/sys/platform/vkernel/i386/genassym.c +++ b/sys/platform/vkernel/i386/genassym.c @@ -35,7 +35,7 @@ * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/genassym.c,v 1.8 2003/06/20 02:09:50 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/genassym.c,v 1.9 2003/06/21 07:54:55 dillon Exp $ */ #include "opt_user_ldt.h" @@ -84,6 +84,12 @@ ASSYM(P_THREAD, offsetof(struct proc, p_thread)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_SP, offsetof(struct thread, td_sp)); +ASSYM(TD_PRI, offsetof(struct thread, td_pri)); +ASSYM(TD_MACH, offsetof(struct thread, td_mach)); + +ASSYM(MTD_CPL, offsetof(struct mi_thread, mtd_cpl)); + +ASSYM(TDPRI_CRIT, TDPRI_CRIT); #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); @@ -182,13 +188,14 @@ ASSYM(GD_IDLETHREAD, offsetof(struct globaldata, gd_idlethread)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); +ASSYM(GD_REQPRI, offsetof(struct globaldata, gd_reqpri)); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); #endif #ifdef SMP -ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid)); +ASSYM(GD_CPU, offsetof(struct globaldata, gd_cpu)); ASSYM(GD_CPU_LOCKID, offsetof(struct globaldata, gd_cpu_lockid)); ASSYM(GD_OTHER_CPUS, offsetof(struct globaldata, gd_other_cpus)); ASSYM(GD_SS_EFLAGS, offsetof(struct globaldata, gd_ss_eflags)); diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h index edeea86181..f361b18a25 100644 --- a/sys/sys/malloc.h +++ b/sys/sys/malloc.h @@ -32,7 +32,7 @@ * * @(#)malloc.h 8.5 (Berkeley) 5/3/95 * $FreeBSD: src/sys/sys/malloc.h,v 1.48.2.2 2002/03/16 02:19:16 archie Exp $ - * $DragonFly: src/sys/sys/malloc.h,v 1.2 2003/06/17 04:28:58 dillon
Exp $ + * $DragonFly: src/sys/sys/malloc.h,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ #ifndef _SYS_MALLOC_H_ @@ -46,7 +46,6 @@ #define M_WAITOK 0x0000 #define M_NOWAIT 0x0001 /* do not block */ #define M_USE_RESERVE 0x0002 /* can alloc out of reserve memory */ -#define M_ASLEEP 0x0004 /* async sleep on failure */ #define M_ZERO 0x0008 /* bzero the allocation */ #define M_MAGIC 877983977 /* time when first defined :-) */ diff --git a/sys/sys/proc.h b/sys/sys/proc.h index ed22d2f35b..e6c14e9871 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -37,7 +37,7 @@ * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD: src/sys/sys/proc.h,v 1.99.2.9 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/sys/proc.h,v 1.8 2003/06/20 02:09:59 dillon Exp $ + * $DragonFly: src/sys/sys/proc.h,v 1.9 2003/06/21 07:54:57 dillon Exp $ */ #ifndef _SYS_PROC_H_ @@ -91,15 +91,6 @@ struct procsig { #define PS_NOCLDWAIT 0x0001 /* No zombies if child dies */ #define PS_NOCLDSTOP 0x0002 /* No SIGCHLD when children stop. */ -/* - * pasleep structure, used by asleep() syscall to hold requested priority - * and timeout values for await(). - */ -struct pasleep { - int as_priority; /* Async priority. */ - int as_timo; /* Async timeout. */ -}; - /* * pargs, used to hold a copy of the command line, if it had a sane * length @@ -249,7 +240,6 @@ struct proc { int p_wakeup; /* thread id */ struct proc *p_peers; struct proc *p_leader; - struct pasleep p_asleep; /* Used by asleep()/await(). 
*/ void *p_emuldata; /* process-specific emulator state data */ struct thread *p_thread; /* temporarily embed thread struct in proc */ }; @@ -293,6 +283,7 @@ struct proc { /* Marked a kernel thread */ #define P_UNUSED100000 0x100000 #define P_KTHREADP 0x200000 /* Process is really a kernel thread */ +#define P_XSLEEP 0x400000 /* process sitting on xwait_t structure */ #define P_DEADLKTREAT 0x800000 /* lock aquisition - deadlock treatment */ diff --git a/sys/sys/systm.h b/sys/sys/systm.h index 20b1328c2d..e9c0040e53 100644 --- a/sys/sys/systm.h +++ b/sys/sys/systm.h @@ -37,7 +37,7 @@ * * @(#)systm.h 8.7 (Berkeley) 3/29/95 * $FreeBSD: src/sys/sys/systm.h,v 1.111.2.18 2002/12/17 18:04:02 sam Exp $ - * $DragonFly: src/sys/sys/systm.h,v 1.3 2003/06/20 02:09:59 dillon Exp $ + * $DragonFly: src/sys/sys/systm.h,v 1.4 2003/06/21 07:54:57 dillon Exp $ */ #ifndef _SYS_SYSTM_H_ @@ -97,6 +97,7 @@ extern int maxusers; /* system tune hint */ struct clockframe; struct malloc_type; struct proc; +struct xwait; struct timeval; struct tty; struct uio; @@ -337,8 +338,9 @@ extern watchdog_tickle_fn wdog_tickler; * less often. */ int tsleep __P((void *chan, int pri, const char *wmesg, int timo)); -int asleep __P((void *chan, int pri, const char *wmesg, int timo)); -int await __P((int pri, int timo)); +int xsleep __P((struct xwait *w, int pri, const char *wmesg, int timo, int *gen)); +void xwakeup __P((struct xwait *w)); +void xwait_init __P((struct xwait *w)); void wakeup __P((void *chan)); void wakeup_one __P((void *chan)); diff --git a/sys/sys/thread.h b/sys/sys/thread.h index 995de22ce3..74c5803606 100644 --- a/sys/sys/thread.h +++ b/sys/sys/thread.h @@ -4,63 +4,168 @@ * Implements the architecture independant portion of the LWKT * subsystem. 
* - * $DragonFly: src/sys/sys/thread.h,v 1.3 2003/06/20 02:09:59 dillon Exp $ + * $DragonFly: src/sys/sys/thread.h,v 1.4 2003/06/21 07:54:57 dillon Exp $ */ #ifndef _SYS_THREAD_H_ #define _SYS_THREAD_H_ +struct globaldata; struct proc; struct thread; -struct globaldata; +struct lwkt_queue; +struct lwkt_token; +struct lwkt_wait; +struct lwkt_msg; +struct lwkt_port; +struct lwkt_cpu_msg; +struct lwkt_cpu_port; +struct lwkt_rwlock; + +typedef struct lwkt_queue *lwkt_queue_t; +typedef struct lwkt_token *lwkt_token_t; +typedef struct lwkt_wait *lwkt_wait_t; +typedef struct lwkt_msg *lwkt_msg_t; +typedef struct lwkt_port *lwkt_port_t; +typedef struct lwkt_cpu_msg *lwkt_cpu_msg_t; +typedef struct lwkt_cpu_port *lwkt_cpu_port_t; +typedef struct lwkt_rwlock *lwkt_rwlock_t; +typedef struct thread *thread_t; + +typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue; +typedef TAILQ_HEAD(lwkt_msg_queue, lwkt_msg) lwkt_msg_queue; + +#include + +/* + * Tokens arbitrate access to information. They are 'soft' arbitrators + * in that they are associated with cpus rather than threads, making the + * optimal acquisition case very fast if your cpu already happens to own the + * token you are requesting. + */ +typedef struct lwkt_token { + int t_cpu; /* the current owner of the token */ + int t_reqcpu; /* return ownership to this cpu on release */ +#if 0 + int t_pri; /* raise thread priority to hold token */ +#endif +} lwkt_token; + +/* + * Wait structures deal with blocked threads. Due to the way remote cpus + * interact with these structures stable storage must be used. + */ +typedef struct lwkt_wait { + lwkt_queue wa_waitq; /* list of waiting threads */ + lwkt_token wa_token; /* who currently owns the list */ + int wa_gen; + int wa_count; +} lwkt_wait; + +/* + * The standard message and port structure for communications between + * threads.
+ */ +typedef struct lwkt_msg { + TAILQ_ENTRY(lwkt_msg) ms_node; + lwkt_port_t ms_replyport; + int ms_cmd; + int ms_flags; + int ms_error; +} lwkt_msg; + +#define MSGF_DONE 0x0001 +#define MSGF_REPLY 0x0002 +#define MSGF_QUEUED 0x0004 + +typedef struct lwkt_port { + lwkt_msg_queue mp_msgq; + lwkt_wait mp_wait; +} lwkt_port; -typedef TAILQ_HEAD(, thread) thread_list_t; +#define mp_token mp_wait.wa_token +/* + * The standard message and queue structure used for communications between + * cpus. Messages are typically queued via a machine-specific non-linked + * FIFO matrix allowing any cpu to send a message to any other cpu without + * blocking. + */ +typedef struct lwkt_cpu_msg { + void (*cm_func)(lwkt_cpu_msg_t msg); /* primary dispatch function */ + int cm_code; /* request code if applicable */ + int cm_cpu; /* reply to cpu */ + thread_t cm_originator; /* originating thread for wakeup */ +} lwkt_cpu_msg; + +/* + * reader/writer lock + */ +typedef struct lwkt_rwlock { + lwkt_wait rw_wait; + thread_t rw_owner; + int rw_count; +} lwkt_rwlock; + +#define rw_token rw_wait.wa_token + +/* + * Thread structure. Note that ownership of a thread structure is special + * cased and there is no 'token'. A thread is always owned by td_cpu and + * any manipulation of the thread by some other cpu must be done through + * cpu_*msg() functions. e.g. you could request ownership of a thread that + * way, or hand a thread off to another cpu by changing td_cpu and sending + * a schedule request to the other cpu.
+ */ struct thread { TAILQ_ENTRY(thread) td_threadq; struct proc *td_proc; /* (optional) associated process */ struct pcb *td_pcb; /* points to pcb and top of kstack */ + int td_cpu; /* cpu owning the thread */ int td_pri; /* 0-31, 0=highest priority */ int td_flags; /* THF flags */ + int td_gen; /* wait queue chasing generation number */ char *td_kstack; /* kernel stack */ char *td_sp; /* kernel stack pointer for LWKT restore */ void (*td_switch)(struct thread *ntd); - thread_list_t *td_waitq; -#if 0 - int td_bglcount; /* big giant lock count */ -#endif + lwkt_wait_t td_wait; /* thread sitting on wait structure */ + struct mi_thread td_mach; }; -typedef struct thread *thread_t; - /* - * Thread states. Note that the RUNNING state is independant from the + * Thread flags. Note that the RUNNING state is independant from the * RUNQ/WAITQ state. That is, a thread's queueing state can be manipulated * while it is running. If a thread is preempted it will always be moved * back to the RUNQ if it isn't on it. */ - #define TDF_RUNNING 0x0001 /* currently running */ #define TDF_RUNQ 0x0002 /* on run queue */ -#define TDF_WAITQ 0x0004 /* on wait queue */ /* * Thread priorities. Typically only one thread from any given * user process scheduling queue is on the LWKT run queue at a time. * Remember that there is one LWKT run queue per cpu. + * + * Critical sections are handled by bumping td_pri above TDPRI_MAX, which + * causes interrupts to be masked as they occur. When this occurs + * mycpu->gd_reqpri will be raised (possibly just set to TDPRI_CRIT for + * interrupt masking). 
*/ - -#define THPRI_INT_HIGH 2 /* high priority interrupt */ -#define THPRI_INT_MED 4 /* medium priority interrupt */ -#define THPRI_INT_LOW 6 /* low priority interrupt */ -#define THPRI_INT_SUPPORT 10 /* kernel / high priority support */ -#define THPRI_SOFT_TIMER 12 /* kernel / timer */ -#define THPRI_SOFT_NORM 15 /* kernel / normal */ -#define THPRI_KERN_USER 20 /* kernel / block in syscall */ -#define THPRI_USER_REAL 25 /* user scheduler real time */ -#define THPRI_USER_NORM 27 /* user scheduler normal */ -#define THPRI_USER_IDLE 29 /* user scheduler idle */ -#define THPRI_IDLE_THREAD 31 /* the idle thread */ +#define TDPRI_IDLE_THREAD 0 /* the idle thread */ +#define TDPRI_USER_IDLE 4 /* user scheduler idle */ +#define TDPRI_USER_NORM 6 /* user scheduler normal */ +#define TDPRI_USER_REAL 8 /* user scheduler real time */ +#define TDPRI_KERN_USER 10 /* kernel / block in syscall */ +#define TDPRI_SOFT_NORM 14 /* kernel / normal */ +#define TDPRI_SOFT_TIMER 16 /* kernel / timer */ +#define TDPRI_INT_SUPPORT 20 /* kernel / high priority support */ +#define TDPRI_INT_LOW 27 /* low priority interrupt */ +#define TDPRI_INT_MED 28 /* medium priority interrupt */ +#define TDPRI_INT_HIGH 29 /* high priority interrupt */ +#define TDPRI_MAX 31 + +#define TDPRI_MASK 31 +#define TDPRI_CRIT 32 /* high bits of td_pri used for crit */ #define CACHE_NTHREADS 4 @@ -72,7 +177,17 @@ extern void lwkt_gdinit(struct globaldata *gd); extern void lwkt_switch(void); extern void lwkt_preempt(void); extern void lwkt_schedule(thread_t td); +extern void lwkt_schedule_self(void); extern void lwkt_deschedule(thread_t td); +extern void lwkt_deschedule_self(void); +extern void lwkt_yield(void); +extern void lwkt_yield_quick(void); + +extern void lwkt_block(lwkt_wait_t w); +extern void lwkt_signal(lwkt_wait_t w); +extern void lwkt_gettoken(lwkt_token_t tok); +extern void lwkt_reltoken(lwkt_token_t tok); +extern int lwkt_regettoken(lwkt_token_t tok); #endif diff --git a/sys/sys/thread2.h 
b/sys/sys/thread2.h new file mode 100644 index 0000000000..a8c951e861 --- /dev/null +++ b/sys/sys/thread2.h @@ -0,0 +1,90 @@ +/* + * SYS/THREAD2.H + * + * Implements inline procedure support for the LWKT subsystem. + * + * Generally speaking these routines only operate on threads associated + * with the current cpu. For example, a higher priority thread pending + * on a different cpu will not be immediately scheduled by a yield() on + * this cpu. + * + * $DragonFly: src/sys/sys/thread2.h,v 1.1 2003/06/21 07:54:57 dillon Exp $ + */ + +#ifndef _SYS_THREAD2_H_ +#define _SYS_THREAD2_H_ + +/* + * Critical sections prevent preemption by raising a thread's priority + * above the highest possible interrupting priority. Additionally, the + * current cpu will not be able to schedule a new thread but will instead + * place it on a pending list (with interrupts physically disabled) and + * set mycpu->gd_reqpri to indicate that work needs to be done, which + * lwkt_yield_quick() takes care of. + * + * Synchronous switching and blocking is allowed while in a critical section. 
+ */ +static __inline void +crit_enter(void) +{ + curthread->td_pri += TDPRI_CRIT; +} + +static __inline void +crit_exit_noyield(void) +{ + thread_t td = curthread; + + td->td_pri -= TDPRI_CRIT; + KASSERT(td->td_pri >= 0, ("crit_exit nesting error")); +} + +static __inline void +crit_exit(void) +{ + thread_t td = curthread; + + td->td_pri -= TDPRI_CRIT; + KASSERT(td->td_pri >= 0, ("crit_exit nesting error")); + if (td->td_pri < mycpu->gd_reqpri) + lwkt_yield_quick(); +} + +static __inline int +lwkt_raisepri(int pri) +{ + int opri = curthread->td_pri; + if (opri < pri) + curthread->td_pri = pri; + return(opri); +} + +static __inline int +lwkt_lowerpri(int pri) +{ + thread_t td = curthread; + int opri = td->td_pri; + if (opri > pri) { + td->td_pri = pri; + if (pri < mycpu->gd_reqpri) + lwkt_yield_quick(); + } + return(opri); +} + +static __inline void +lwkt_setpri(int pri) +{ + curthread->td_pri = pri; + if (pri < mycpu->gd_reqpri) + lwkt_yield_quick(); +} + +static __inline int +lwkt_havetoken(lwkt_token_t tok) +{ + return (tok->t_cpu == mycpu->gd_cpu); +} + +#endif + diff --git a/sys/sys/xwait.h b/sys/sys/xwait.h new file mode 100644 index 0000000000..c12a5bf933 --- /dev/null +++ b/sys/sys/xwait.h @@ -0,0 +1,28 @@ +/* + * SYS/XWAIT.H + * + * $DragonFly: src/sys/sys/xwait.h,v 1.1 2003/06/21 07:54:57 dillon Exp $ + */ + +#ifndef _SYS_XWAIT_H_ +#define _SYS_XWAIT_H_ + +struct proc; + +/* + * XWAIT structure for xsleep()/xwakeup() + */ + +struct xwait { + int gen; + TAILQ_HEAD(,proc) waitq; +}; + +static __inline void +xupdate_gen(struct xwait *w) +{ + ++w->gen; +} + +#endif + diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 921b7f96ec..dee333c141 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -62,7 +62,7 @@ * rights to redistribute these changes. 
* * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $ - * $DragonFly: src/sys/vm/vm_kern.c,v 1.2 2003/06/17 04:29:00 dillon Exp $ + * $DragonFly: src/sys/vm/vm_kern.c,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ /* @@ -340,14 +340,14 @@ kmem_malloc(map, size, flags) /* * Note: if M_NOWAIT specified alone, allocate from * interrupt-safe queues only (just the free list). If - * M_ASLEEP or M_USE_RESERVE is also specified, we can also + * M_USE_RESERVE is also specified, we can also * allocate from the cache. Neither of the latter two * flags may be specified from an interrupt since interrupts * are not allowed to mess with the cache queue. */ retry: m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), - ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ? + ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM); @@ -377,9 +377,6 @@ retry: } vm_map_delete(map, addr, addr + size); vm_map_unlock(map); - if (flags & M_ASLEEP) { - VM_AWAIT; - } return (0); } vm_page_flag_clear(m, PG_ZERO); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 67b16143c4..d5f880caf6 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -35,7 +35,7 @@ * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $ - * $DragonFly: src/sys/vm/vm_page.c,v 1.2 2003/06/17 04:29:00 dillon Exp $ + * $DragonFly: src/sys/vm/vm_page.c,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ /* @@ -907,89 +907,6 @@ vm_waitpfault(void) splx(s); } -/* - * vm_await: (also see VM_AWAIT macro) - * - * asleep on an event that will signal when free pages are available - * for allocation. 
- */ - -void -vm_await(void) -{ - int s; - - s = splvm(); - if (curproc == pageproc) { - vm_pageout_pages_needed = 1; - asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0); - } else { - if (!vm_pages_needed) { - vm_pages_needed++; - wakeup(&vm_pages_needed); - } - asleep(&cnt.v_free_count, PVM, "vmwait", 0); - } - splx(s); -} - -#if 0 -/* - * vm_page_sleep: - * - * Block until page is no longer busy. - */ - -int -vm_page_sleep(vm_page_t m, char *msg, char *busy) -{ - int slept = 0; - if ((busy && *busy) || (m->flags & PG_BUSY)) { - int s; - s = splvm(); - if ((busy && *busy) || (m->flags & PG_BUSY)) { - vm_page_flag_set(m, PG_WANTED); - tsleep(m, PVM, msg, 0); - slept = 1; - } - splx(s); - } - return slept; -} - -#endif - -#if 0 - -/* - * vm_page_asleep: - * - * Similar to vm_page_sleep(), but does not block. Returns 0 if - * the page is not busy, or 1 if the page is busy. - * - * This routine has the side effect of calling asleep() if the page - * was busy (1 returned). - */ - -int -vm_page_asleep(vm_page_t m, char *msg, char *busy) -{ - int slept = 0; - if ((busy && *busy) || (m->flags & PG_BUSY)) { - int s; - s = splvm(); - if ((busy && *busy) || (m->flags & PG_BUSY)) { - vm_page_flag_set(m, PG_WANTED); - asleep(m, PVM, msg, 0); - slept = 1; - } - splx(s); - } - return slept; -} - -#endif - /* * vm_page_activate: * diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index c5253ace94..5edb94ae90 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -62,7 +62,7 @@ * rights to redistribute these changes. 
* * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $ - * $DragonFly: src/sys/vm/vm_page.h,v 1.2 2003/06/17 04:29:00 dillon Exp $ + * $DragonFly: src/sys/vm/vm_page.h,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ /* @@ -427,10 +427,6 @@ int vm_page_is_valid (vm_page_t, int, int); void vm_page_test_dirty (vm_page_t); int vm_page_bits (int, int); vm_page_t _vm_page_list_find (int, int); -#if 0 -int vm_page_sleep(vm_page_t m, char *msg, char *busy); -int vm_page_asleep(vm_page_t m, char *msg, char *busy); -#endif void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid); void vm_page_free_toq(vm_page_t m); diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h index c1c94892f9..5439f9f1ed 100644 --- a/sys/vm/vm_pageout.h +++ b/sys/vm/vm_pageout.h @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_pageout.h,v 1.26.2.1 2002/02/26 05:49:28 silby Exp $ - * $DragonFly: src/sys/vm/vm_pageout.h,v 1.2 2003/06/17 04:29:00 dillon Exp $ + * $DragonFly: src/sys/vm/vm_pageout.h,v 1.3 2003/06/21 07:54:57 dillon Exp $ */ #ifndef _VM_VM_PAGEOUT_H_ @@ -101,10 +101,8 @@ extern int vm_pageout_deficit; extern void pagedaemon_wakeup __P((void)); #define VM_WAIT vm_wait() -#define VM_AWAIT vm_await() #define VM_WAITPFAULT vm_waitpfault() extern void vm_wait __P((void)); -extern void vm_await __P((void)); extern void vm_waitpfault __P((void)); #ifdef _KERNEL -- 2.41.0